Merge "ARM: dts: msm: Add SID 1 to secure context bank for sdm670"
diff --git a/.gitignore b/.gitignore
index c2ed4ec..d47ecbb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -114,3 +114,6 @@
# Kdevelop4
*.kdev4
+
+# fetched Android config fragments
+kernel/configs/android-*.cfg
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index ad440a2..e926aea 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -31,6 +31,12 @@
This also affects writes to the tval register, due to the implicit
counter read.
+- hisilicon,erratum-161010101 : A boolean property. Indicates the
+ presence of Hisilicon erratum 161010101, which says that reading the
+ counters is unreliable in some cases, and reads may return a value 32
+ beyond the correct value. This also affects writes to the tval
+ registers, due to the implicit counter read.
+
** Optional properties:
- arm,cpu-registers-not-fw-configured : Firmware does not initialize
diff --git a/Documentation/devicetree/bindings/arm/davinci.txt b/Documentation/devicetree/bindings/arm/davinci.txt
index f0841ce..715622c 100644
--- a/Documentation/devicetree/bindings/arm/davinci.txt
+++ b/Documentation/devicetree/bindings/arm/davinci.txt
@@ -13,6 +13,10 @@
Required root node properties:
- compatible = "enbw,cmc", "ti,da850;
+LEGO MINDSTORMS EV3 (AM1808 based)
+Required root node properties:
+ - compatible = "lego,ev3", "ti,da850";
+
Generic DaVinci Boards
----------------------
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
index f68c8e4..7011d5c 100644
--- a/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
+++ b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
@@ -11,6 +11,7 @@
-label : the name of the subsystem this link connects to
Optional properties:
+-cpu-affinity: Cores to pin the interrupt and receiving work thread to.
-qcom,qos-config: Reference to the qos configuration elements.It depends on
ramp-time.
-qcom,ramp-time: Worst case time in microseconds to transition to this power
@@ -36,6 +37,7 @@
qcom,irq-mask = <0x1000>;
interrupts = <0 25 1>;
label = "lpass";
+ cpu-affinity = <1 2>;
qcom,qos-config = <&glink_qos_adsp>;
qcom,ramp-time = <0x10>,
<0x20>,
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index a6537eb..105dcac 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -110,6 +110,10 @@
on behalf of the subsystem driver.
- qcom,mdm-link-info: a string indicating additional info about the physical link.
For example: "devID_domain.bus.slot" in case of PCIe.
+- qcom,mdm-auto-boot: Boolean. To indicate this instance of esoc boots independently.
+- qcom,mdm-statusline-not-a-powersource: Boolean. If set, status line to esoc device is not a
+ power source.
+- qcom,mdm-userspace-handle-shutdown: Boolean. If set, userspace handles shutdown requests.
Example:
mdm0: qcom,mdm0 {
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 327a7d4..b3d4d44 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -107,6 +107,9 @@
- MSM8953
compatible = "qcom,msm8953"
+- SDM450
+ compatible = "qcom,sdm450"
+
- MSM8937
compatible = "qcom,msm8937"
@@ -169,6 +172,9 @@
- HDK device:
compatible = "qcom,hdk"
+- IPC device:
+ compatible = "qcom,ipc"
+
Boards (SoC type + board variant):
compatible = "qcom,apq8016"
@@ -198,6 +204,7 @@
compatible = "qcom,apq8017-mtp"
compatible = "qcom,apq8053-cdp"
compatible = "qcom,apq8053-mtp"
+compatible = "qcom,apq8053-ipc"
compatible = "qcom,mdm9630-cdp"
compatible = "qcom,mdm9630-mtp"
compatible = "qcom,mdm9630-sim"
@@ -308,8 +315,12 @@
compatible = "qcom,msm8953-sim"
compatible = "qcom,msm8953-cdp"
compatible = "qcom,msm8953-mtp"
+compatible = "qcom,msm8953-ipc"
compatible = "qcom,msm8953-qrd"
compatible = "qcom,msm8953-qrd-sku3"
+compatible = "qcom,sdm450-mtp"
+compatible = "qcom,sdm450-cdp"
+compatible = "qcom,sdm450-qrd"
compatible = "qcom,mdm9640-cdp"
compatible = "qcom,mdm9640-mtp"
compatible = "qcom,mdm9640-rumi"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_core.txt b/Documentation/devicetree/bindings/arm/msm/msm_core.txt
deleted file mode 100644
index f385915..0000000
--- a/Documentation/devicetree/bindings/arm/msm/msm_core.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-MSM Core Energy Aware driver
-
-The Energy Aware driver provides per core power and temperature
-information to the scheduler for it to make more power efficient
-scheduling decision.
-
-The required properties for the Energy-aware driver are:
-
-- compatible: "qcom,apss-core-ea"
-- reg: Physical address mapped to this device
-
-Required nodes:
-- ea@X: Parent node that has the sensor mapping for each cpu.
- This node's phandle is provided within cpu node
- to invoke/probe energy-aware only for available cpus.
- There should be one such node present for each cpu.
-
-Optional properties:
-- qcom,low-hyst-temp: Degrees C below which the power numbers
- need to be recomputed for the cores and reset
- the threshold. If this is not present, the default
- value is 10C.
-- qcom,high-hyst-temp: Degrees C above which the power numbers
- need to be recomputed for the cores and reset
- the threshold. If this property is not present,
- the default value is 5C.
-- qcom,polling-interval: Interval for which the power numbers
- need to be recomputed for the cores if there
- is no change in threshold. If this property is not
- present, the power is recalculated only on
- temperature threshold notifications.
--qcom,throttling-temp: Temperature threshold for cpu frequency mitigation.
- The value should be set same as the threshold temperature
- in thermal module - 5 C, such that there is a bandwidth to
- control the cores before frequency mitigation happens.
-
-[Second level nodes]
-Require properties to define per core characteristics:
-- sensor: Sensor phandle to map a particular sensor to the core.
- If this property is not present, then the core is assumed
- to be at 40C for all the power estimations. No sensor
- threshold is set. This phandle's compatible property is
- "qcom,sensor-information". This driver relies on the
- sensor-type and scaling-factor information provided in this
- phandle.
-
-Example
-
-qcom,msm-core@0xfc4b0000 {
- compatible = "qcom,apss-core-ea";
- reg = <0xfc4b0000 0x1000>;
- qcom,low-hyst-temp = <10>;
- qcom,high-hyst-temp = <5>;
- qcom,polling-interval = <50>;
-
- ea0: ea0 {
- sensor = <&sensor_information0>;
- };
-
- ea1: ea1 {
- sensor = <&sensor_information1>;
- };
-
-};
-
-CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0>;
- qcom,ea = <&ea0>;
-};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index bce983a..7496f4d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -21,10 +21,27 @@
Usage: required
Value type: <stringlist>
Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
- "osm_perfcl_base".
+ "osm_perfcl_base", and "cpr_rc".
Must be specified in the same order as the corresponding
addresses are specified in the reg property.
+- vdd_l3_mx_ao-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle to the MX active-only regulator device.
+
+- vdd_pwrcl_mx_ao-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Phandle to the MX active-only regulator device.
+
+- qcom,mx-turbo-freq
+ Usage: required
+ Value type: <array>
+ Definition: List of frequencies for the 3 clock domains (following the
+ order of L3, power, and performance clusters) that denote
+ the lowest rate that requires a TURBO vote on the MX rail.
+
- l3-devs
Usage: optional
Value type: <phandle>
@@ -46,10 +63,15 @@
compatible = "qcom,clk-cpu-osm";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm8998_s6_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm8998_s6_level_ao>;
- l3-devs = <&phandle0 &phandle1 &phandle2>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
+ l3-devs = <&l3_cpu0 &l3_cpu4 &l3_cdsp>;
clock-names = "xo_ao";
clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
new file mode 100644
index 0000000..928a4f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qdss_mhi.txt
@@ -0,0 +1,15 @@
+Qualcomm Technologies, Inc. QDSS bridge Driver
+
+This device will enable routing debug data from modem
+subsystem to APSS host.
+
+Required properties:
+-compatible : "qcom,qdss-mhi".
+-qcom,mhi : phandle of MHI Device to connect to.
+
+Example:
+ qcom,qdss-mhi {
+ compatible = "qcom,qdss-mhi";
+ qcom,mhi = <&mhi_0>;
+ };
+
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
new file mode 100644
index 0000000..884b19c
--- /dev/null
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -0,0 +1,268 @@
+Battery Profile Data
+
+Battery Data is a collection of battery profile data made available to
+the QPNP Charger and BMS drivers via device tree.
+
+qcom,battery-data node required properties:
+- qcom,rpull-up-kohm : The vadc pullup resistor's resistance value in kOhms.
+- qcom,vref-batt-therm-uv : The vadc voltage used to make readings.
+ For Qualcomm Technologies, Inc. VADCs, this should be
+ 1800000uV.
+
+qcom,battery-data node optional properties:
+- qcom,batt-id-range-pct : The area of variation between upper and lower bound
+ for which a given battery ID resistance is valid. This
+ value is expressed as a percentage of the specified kohm
+ resistance provided by qcom,batt-id-kohm.
+
+qcom,battery-data can also include any number of children nodes. These children
+nodes will be treated as battery profile data nodes.
+
+Profile data node required properties:
+- qcom,fcc-mah : Full charge count of the battery in milliamp-hours
+- qcom,default-rbatt-mohm : The nominal battery resistance value
+- qcom,rbatt-capacitive-mohm : The capacitive resistance of the battery.
+- qcom,flat-ocv-threshold-uv : The threshold under which the battery can be
+ considered to be in the flat portion of the discharge
+ curve.
+- qcom,max-voltage-uv : The maximum rated voltage of the battery
+- qcom,v-cutoff-uv : The cutoff voltage of the battery at which the device
+ should shutdown gracefully.
+- qcom,chg-term-ua : The termination charging current of the battery.
+- qcom,batt-id-kohm : The battery id resistance of the battery. It can be
+ used as an array which could support multiple IDs for one battery
+ module when the ID resistance of some battery modules goes across
+ several ranges.
+- qcom,battery-type : A string indicating the type of battery.
+- qcom,fg-profile-data : An array of hexadecimal values used to configure more
+ complex fuel gauge peripherals which have a large amount
+ of coefficients used in hardware state machines and thus
+ influencing the final output of the state of charge read
+ by software.
+
+Profile data node optional properties:
+- qcom,chg-rslow-comp-c1 : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-c2 : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-thr : A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,chg-rs-to-rslow: A constant for rslow compensation in the fuel gauge.
+ This will be provided by the profiling tool for
+ additional fuel gauge accuracy during charging.
+- qcom,fastchg-current-ma: Specifies the maximum fastcharge current.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for transition from constant
+ charge (CC) to constant voltage (CV). This value should
+ be 10 mV less than the float voltage.
+ This property should only be specified if
+ "qcom,autoadjust-vfloat" property is specified in the
+ charger driver to ensure a proper operation.
+- qcom,thermal-coefficients: Byte array of thermal coefficients for reading
+ battery thermistor. This should be exactly 6 bytes
+ in length.
+ Example: [01 02 03 04 05 06]
+- qcom,soc-based-step-chg: A bool property to indicate if the battery will
+ perform SoC (State of Charge) based step charging.
+ If yes, the low and high thresholds defined in
+ "qcom,step-chg-ranges" tuples should be assigned as
+ SoC values in percentage.
+- qcom,step-chg-ranges: Array of tuples in which a tuple describes a range
+ data of step charging setting.
+ A range contains following 3 integer elements:
+                        [0]: the low threshold of battery voltage in uV
+ or SoC (State of Charge) in percentage when
+ SoC based step charge is used;
+ [1]: the high threshold of battery voltage in uV
+ or SoC in percentage when SoC based step charge
+ is used;
+ [2]: the FCC (full charging current) in uA when battery
+ voltage or SoC falls between the low and high
+ thresholds.
+                        The threshold values in range should be in ascending order
+                        and shouldn't overlap. It supports 8 ranges at max.
+- qcom,jeita-fcc-ranges: Array of tuples in which a tuple describes a range
+ data of sw-jeita FCC (full charging current) setting.
+ A range contains following 3 integer elements:
+ [0]: the low threshold of battery temperature in deci-degree;
+ [1]: the high threshold of battery temperature in deci-degree;
+ [2]: the FCC in uA when battery temperature falls between
+ the low and high thresholds.
+                        The threshold values in range should be in ascending order
+                        and shouldn't overlap. It supports 8 ranges at max.
+- qcom,jeita-fv-ranges: Array of tuples in which a tuple describes a range
+ data of sw-jeita FV (float voltage) setting.
+ A range contains following 3 integer elements:
+ [0]: the low threshold of battery temperature in deci-degree;
+ [1]: the high threshold of battery temperature in deci-degree;
+                        [2]: the FV in uV when battery temperature falls between
+ the low and high thresholds.
+                        The threshold values in range should be in ascending order
+                        and shouldn't overlap. It supports 8 ranges at max.
+
+Profile data node required subnodes:
+- qcom,fcc-temp-lut : An 1-dimensional lookup table node that encodes
+ temperature to fcc lookup. The units for this lookup
+ table should be degrees celsius to milliamp-hours.
+- qcom,pc-temp-ocv-lut : A 2-dimensional lookup table node that encodes
+ temperature and percent charge to open circuit voltage
+ lookup. The units for this lookup table should be
+ degrees celsius and percent to millivolts.
+- qcom,rbatt-sf-lut : A 2-dimensional lookup table node that encodes
+ temperature and percent charge to battery internal
+ resistance lookup. The units for this lookup table
+ should be degrees celsius and percent to milliohms.
+
+Profile data node optional subnodes:
+- qcom,ibat-acc-lut: A 2-dimensional lookup table that encodes temperature
+ and battery current to battery ACC (apparent charge
+ capacity). The units for this lookup table should be
+ temperature in degrees celsius, ibat in milli-amps
+ and ACC in milli-ampere-hour.
+
+Lookup table required properties:
+- qcom,lut-col-legend : An array that encodes the legend of the lookup table's
+ columns. The length of this array will determine the
+ lookup table's width.
+- qcom,lut-data : An array that encodes the lookup table's data. The size of this
+ array should be equal to the size of qcom,lut-col-legend
+ multiplied by 1 if it's a 1-dimensional table, or
+ the size of qcom,lut-row-legend if it's a 2-dimensional
+ table. The data should be in a flattened row-major
+ representation.
+
+Lookup table optional properties:
+- qcom,lut-row-legend : An array that encodes the legend of the lookup table's rows.
+ If this property exists, then it is assumed that the
+ lookup table is a 2-dimensional table.
+
+Example:
+
+In msm8974-mtp.dtsi:
+
+mtp_batterydata: qcom,battery-data {
+ qcom,rpull-up-kohm = <100>;
+ qcom,vref-batt-therm-uv = <1800000>;
+
+ /include/ "batterydata-palladium.dtsi"
+ /include/ "batterydata-mtp-3000mah.dtsi"
+};
+
+&pm8941_bms {
+ qcom,battery-data = <&mtp_batterydata>;
+};
+
+In batterydata-palladium.dtsi:
+
+qcom,palladium-batterydata {
+ qcom,fcc-mah = <1500>;
+ qcom,default-rbatt-mohm = <236>;
+ qcom,rbatt-capacitive-mohm = <50>;
+ qcom,flat-ocv-threshold-uv = <3800000>;
+ qcom,max-voltage-uv = <4200000>;
+ qcom,v-cutoff-uv = <3400000>;
+ qcom,chg-term-ua = <100000>;
+ qcom,batt-id-kohm = <75>;
+ qcom,step-chg-ranges = <3600000 4000000 3000000
+ 4001000 4200000 2800000
+ 4201000 4400000 2000000>;
+ qcom,jeita-fcc-ranges = <0 100 600000
+ 101 200 2000000
+ 201 450 3000000
+ 451 550 600000>;
+ qcom,jeita-fv-ranges = <0 100 4200000
+ 101 450 4350000
+ 451 550 4200000>;
+ qcom,battery-type = "palladium_1500mah";
+
+ qcom,fcc-temp-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-data = <1492 1492 1493 1483 1502>;
+ };
+
+ qcom,pc-temp-ocv-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+ <65 60 55 50 45 40 35>,
+ <30 25 20 15 10 9 8>,
+ <7 6 5 4 3 2 1 0>;
+ qcom,lut-data = <4173 4167 4163 4156 4154>,
+ <4104 4107 4108 4102 4104>,
+ <4057 4072 4069 4061 4060>,
+ <3973 4009 4019 4016 4020>,
+ <3932 3959 3981 3982 3983>,
+ <3899 3928 3954 3950 3950>,
+ <3868 3895 3925 3921 3920>,
+ <3837 3866 3898 3894 3892>,
+ <3812 3841 3853 3856 3862>,
+ <3794 3818 3825 3823 3822>,
+ <3780 3799 3804 3804 3803>,
+ <3768 3787 3790 3788 3788>,
+ <3757 3779 3778 3775 3776>,
+ <3747 3772 3771 3766 3765>,
+ <3736 3763 3766 3760 3746>,
+ <3725 3749 3756 3747 3729>,
+ <3714 3718 3734 3724 3706>,
+ <3701 3703 3696 3689 3668>,
+ <3675 3695 3682 3675 3662>,
+ <3670 3691 3680 3673 3661>,
+ <3661 3686 3679 3672 3656>,
+ <3649 3680 3676 3669 3641>,
+ <3633 3669 3667 3655 3606>,
+ <3610 3647 3640 3620 3560>,
+ <3580 3607 3596 3572 3501>,
+ <3533 3548 3537 3512 3425>,
+ <3457 3468 3459 3429 3324>,
+ <3328 3348 3340 3297 3172>,
+ <3000 3000 3000 3000 3000>;
+ };
+
+ qcom,rbatt-sf-lut {
+ qcom,lut-col-legend = <(-20) 0 25 40 65>;
+ qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+ <65 60 55 50 45 40 35>,
+ <30 25 20 15 10 9 8>,
+ <7 6 5 4 3 2 1 0>;
+ qcom,lut-data = <357 187 100 91 91>,
+ <400 208 105 94 94>,
+ <390 204 106 95 96>,
+ <391 201 108 98 98>,
+ <391 202 110 98 100>,
+ <390 200 110 99 102>,
+ <389 200 110 99 102>,
+ <393 202 101 93 100>,
+ <407 205 99 89 94>,
+ <428 208 100 91 96>,
+ <455 212 102 92 98>,
+ <495 220 104 93 101>,
+ <561 232 107 95 102>,
+ <634 245 112 98 98>,
+ <714 258 114 98 98>,
+ <791 266 114 97 100>,
+ <871 289 108 95 97>,
+ <973 340 124 108 105>,
+ <489 241 109 96 99>,
+ <511 246 110 96 99>,
+ <534 252 111 95 98>,
+ <579 263 112 96 96>,
+ <636 276 111 95 97>,
+ <730 294 109 96 99>,
+ <868 328 112 98 104>,
+ <1089 374 119 101 115>,
+ <1559 457 128 105 213>,
+ <12886 1026 637 422 3269>,
+ <170899 127211 98968 88907 77102>;
+ };
+
+ qcom,ibat-acc-lut {
+ qcom,lut-col-legend = <(-20) 0 25>;
+ qcom,lut-row-legend = <0 250 500 1000>;
+ qcom,lut-data = <1470 1470 1473>,
+ <1406 1406 1430>,
+ <1247 1247 1414>,
+ <764 764 1338>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt b/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt
new file mode 100644
index 0000000..2782b9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a7-cpucc.txt
@@ -0,0 +1,48 @@
+Qualcomm Application A7 CPU clock driver
+-------------------------------------
+
+It is the clock controller driver which provides higher frequency
+clocks and allows A7 CPU frequency scaling on sdxpoorwills based platforms.
+
+Required properties:
+- compatible : shall contain only one of the following:
+ "qcom,cpu-sdxpoorwills",
+- clocks : Phandle to the clock device.
+- clock-names: Names of the used clocks.
+- qcom,a7cc-init-rate : Initial rate which needs to be set from cpu driver.
+- reg : shall contain base register offset and size.
+- reg-names : Names of the bases for the above registers.
+- vdd_dig_ao-supply : The regulator powering the APSS PLL.
+- cpu-vdd-supply : The regulator powering the APSS RCG.
+- qcom,rcg-reg-offset : Register offset for APSS RCG.
+- qcom,speedX-bin-vZ : A table of CPU frequency (Hz) to regulator voltage (uV) mapping.
+ Format: <freq uV>
+ This represents the max frequency possible for each possible
+ power configuration for a CPU that's binned as speed bin X,
+ speed bin revision Z. Speed bin values can be between [0-7]
+ and the version can be between [0-3].
+- #clock-cells : shall contain 1.
+
+Optional properties :
+- reg-names: "efuse",
+
+Example:
+ clock_cpu: qcom,clock-a7@17808100 {
+ compatible = "qcom,cpu-sdxpoorwills";
+ clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+ clock-names = "xo_ao";
+ qcom,a7cc-init-rate = <1497600000>;
+ reg = <0x17808100 0x7F10>;
+ reg-names = "apcs_pll";
+
+ vdd_dig_ao-supply = <&pmxpoorwills_s5_level_ao>;
+ cpu-vdd-supply = <&pmxpoorwills_s5_level_ao>;
+ qcom,rcg-reg-offset = <0x7F08>;
+ qcom,speed0-bin-v0 =
+ < 0 RPMH_REGULATOR_LEVEL_OFF>,
+ < 345600000 RPMH_REGULATOR_LEVEL_LOW_SVS>,
+ < 576000000 RPMH_REGULATOR_LEVEL_SVS>,
+ < 1094400000 RPMH_REGULATOR_LEVEL_NOM>,
+ < 1497600000 RPMH_REGULATOR_LEVEL_TURBO>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 78bb87a..7330db4 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -21,6 +21,7 @@
"qcom,gcc-sdm845-v2.1"
"qcom,gcc-sdm670"
"qcom,debugcc-sdm845"
+ "qcom,gcc-sdxpoorwills"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
index 9ad7263..d57f61a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt
@@ -3,6 +3,7 @@
Required properties :
- compatible : shall contain "qcom,rpmh-clk-sdm845" or "qcom,rpmh-clk-sdm670"
+ or "qcom,rpmh-clk-sdxpoorwills"
- #clock-cells : must contain 1
- mboxes : list of RPMh mailbox phandle and channel identifier tuples.
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 16a3ec4..1bd2c76 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -31,6 +31,7 @@
* "fsl,t4240-clockgen"
* "fsl,b4420-clockgen"
* "fsl,b4860-clockgen"
+ * "fsl,ls1012a-clockgen"
* "fsl,ls1021a-clockgen"
Chassis-version clock strings include:
* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
diff --git a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
index 6f2fac7..3786412 100644
--- a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
+++ b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
@@ -4,7 +4,7 @@
to measure the parameters for latency driven memory access patterns.
Required properties:
-- compatible: Must be "qcom,arm-memlat-mon"
+- compatible: Must be "qcom,arm-memlat-mon" or "qcom,arm-cpu-mon"
- qcom,cpulist: List of CPU phandles to be monitored in a cluster
- qcom,target-dev: The DT device that corresponds to this master port
- qcom,core-dev-table: A mapping table of core frequency to a required bandwidth vote at the
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
new file mode 100644
index 0000000..6ec1a88
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
@@ -0,0 +1,46 @@
+THS8135 Video DAC
+-----------------
+
+This is the binding for Texas Instruments THS8135 Video DAC bridge.
+
+Required properties:
+
+- compatible: Must be "ti,ths8135"
+
+Required nodes:
+
+This device has two video ports. Their connections are modelled using the OF
+graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for RGB input
+- Video port 1 for VGA output
+
+Example
+-------
+
+vga-bridge {
+ compatible = "ti,ths8135";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ vga_bridge_in: endpoint {
+ remote-endpoint = <&lcdc_out_vga>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ vga_bridge_out: endpoint {
+ remote-endpoint = <&vga_con_in>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index 3534f04..fc95288 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -121,6 +121,12 @@
If ping pong split is enabled, this time should not be higher
than two times the dsi link rate time.
If the property is not specified, then the default value is 14000 us.
+- qcom,panel-allow-phy-poweroff: A boolean property indicates that panel allows to turn off the phy power
+ supply during idle screen. A panel should be able to handle the dsi lanes
+ in floating state(not LP00 or LP11) to turn on this property. Software
+ turns off PHY pmic power supply, phy ldo and DSI Lane ldo during
+ idle screen (footswitch control off) when this property is enabled.
+- qcom,dsi-phy-regulator-min-datarate-bps: Minimum per lane data rate (bps) to turn on PHY regulator.
[1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
[2] Documentation/devicetree/bindings/graph.txt
@@ -229,4 +235,6 @@
vddio-supply = <&pma8084_l12>;
qcom,dsi-phy-regulator-ldo-mode;
+ qcom,panel-allow-phy-poweroff;
+ qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>;
};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 22b4e91..4b4c274 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -125,6 +125,11 @@
configuration value.
- qcom,sde-ubwc-swizzle: Property to specify the default UBWC swizzle
configuration value.
+- qcom,sde-smart-panel-align-mode: A u32 property to specify the align mode for
+ split display on smart panel. Possible values:
+ 0x0 - no alignment
+ 0xc - align at start of frame
+ 0xd - align at start of line
- qcom,sde-panic-per-pipe: Boolean property to indicate if panic signal
control feature is available on each source pipe.
- qcom,sde-has-src-split: Boolean property to indicate if source split
@@ -366,6 +371,8 @@
- qcom,sde-cdp-setting: Array of 2 cell property, with a format of
<read enable, write enable> for cdp use cases in
order of <real_time>, and <non_real_time>.
+- qcom,sde-qos-cpu-mask: A u32 value indicating desired PM QoS CPU affine mask.
+- qcom,sde-qos-cpu-dma-latency: A u32 value indicating desired PM QoS CPU DMA latency in usec.
- qcom,sde-inline-rot-xin: An integer array of xin-ids related to inline
rotation.
- qcom,sde-inline-rot-xin-type: A string array indicating the type of xin,
@@ -533,6 +540,7 @@
qcom,sde-ubwc-version = <0x100>;
qcom,sde-ubwc-static = <0x100>;
qcom,sde-ubwc-swizzle = <0>;
+ qcom,sde-smart-panel-align-mode = <0xd>;
qcom,sde-panic-per-pipe;
qcom,sde-has-src-split;
qcom,sde-has-dim-layer;
@@ -611,6 +619,9 @@
qcom,sde-cdp-setting = <1 1>, <1 0>;
+ qcom,sde-qos-cpu-mask = <0x3>;
+ qcom,sde-qos-cpu-dma-latency = <300>;
+
qcom,sde-vbif-off = <0 0>;
qcom,sde-vbif-id = <0 1>;
qcom,sde-vbif-default-ot-rd-limit = <32>;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 32c31af..806c458 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -340,13 +340,15 @@
"single_roi": default enable mode, only single roi is sent to panel
"dual_roi": two rois are merged into one big roi. Panel ddic should be able
to process two roi's along with the DCS command to send two rois.
- disabled if property is not specified.
+ disabled if property is not specified. This property is specified
+ per timing node to support resolution restrictions.
- qcom,mdss-dsi-horizontal-line-idle: List of width ranges (EC - SC) in pixels indicating
additional idle time in dsi clock cycles that is needed
to compensate for smaller line width.
- qcom,partial-update-roi-merge: Boolean indicates roi combination is need
and function has been provided for dcs
- 2A/2B command.
+ 2A/2B command. This property is specified per timing node to support
+ resolution restrictions.
- qcom,dcs-cmd-by-left: Boolean to indicate that dcs command are sent
through the left DSI controller only in a dual-dsi configuration
- qcom,mdss-dsi-panel-hdr-enabled: Boolean to indicate HDR support in panel.
@@ -383,7 +385,8 @@
- qcom,suspend-ulps-enabled: Boolean to enable support for ULPS mode for panels during suspend state.
- qcom,panel-roi-alignment: Specifies the panel ROI alignment restrictions on its
left, top, width, height alignments and minimum width and
- height values
+ height values. This property is specified per timing node to support
+ resolution's alignment restrictions.
- qcom,esd-check-enabled: Boolean used to enable ESD recovery feature.
- qcom,mdss-dsi-panel-status-command: A byte stream formed by multiple dcs packets based on
qcom dsi controller protocol, to read the panel status.
@@ -654,7 +657,6 @@
qcom,mdss-tear-check-rd-ptr-trigger-intr = <1281>;
qcom,mdss-tear-check-frame-rate = <6000>;
qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
- qcom,partial-update-enabled = "single_roi";
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-lp11-init;
qcom,mdss-dsi-init-delay-us = <100>;
@@ -662,7 +664,6 @@
mdss-dsi-tx-eot-append;
qcom,ulps-enabled;
qcom,suspend-ulps-enabled;
- qcom,panel-roi-alignment = <4 4 2 2 20 20>;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-command = [06 01 00 01 05 00 02 0A 08];
qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode";
@@ -721,6 +722,8 @@
qcom,mdss-dsc-config-by-manufacture-cmd;
qcom,display-topology = <1 1 1>;
qcom,default-topology-index = <0>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <4 4 2 2 20 20>;
};
};
qcom,panel-supply-entries {
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
index 641cc26..a89b834 100644
--- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -100,3 +100,4 @@
controller. This must be enabled for debugging purpose
only with simulator panel. It should not be enabled for
normal DSI panels.
+- qcom,null-insertion-enabled: A boolean to enable NULL packet insertion feature for DSI controller.
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index b18d573..69174ca 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -5,14 +5,17 @@
Required properties:
- label: A string used as a descriptive name for the device.
- compatible: Must be "qcom,kgsl-3d0" and "qcom,kgsl-3d"
-- reg: Specifies the register base address and size. The second interval
- specifies the shader memory base address and size.
+- reg: Specifies the register base address and size, the shader memory
+ base address and size (if it exists), and the base address and size
+ of the CX_DBGC block (if it exists).
- reg-names: Resource names used for the physical address of device registers
and shader memory. "kgsl_3d0_reg_memory" gives the physical address
and length of device registers while "kgsl_3d0_shader_memory" gives
physical address and length of device shader memory. If
specified, "qfprom_memory" gives the range for the efuse
- registers used for various configuration options.
+ registers used for various configuration options. If specified,
+ "kgsl_3d0_cx_dbgc_memory" gives the physical address and length
+ of the CX DBGC block.
- interrupts: Interrupt mapping for GPU IRQ.
- interrupt-names: String property to describe the name of the interrupt.
- qcom,id: An integer used as an identification number for the device.
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
index 62ba54b..b0c5b57 100644
--- a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -18,7 +18,6 @@
"high-thr-en-set" for high threshold interrupts and
"low-thr-en-set" for low threshold interrupts. High and low threshold
interrupts are to be enabled if VADC_USR needs to support recurring measurement.
-- qcom,adc-bit-resolution : Bit resolution of the ADC.
- qcom,adc-vdd-reference : Voltage reference used by the ADC.
Channel nodes
@@ -46,6 +45,12 @@
0 : The calibration values used for measurement are from a timer.
1 : Forces a fresh measurement for calibration values at the same time
measurement is taken.
+- qcom,adc-full-scale-code: Full scale code with offset removed.
+- pinctrl-names: should be "default" (see pinctrl binding [0]).
+- pinctrl-0: a phandle pointing to the pin settings for the
+ device (see pinctrl binding [0]).
+
+[0]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
Client required property:
- qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
new file mode 100644
index 0000000..b362940
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
@@ -0,0 +1,18 @@
+* AVIA HX711 ADC chip for weight cells
+ Bit-banging driver
+
+Required properties:
+ - compatible: Should be "avia,hx711"
+ - sck-gpios: Definition of the GPIO for the clock
+ - dout-gpios: Definition of the GPIO for data-out
+ See Documentation/devicetree/bindings/gpio/gpio.txt
+ - avdd-supply: Definition of the regulator used as analog supply
+
+Example:
+weight@0 {
+ compatible = "avia,hx711";
+ sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
+ dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
+	avdd-supply = <&avdd>;
+};
+
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index d2e635a..2a7e161 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -104,6 +104,11 @@
An array of <sid mask>.
Indicates the SIDs for which the workaround is required.
+- qcom,actlr:
+ An array of <sid mask actlr-setting>.
+ Any sid X for which X&~mask==sid will be programmed with the
+ given actlr-setting.
+
- qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
disable in ms. In case of unmap call, regulator is
enabled/disabled. This may introduce additional delay. For
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
index 54365b1..cd4d222 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -54,6 +54,21 @@
- hw-trdhld : should contain internal hold time for SDA
- hw-tsp : should contain filtering of glitches
+* Qualcomm Technologies, Inc. MSM Camera Sensor Resource Manager
+
+MSM camera sensor resource manager node contains properties of shared camera
+sensor resource.
+
+Required properties:
+- compatible : should be manufacturer name followed by sensor name
+ - "qcom,cam-res-mgr"
+Optional properties:
+- shared-gpios : should contain the gpios which are used by two or more
+ cameras, and these cameras may be opened together.
+- pinctrl-names: List of names to assign the shared pin state defined in pinctrl device node
+- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin
+ controller. These pin configurations are installed in the pinctrl device node.
+
* Qualcomm Technologies, Inc. MSM Sensor
MSM sensor node contains properties of camera sensor
@@ -165,6 +180,9 @@
should contain phandle of respective ir-cut node
- qcom,special-support-sensors: if only some special sensors are supported
on this board, add sensor name in this property.
+- use-shared-clk : It is a boolean property. This property is required
+  if the clock is shared between different sensor and ois devices
+  that need to be opened together.
- clock-rates: clock rate in Hz.
- clock-cntl-level: says what all different cloc level node has.
- clock-cntl-support: Says whether clock control support is present or not
@@ -233,6 +251,9 @@
required from the regulators mentioned in the regulator-names property
(in the same order).
- cam_vaf-supply : should contain regulator from which ois voltage is supplied
+- use-shared-clk : It is a boolean property. This property is required
+  if the clock is shared between different sensor and ois devices
+  that need to be opened together.
Example:
@@ -334,6 +355,15 @@
rgltr-load-current = <100000>;
};
+ qcom,cam-res-mgr {
+ compatible = "qcom,cam-res-mgr";
+ status = "ok";
+ shared-gpios = <18 19>;
+ pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+ pinctrl-0 = <&cam_shared_clk_active &cam_res_mgr_active>;
+ pinctrl-1 = <&cam_shared_clk_suspend &cam_res_mgr_suspend>;
+ };
+
qcom,cam-sensor@0 {
cell-index = <0>;
compatible = "qcom,camera";
@@ -350,7 +380,7 @@
cam_vio-supply = <&pm845_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
regulator-names = "cam_vdig", "cam_vio", "cam_vana";
- rgltr-cntrl-support;
+ rgltr-cntrl-support;
rgltr-min-voltage = <0 3312000 1352000>;
rgltr-max-voltage = <0 3312000 1352000>;
rgltr-load-current = <0 80000 105000>;
@@ -374,6 +404,7 @@
sensor-mode = <0>;
cci-master = <0>;
status = "ok";
+ use-shared-clk;
clocks = <&clock_mmss clk_mclk0_clk_src>,
<&clock_mmss clk_camss_mclk0_clk>;
clock-names = "cam_src_clk", "cam_clk";
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
index 36dad1a..ffc0e96 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -131,6 +131,11 @@
Value type: <string>
Definition: Name of firmware image.
+- ubwc-cfg
+ Usage: required
+ Value type: <u32>
+ Definition: UBWC configuration.
+
Examples:
a5: qcom,a5@ac00000 {
cell-index = <0>;
@@ -169,6 +174,7 @@
clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
clock-cntl-level = "turbo";
fw_name = "CAMERA_ICP.elf";
+ ubwc-cfg = <0x7F 0x1FF>;
};
qcom,ipe0 {
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
new file mode 100644
index 0000000..9a37922
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
@@ -0,0 +1,149 @@
+* Qualcomm Technologies, Inc. MSM Camera LRME
+
+The MSM camera Low Resolution Motion Estimation device provides dependency
+definitions for enabling Camera LRME HW. MSM camera LRME is implemented in
+multiple device nodes. The root LRME device node has properties defined to
+hint the driver about the LRME HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+LRME root interface node takes care of the high level LRME driver
+handling and controls the underlying LRME hardware present.
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,cam-lrme"
+
+- compat-hw-name
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,lrme"
+
+- num-lrme
+ Usage: required
+ Value type: <u32>
+ Definition: Number of supported LRME HW blocks
+
+Example:
+ qcom,cam-lrme {
+ compatible = "qcom,cam-lrme";
+ compat-hw-name = "qcom,lrme";
+ num-lrme = <1>;
+ };
+
+=======================
+Required Node Structure
+=======================
+LRME node provides an interface for the Low Resolution Motion Estimation
+hardware driver, describing the device register map, interrupts, clocks and regulators.
+
+- cell-index
+ Usage: required
+ Value type: <u32>
+ Definition: Node instance number
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Should be "qcom,lrme"
+
+- reg-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the register resources
+
+- reg
+ Usage: optional
+ Value type: <u32>
+ Definition: Register values
+
+- reg-cam-base
+ Usage: optional
+ Value type: <u32>
+	Definition: Offset of the register space compared
+		to the Camera base register space
+
+- interrupt-names
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the interrupt
+
+- interrupts
+ Usage: optional
+ Value type: <u32>
+ Definition: Interrupt line associated with LRME HW
+
+- regulator-names
+ Usage: required
+ Value type: <string>
+ Definition: Name of the regulator resources for LRME HW
+
+- camss-supply
+ Usage: required
+ Value type: <phandle>
+ Definition: Regulator reference corresponding to the names listed
+ in "regulator-names"
+
+- clock-names
+ Usage: required
+ Value type: <string>
+ Definition: List of clock names required for LRME HW
+
+- clocks
+ Usage: required
+ Value type: <phandle>
+ Definition: List of clocks required for LRME HW
+
+- clock-rates
+ Usage: required
+ Value type: <u32>
+ Definition: List of clocks rates
+
+- clock-cntl-level
+ Usage: required
+ Value type: <string>
+	Definition: List of strings corresponding to the clock-rates levels
+ Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo
+
+- src-clock-name
+ Usage: required
+ Value type: <string>
+ Definition: Source clock name
+
+Examples:
+ cam_lrme: qcom,lrme@ac6b000 {
+ cell-index = <0>;
+ compatible = "qcom,lrme";
+ reg-names = "lrme";
+ reg = <0xac6b000 0xa00>;
+ reg-cam-base = <0x6b000>;
+ interrupt-names = "lrme";
+ interrupts = <0 476 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "lrme_clk_src",
+ "lrme_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_LRME_CLK_SRC>,
+ <&clock_camcc CAM_CC_LRME_CLK>;
+ clock-rates = <0 0 0 0 0 0 0>,
+ <0 0 0 0 0 19200000 19200000>,
+ <0 0 0 0 0 19200000 19200000>,
+ <0 0 0 0 0 19200000 19200000>;
+ clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+ src-clock-name = "lrme_core_clk_src";
+ };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
index 1c18228..99f2c7a 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
@@ -12,6 +12,7 @@
======================================
First Level Node - CAM VFE device
======================================
+Required properties:
- compatible
Usage: required
Value type: <string>
@@ -74,6 +75,22 @@
Value type: <string>
Definition: Source clock name.
+Optional properties:
+- clock-names-option
+ Usage: optional
+ Value type: <string>
+ Definition: Optional clock names.
+
+- clocks-option
+ Usage: required if clock-names-option defined
+ Value type: <phandle>
+	Definition: List of optional clocks used for VFE HW.
+
+- clock-rates-option
+ Usage: required if clock-names-option defined
+ Value type: <u32>
+ Definition: List of clocks rates for optional clocks.
+
Example:
qcom,vfe0@acaf000 {
cell-index = <0>;
@@ -105,5 +122,8 @@
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>,
clock-rates = <0 0 80000000 0 320000000 0 384000000 0 0 0>;
src-clock-name = "ife_clk_src";
+ clock-names-option = "ife_dsp_clk";
+ clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+ clock-rates-option = <600000000>;
status = "ok";
};
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
index 094dc25..a98e4ae 100644
--- a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -9,6 +9,8 @@
defined.
- qcom,reg-adjustment-offset : Specify the base adjustment offset value for the
version registers
+- qcom,qpic-clk-rpmh: Indicates whether QPIC clock is RPMH controlled clock or
+ not.
MTD flash partition layout for NAND devices -
@@ -53,6 +55,7 @@
qcom,msm-bus,num-cases = <1>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps = <91 512 0 0>,
+ qcom,qpic-clk-rpmh;
};
qcom,mtd-partitions {
diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
new file mode 100644
index 0000000..8e56180
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
@@ -0,0 +1,43 @@
+Qualcomm Technologies Inc. EMAC Gigabit Ethernet controller
+
+This network controller consists of the MAC and
+RGMII IO Macro for interfacing with PHY.
+
+Required properties:
+
+emac_hw node:
+- compatible: Should be "qcom,emac-dwc-eqos"
+- reg: Offset and length of the register regions for the mac and io-macro
+- interrupts: Interrupt number used by this controller
+- io-macro-info: Internal io-macro-info
+
+Internal io-macro-info:
+- io-macro-bypass-mode: <0 or 1> internal or external delay configuration
+- io-interface: <rgmii/mii/rmii> PHY interface used
+
+Example:
+
+soc {
+ emac_hw: qcom,emac@00020000 {
+ compatible = "qcom,emac-dwc-eqos";
+ reg = <0x20000 0x10000>,
+ <0x36000 0x100>;
+ reg-names = "emac-base", "rgmii-base";
+ interrupts = <0 62 4>, <0 60 4>,
+ <0 49 4>, <0 50 4>,
+ <0 51 4>, <0 52 4>,
+ <0 53 4>, <0 54 4>,
+ <0 55 4>, <0 56 4>,
+ <0 57 4>;
+ interrupt-names = "sbd-intr", "lpi-intr",
+ "tx-ch0-intr", "tx-ch1-intr",
+ "tx-ch2-intr", "tx-ch3-intr",
+ "tx-ch4-intr", "rx-ch0-intr",
+ "rx-ch1-intr", "rx-ch2-intr",
+ "rx-ch3-intr";
+ io-macro-info {
+ io-macro-bypass-mode = <0>;
+ io-interface = "rgmii";
+ };
+ };
+}
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index aede546..6b40b30 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -79,6 +79,8 @@
- qcom,rx-polling-sleep-ms: Receive Polling Timeout in millisecond,
default is 1 millisecond.
- qcom,ipa-polling-iteration: IPA Polling Iteration Count,default is 40.
+- qcom,mhi-event-ring-id-limits: Two elements property. Start and End limits
+ for MHI event rings ids.
- qcom,ipa-tz-unlock-reg: Register start addresses and ranges which
need to be unlocked by TZ.
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 05fa6e4..8795aff 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -137,12 +137,6 @@
be based off battery voltage. For both SOC and battery voltage,
charger receives the signal from FG to resume charging.
-- qcom,micro-usb
- Usage: optional
- Value type: <empty>
- Definition: Boolean flag which indicates that the platform only support
- micro usb port.
-
- qcom,suspend-input-on-debug-batt
Usage: optional
Value type: <empty>
@@ -183,6 +177,15 @@
Value type: bool
Definition: Boolean flag which when present enables sw compensation for jeita
+- qcom,battery-data
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the phandle of the node which contains the battery
+ profiles supported on the device. This is only specified
+ when step charging and sw-jeita configurations are desired
+ to be get from these properties defined in battery profile:
+ qcom,step-chg-ranges, qcom,jeita-fcc-ranges, qcom,jeita-fv-ranges.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
index ca584e5..abbb981 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1355-charger.txt
@@ -29,6 +29,13 @@
Definition: Should specify the phandle of SMB's revid module. This is used
to identify the SMB subtype.
+- qcom,disable-ctm
+ Usage: optional
+ Value type: <empty>
+ Definition: boolean flag. Usually a thermistor near usb/typeC connector is
+ connected to AUX. Set this flag to indicate the thermistor
+ doesn't exist.
+
================================================
Second Level Nodes - SMB1355 Charger Peripherals
================================================
diff --git a/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
index 29bb2d3..05792b0 100644
--- a/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cpr4-apss-regulator.txt
@@ -165,7 +165,7 @@
and Turbo.
- qcom,cpr-fuse-combos
- Usage: required
+ Usage: optional
Value type: <u32>
Definition: Specifies the number of fuse combinations being supported by
the device. This value is utilized by several other
@@ -178,6 +178,11 @@
The last 8 fuse combos correspond to speed bin fuse value 7
along with CPR revision fuse values 0 to 7.
+ This property must be specified unless qcom,cpr-fuse-combo-map
+ is present. In that case, qcom,cpr-fuse-combos is implicitly
+ assumed to have a value equal to the number of tuple lists (rows)
+ found in the qcom,cpr-fuse-combo-map property.
+
- qcom,cpr-speed-bins
Usage: optional
Value type: <u32>
@@ -368,6 +373,27 @@
speed bins 1-to-1 or exactly 1 list which is used regardless
of the speed bin found on a given chip.
+- qcom,cpr-fuse-combo-map
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: A grouping of integer tuple lists where each tuple list (row)
+ defines a mapping from combinations of fuse parameter ranges
+ to fuse combo ID (i.e., map row index). Each tuple defines the
+ beginning and ending fuse parameter value that matches. The
+ number of tuples in each row is equal to the number of selection
+ fuse parameters used in fuse combo logic. For MSM8953, the fuse
+ parameters are "speed-bin", "cpr fuse revision", and "foundry id".
+ The tuples in each row correspond to the fuses in order:
+ "speed-bin", "cpr fuse revision" and "foundry id". An example entry
+ for speed-bin '3', cpr fuse revisions >= '2', and foundry '2' is
+ as shown below:
+ <3 3>, <2 7>, <2 2>
+
+ The number of rows in the property is arbitrary but used to size
+ other properties. qcom,cpr-fuse-combos must be set to the number
+ of rows specified in this property. For msm8953, the maximum number
+ of rows for this property is 512 (8 * 8 * 8).
+
=======
Example
=======
@@ -414,6 +440,8 @@
"APCS_ALIAS0_APM_CTLER_STATUS",
"APCS0_CPR_CORE_ADJ_MODE_REG";
+ qcom,cpr-aging-ref-voltage = <990000>; /* Turbo corner */
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <1>;
@@ -517,6 +545,14 @@
<(-20000) (-15000) (-10000) 0>;
qcom,allow-boost =
<1>;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <6>; /* Turbo corner */
+ qcom,cpr-aging-ro-scaling-factor = <2800>;
+ qcom,cpr-aging-derate =
+ <1000 1000 1000 1000 1000
+ 1000 1000 1000>;
+ qcom,allow-aging-voltage-adjustment = <1>;
};
};
};
diff --git a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
index 5515457..f4f549a 100644
--- a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
@@ -159,6 +159,21 @@
not specified, then "qcom,override-cornerX-reg-config" must contain a single
register configuration sequence list which is then applied unconditionally.
This property can only be specified if qcom,cornerX-reg-config property is already defined.
+- qcom,override-acc-range-fuse-list: Array of tuples define the selection parameters used for selecting the override
+ mem-acc configuration. The fused values for these selection parameters are used by the
+ qcom,override-fuse-range-map to identify the correct set of override properties.
+ Each tuple contains 4 elements as defined below:
+ [0] => the fuse row number of the selector
+ [1] => LSB bit position of the bits
+ [2] => number of bits
+ [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+- qcom,override-fuse-range-map: Array of tuples where each tuple specifies the allowed range for all the selection parameters
+ defined in qcom,override-acc-range-fuse-list. The fused values of these selection parameters
+ are compared against their allowed range in each tuple starting from 0th tuple and use the
+ first matched tuple index to select the right tuples from the other override properties.
+ Either qcom,override-fuse-range-map or qcom,override-fuse-version-map is used to select
+ the override configuration. The qcom,override-fuse-range-map is used if both the
+ properties are specified.
mem_acc_vreg_corner: regulator@fd4aa044 {
compatible = "qcom,mem-acc-regulator";
@@ -184,6 +199,13 @@
qcom,override-fuse-version-map = <0>,
<2>,
<(-1)>;
+ qcom,override-acc-range-fuse-list =
+ <37 40 3 0>,
+ <36 30 8 0>;
+ qcom,override-fuse-range-map =
+ <0 0>, < 0 0>, <49 63>,
+ <1 1>, < 0 0>, <50 63>,
+ <0 1>, < 95 255>, < 0 63>;
qcom,override-corner-acc-map = <0 0 1>,
<0 1 2>,
<0 1 1>;
diff --git a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
index 9deb7d4..7de891e 100644
--- a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
@@ -1,12 +1,13 @@
Qualcomm Technologies, Inc. RPMh Regulators
-rpmh-regulator devices support PMIC regulator management via the VRM and ARC
-RPMh accelerators. The APPS processor communicates with these hardware blocks
-via an RSC using command packets. The VRM allows changing four parameters for
-a given regulator: enable state, output voltage, operating mode, and minimum
-headroom voltage. The ARC allows changing only a single parameter for a given
-regulator: its operating level. This operating level is fed into CPR which then
-decides upon a final explicit voltage for the regulator.
+rpmh-regulator devices support PMIC regulator management via the VRM, ARC and
+XOB RPMh accelerators. The APPS processor communicates with these hardware
+blocks via an RSC using command packets. The VRM allows changing four
+parameters for a given regulator: enable state, output voltage, operating mode,
+and minimum headroom voltage. The ARC allows changing only a single parameter
+for a given regulator: its operating level. This operating level is fed into
+CPR which then decides upon a final explicit voltage for the regulator. The XOB
+allows changing only a single parameter for a given regulator: its enable state.
=======================
Required Node Structure
@@ -24,9 +25,10 @@
- compatible
Usage: required
Value type: <string>
- Definition: Must be "qcom,rpmh-vrm-regulator" or
- "qcom,rpmh-arc-regulator" depending upon the hardware type,
- VRM or ARC, of the RPMh managed regulator resource.
+ Definition: Must be "qcom,rpmh-vrm-regulator", "qcom,rpmh-arc-regulator"
+ or "qcom,rpmh-xob-regulator" depending upon the hardware
+ type, VRM, ARC or XOB, of the RPMh managed regulator
+ resource.
- mboxes
Usage: required
@@ -116,8 +118,8 @@
- regulator-enable-ramp-delay
Usage: optional
Value type: <u32>
- Definition: For VRM resources, the time in microseconds to delay after
- enabling a regulator.
+ Definition: For VRM and XOB resources, the time in microseconds to delay
+ after enabling a regulator.
- qcom,set
Usage: required
@@ -130,7 +132,7 @@
one of RPMH_REGULATOR_SET_* (i.e. 1, 2, or 3).
- qcom,init-enable
- Usage: optional; VRM regulators only
+ Usage: optional; VRM and XOB regulators only
Value type: <u32>
Definition: Specifies the initial enable state to request for a VRM
regulator. Supported values are 0 (regulator disabled) and
@@ -267,3 +269,15 @@
qcom,init-voltage = <1000000>;
};
};
+
+rpmh-regulator-ldoc1 {
+ compatible = "qcom,rpmh-xob-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoc1";
+ pm855l_l1: regulator-pm855l-l1 {
+ regulator-name = "pm855l_l1";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/soc/qcom/dcc.txt b/Documentation/devicetree/bindings/soc/qcom/dcc.txt
index 8a9761c..5150459 100644
--- a/Documentation/devicetree/bindings/soc/qcom/dcc.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/dcc.txt
@@ -35,6 +35,14 @@
"atb" : To send captured data over ATB to a trace sink
"sram" : To save captured data in dcc internal SRAM.
+- qcom,curr-link-list: int, To specify the link list to use for the default list.
+
+- qcom,link-list: The values to be programmed into the default link list.
+ The enum values for DCC operations is defined in dt-bindings/soc/qcom,dcc_v2.h
+ The following gives basic structure to be used for each operation:
+ <DCC_operation addr val apb_bus>
+ val is to be interpreted based on what operation is to be performed.
+
Example:
dcc: dcc@4b3000 {
@@ -47,6 +55,13 @@
clocks = <&clock_gcc clk_gcc_dcc_ahb_clk>;
clock-names = "dcc_clk";
+ qcom,curr-link-list = <2>;
+ qcom,link-list = <DCC_READ 0x1740300 6 0>,
+ <DCC_READ 0x1620500 4 0>,
+ <DCC_READ 0x7840000 1 0>,
+ <DCC_READ 0x7841010 12 0>,
+ <DCC_READ 0x7842000 16 0>,
+ <DCC_READ 0x7842500 2 0>;
qcom,save-reg;
};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
index 45e309c..bf2a91a 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -14,6 +14,9 @@
Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names: Names of the clocks in 1-1 correspondence with
the "clocks" property.
+ - <supply-name>-supply: phandle to the regulator device tree node
+ Required "supply-name" examples are:
+ "vdda33" : 3.3v supply to eud.
Driver notifies clients via extcon for VBUS spoof attach/detach
and charger enable/disable events. Clients registered for these
@@ -29,6 +32,7 @@
reg-names = "eud_base";
clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "cfg_ahb_clk";
+ vdda33-supply = <&pm8998_l24>;
};
An example for EUD extcon client:
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index d4db970..34c2963 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2016,6 +2016,66 @@
qcom,aux-codec = <&stub_codec>;
};
+* SDX ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,sdx-asoc-snd-tavil"
+- qcom,model : The user-visible name of this sound card.
+- qcom,prim_mi2s_aux_master : Handle to prim_master pinctrl configurations
+- qcom,prim_mi2s_aux_slave : Handle to prim_slave pinctrl configurations
+- qcom,sec_mi2s_aux_master : Handle to sec_master pinctrl configurations
+- qcom,sec_mi2s_aux_slave : Handle to sec_slave pinctrl configurations
+- asoc-platform: This is phandle list containing the references to platform device
+ nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+ the platform names should match to that of the phandle order
+ given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+ that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+ cpu dai names should match to that of the phandle order give
+ in "asoc-cpu". The cpu names are in the form of "%s.%d" form,
+ where the id (%d) field represents the back-end AFE port id that
+ this CPU dai is associated with.
+
+Example:
+
+ sound-tavil {
+ compatible = "qcom,sdx-asoc-snd-tavil";
+ qcom,model = "sdx-tavil-i2s-snd-card";
+ qcom,prim_mi2s_aux_master = <&prim_master>;
+ qcom,prim_mi2s_aux_slave = <&prim_slave>;
+ qcom,sec_mi2s_aux_master = <&sec_master>;
+ qcom,sec_mi2s_aux_slave = <&sec_slave>;
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&hostless>, <&afe>, <&routing>,
+ <&pcm_dtmf>, <&host_pcm>, <&compress>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice",
+ "msm-pcm-loopback", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-pcm-routing",
+ "msm-pcm-dtmf", "msm-voice-host-pcm",
+ "msm-compress-dsp";
+ asoc-cpu = <&dai_pri_auxpcm>, <&mi2s_prim>, <&mi2s_sec>,
+ <&dtmf_tx>,
+ <&rx_capture_tx>, <&rx_playback_rx>,
+ <&tx_capture_tx>, <&tx_playback_rx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&dai_sec_auxpcm>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
+ "msm-dai-stub-dev.6", "msm-dai-stub-dev.7",
+ "msm-dai-stub-dev.8", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-auxpcm.2";
+ };
+
* APQ8096 Automotive ASoC Machine driver
Required properties:
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index c848ab5..6d2ae5e 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -3,7 +3,7 @@
Required properties:
- compatible : "qcom,tasha-slim-pgd" or "qcom,tasha-i2c-pgd" for Tasha Codec
- or "qcom,tavil-slim-pgd" for Tavil Codec
+ "qcom,tavil-slim-pgd" or "qcom,tavil-i2c-pgd" for Tavil Codec
- elemental-addr: codec slimbus slave PGD enumeration address.(48 bits)
- qcom,cdc-reset-gpio: gpio used for codec SOC reset.
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 6838afd..5d3b232 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -59,6 +59,8 @@
- snps,xhci-imod-value: Interrupt moderation interval for host mode
(in increments of 250nsec).
- usb-core-id: Differentiates between different controllers present on a device.
+ - snps,bus-suspend-enable: If present then controller supports low power mode
+ during bus suspend.
This is usually a subnode to DWC3 glue to which it is connected.
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index d23cb46..9ee2cc6 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -159,7 +159,7 @@
"efuse_addr": EFUSE address to read and update analog tune parameter.
"emu_phy_base" : phy base address used for programming emulation target phy.
"ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
- "eud_base" : EUD device register address space to use EUD pet functionality.
+ "refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register.
- clocks: a list of phandles to the PHY clocks. Use as per
Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
@@ -192,7 +192,8 @@
0x210 /* QUSB2PHY_PWR_CTRL1 */
0x230 /* QUSB2PHY_INTR_CTRL */
0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
- 0x254>; /* QUSB2PHY_TEST1 */
+ 0x254 /* QUSB2PHY_TEST1 */
+ 0x198>; /* QUSB2PHY_PLL_BIAS_CONTROL_2 */
qcom,efuse-bit-pos = <21>;
qcom,efuse-num-bits = <3>;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3bdc896..86c9259 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -38,6 +38,7 @@
auo AU Optronics Corporation
auvidea Auvidea GmbH
avago Avago Technologies
+avia avia semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
axis Axis Communications AB
boe BOE Technology Group Co., Ltd.
@@ -154,6 +155,7 @@
kyo Kyocera Corporation
lacie LaCie
lantiq Lantiq Semiconductor
+lego LEGO Systems A/S
lenovo Lenovo Group Ltd.
lg LG Corporation
linux Linux-specific binding
diff --git a/Makefile b/Makefile
index 665104d..6f6262b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 51
+SUBLEVEL = 62
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1eea99b..85d9ea4 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -92,6 +92,12 @@
lr r0, [efa]
mov r1, sp
+ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
+ ; access for say stack unwinding of modules for crash dumps
+ lr r3, [ARC_REG_PID]
+ or r3, r3, MMU_ENABLE
+ sr r3, [ARC_REG_PID]
+
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index bdb295e..a4dc881 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -896,9 +896,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
local_irq_save(flags);
- /* re-enable the MMU */
- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
-
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 63da745..d8d8b82 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -572,6 +572,7 @@
select USE_OF
select PINCTRL
select ARCH_WANT_KMAP_ATOMIC_FLUSH
+ select SND_SOC_COMPRESS
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
@@ -1487,6 +1488,7 @@
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
+ select GENERIC_IRQ_MIGRATION
depends on SMP
help
Say Y here to experiment with turning CPUs off and on. CPUs
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index aed66d5..b757634 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -34,8 +34,7 @@
used instead of the auto-probing which utilizes the register.
config REMAP_VECTORS_TO_RAM
- bool 'Install vectors to the beginning of RAM' if DRAM_BASE
- depends on DRAM_BASE
+ bool 'Install vectors to the beginning of RAM'
help
The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index f9ee585..1b43ebd 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -124,6 +124,14 @@
&rtc {
system-power-controller;
+
+ pinctrl-0 = <&ext_wakeup>;
+ pinctrl-names = "default";
+
+ ext_wakeup: ext-wakeup {
+ pins = "ext_wakeup0";
+ input-enable;
+ };
};
/* NAND Flash */
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index cc952cf..024f1b7 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -176,9 +176,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 2d76688..c60cfe9 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,9 +143,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index 34cba87..aeecfa7 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -111,9 +111,9 @@
reg = <0x8000 0x1000>;
cache-unified;
cache-level = <2>;
- arm,double-linefill-incr = <1>;
+ arm,double-linefill-incr = <0>;
arm,double-linefill-wrap = <0>;
- arm,double-linefill = <1>;
+ arm,double-linefill = <0>;
prefetch-data = <1>;
};
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index 05a985a..6208e85 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,7 +48,7 @@
};
memory {
- reg = <0x00000000 0x10000000>;
+ reg = <0x80000000 0x10000000>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 8aa19ba..5282d69 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -97,11 +97,11 @@
thermal-zones {
cpu_thermal: cpu-thermal {
cooling-maps {
- map0 {
+ cooling_map0: map0 {
/* Corresponds to 800MHz at freq_table */
cooling-device = <&cpu0 7 7>;
};
- map1 {
+ cooling_map1: map1 {
/* Corresponds to 200MHz at freq_table */
cooling-device = <&cpu0 13 13>;
};
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index 99634c5..7504a5a 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -13,6 +13,7 @@
/dts-v1/;
#include "exynos4412-odroid-common.dtsi"
+#include "exynos4412-prime.dtsi"
/ {
model = "Hardkernel ODROID-U3 board based on Exynos4412";
@@ -47,11 +48,11 @@
cooling-maps {
map0 {
trip = <&cpu_alert1>;
- cooling-device = <&cpu0 7 7>;
+ cooling-device = <&cpu0 9 9>;
};
map1 {
trip = <&cpu_alert2>;
- cooling-device = <&cpu0 13 13>;
+ cooling-device = <&cpu0 15 15>;
};
map2 {
trip = <&cpu_alert0>;
diff --git a/arch/arm/boot/dts/exynos4412-odroidx2.dts b/arch/arm/boot/dts/exynos4412-odroidx2.dts
index 4d22885..d6e92eb 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx2.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx2.dts
@@ -12,6 +12,7 @@
*/
#include "exynos4412-odroidx.dts"
+#include "exynos4412-prime.dtsi"
/ {
model = "Hardkernel ODROID-X2 board based on Exynos4412";
diff --git a/arch/arm/boot/dts/exynos4412-prime.dtsi b/arch/arm/boot/dts/exynos4412-prime.dtsi
new file mode 100644
index 0000000..e75bc17
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-prime.dtsi
@@ -0,0 +1,41 @@
+/*
+ * Samsung's Exynos4412 Prime SoC device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Exynos4412 Prime SoC revision supports higher CPU frequencies than
+ * non-Prime version. Therefore we need to update OPPs table and
+ * thermal maps accordingly.
+ */
+
+&cpu0_opp_1500 {
+ /delete-property/turbo-mode;
+};
+
+&cpu0_opp_table {
+ opp@1600000000 {
+ opp-hz = /bits/ 64 <1600000000>;
+ opp-microvolt = <1350000>;
+ clock-latency-ns = <200000>;
+ };
+ opp@1704000000 {
+ opp-hz = /bits/ 64 <1704000000>;
+ opp-microvolt = <1350000>;
+ clock-latency-ns = <200000>;
+ };
+};
+
+&cooling_map0 {
+ cooling-device = <&cpu0 9 9>;
+};
+
+&cooling_map1 {
+ cooling-device = <&cpu0 15 15>;
+};
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 40beede..3ebdf01 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -130,7 +130,7 @@
opp-microvolt = <1287500>;
clock-latency-ns = <200000>;
};
- opp@1500000000 {
+ cpu0_opp_1500: opp@1500000000 {
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <1350000>;
clock-latency-ns = <200000>;
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index c05e7cf..40b3e31 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -215,16 +215,16 @@
pinctrl_fec: fecgrp {
fsl,pins = <
- MX53_PAD_FEC_MDC__FEC_MDC 0x80000000
- MX53_PAD_FEC_MDIO__FEC_MDIO 0x80000000
- MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x80000000
- MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x80000000
- MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x80000000
- MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x80000000
- MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x80000000
- MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x80000000
- MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x80000000
- MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x80000000
+ MX53_PAD_FEC_MDC__FEC_MDC 0x4
+ MX53_PAD_FEC_MDIO__FEC_MDIO 0x1fc
+ MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x180
+ MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x180
+ MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x180
+ MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x180
+ MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x180
+ MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x4
+ MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x4
+ MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x4
>;
};
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 18596a2..77c6b93 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -174,4 +174,40 @@
clocks = <&uart_clk>;
status = "disabled";
};
+
+ mmsys: syscon@14000000 {
+ compatible = "mediatek,mt2701-mmsys", "syscon";
+ reg = <0 0x14000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ imgsys: syscon@15000000 {
+ compatible = "mediatek,mt2701-imgsys", "syscon";
+ reg = <0 0x15000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ vdecsys: syscon@16000000 {
+ compatible = "mediatek,mt2701-vdecsys", "syscon";
+ reg = <0 0x16000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ hifsys: syscon@1a000000 {
+ compatible = "mediatek,mt2701-hifsys", "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ ethsys: syscon@1b000000 {
+ compatible = "mediatek,mt2701-ethsys", "syscon";
+ reg = <0 0x1b000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ bdpsys: syscon@1c000000 {
+ compatible = "mediatek,mt2701-bdpsys", "syscon";
+ reg = <0 0x1c000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 3826bad..c51581d 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -3,17 +3,15 @@
sdxpoorwills-cdp.dtb \
sdxpoorwills-mtp.dtb
-
-ifeq ($(CONFIG_ARM64),y)
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-else
targets += dtbs
targets += $(addprefix ../, $(dtb-y))
$(obj)/../%.dtb: $(src)/%.dts FORCE
$(call if_changed_dep,dtc)
+include $(srctree)/arch/arm64/boot/dts/qcom/Makefile
+$(obj)/../%.dtb: $(src)/../../../../arm64/boot/dts/qcom/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
dtbs: $(addprefix $(obj)/../,$(dtb-y))
-endif
clean-files := *.dtb
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
new file mode 100644
index 0000000..fa21dd7
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -0,0 +1,137 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&i2c_7 {
+ status = "okay";
+ smb138x: qcom,smb138x@8 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-names = "smb138x";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+ smb138x_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ smb138x_tadc: qcom,tadc@3600 {
+ compatible = "qcom,tadc";
+ reg = <0x3600 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupt-parent = <&smb138x>;
+ interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "eoc";
+
+ batt_temp@0 {
+ reg = <0>;
+ qcom,rbias = <68100>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ skin_temp@1 {
+ reg = <1>;
+ qcom,rbias = <33000>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ die_temp@2 {
+ reg = <2>;
+ qcom,scale = <(-1306)>;
+ qcom,offset = <397904>;
+ };
+
+ batt_i@3 {
+ reg = <3>;
+ qcom,channel = <3>;
+ qcom,scale = <(-20000000)>;
+ };
+
+ batt_v@4 {
+ reg = <4>;
+ qcom,scale = <5000000>;
+ };
+
+ input_i@5 {
+ reg = <5>;
+ qcom,scale = <14285714>;
+ };
+
+ input_v@6 {
+ reg = <6>;
+ qcom,scale = <25000000>;
+ };
+
+ otg_i@7 {
+ reg = <7>;
+ qcom,scale = <5714286>;
+ };
+ };
+
+ smb1381_charger: qcom,smb1381-charger@1000 {
+ compatible = "qcom,smb138x-parallel-slave";
+ qcom,pmic-revid = <&smb138x_revid>;
+ reg = <0x1000 0x700>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&smb138x>;
+ io-channels =
+ <&smb138x_tadc 1>,
+ <&smb138x_tadc 2>,
+ <&smb138x_tadc 3>,
+ <&smb138x_tadc 14>,
+ <&smb138x_tadc 15>,
+ <&smb138x_tadc 16>,
+ <&smb138x_tadc 17>;
+ io-channel-names =
+ "connector_temp",
+ "charger_temp",
+ "batt_i",
+ "connector_temp_thr1",
+ "connector_temp_thr2",
+ "connector_temp_thr3",
+ "charger_temp_max";
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "chg-state-change";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog-bark",
+ "temperature-change";
+ };
+ };
+ };
+};
+
+&smb1381_charger {
+ smb138x_vbus: qcom,smb138x-vbus {
+ status = "disabled";
+ regulator-name = "smb138x-vbus";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
index e3f154b..2106759 100644
--- a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi
@@ -85,6 +85,55 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
};
};
+
+ pmxpoorwills_vadc: vadc@3100 {
+ compatible = "qcom,qpnp-vadc-hc";
+ reg = <0x3100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eoc-int-en-set";
+ qcom,adc-full-scale-code = <0x70e4>;
+ qcom,adc-vdd-reference = <1875>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ambient_therm_default>;
+
+ chan@6 {
+ label = "die_temp";
+ reg = <6>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <3>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@0 {
+ label = "ref_gnd";
+ reg = <0>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+
+ chan@1 {
+ label = "ref_1250v";
+ reg = <1>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ qcom,cal-val = <0>;
+ };
+ };
};
qcom,pmxpoorwills@1 {
diff --git a/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
new file mode 100644
index 0000000..0fd3b34
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-adsp-loader {
+ compatible = "qcom,adsp-loader";
+ qcom,adsp-state = <0>;
+ qcom,proc-img-to-load = "modem";
+ };
+
+ qcom,msm-audio-ion {
+ compatible = "qcom,msm-audio-ion";
+ qcom,scm-mp-enabled;
+ memory-region = <&audio_mem>;
+ };
+
+ pcm0: qcom,msm-pcm {
+ compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <0>;
+ };
+
+ routing: qcom,msm-pcm-routing {
+ compatible = "qcom,msm-pcm-routing";
+ };
+
+ pcm1: qcom,msm-pcm-low-latency {
+ compatible = "qcom,msm-pcm-dsp";
+ qcom,msm-pcm-dsp-id = <1>;
+ qcom,msm-pcm-low-latency;
+ qcom,latency-level = "ultra";
+ };
+
+ qcom,msm-compr-dsp {
+ compatible = "qcom,msm-compr-dsp";
+ };
+
+ voip: qcom,msm-voip-dsp {
+ compatible = "qcom,msm-voip-dsp";
+ };
+
+ voice: qcom,msm-pcm-voice {
+ compatible = "qcom,msm-pcm-voice";
+ qcom,destroy-cvd;
+ };
+
+ stub_codec: qcom,msm-stub-codec {
+ compatible = "qcom,msm-stub-codec";
+ };
+
+ qcom,msm-dai-fe {
+ compatible = "qcom,msm-dai-fe";
+ };
+
+ afe: qcom,msm-pcm-afe {
+ compatible = "qcom,msm-pcm-afe";
+ };
+
+ hostless: qcom,msm-pcm-hostless {
+ compatible = "qcom,msm-pcm-hostless";
+ };
+
+ host_pcm: qcom,msm-voice-host-pcm {
+ compatible = "qcom,msm-voice-host-pcm";
+ };
+
+ loopback: qcom,msm-pcm-loopback {
+ compatible = "qcom,msm-pcm-loopback";
+ };
+
+ compress: qcom,msm-compress-dsp {
+ compatible = "qcom,msm-compress-dsp";
+ qcom,adsp-version = "MDSP 1.2";
+ };
+
+ qcom,msm-dai-stub {
+ compatible = "qcom,msm-dai-stub";
+ dtmf_tx: qcom,msm-dai-stub-dtmf-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <4>;
+ };
+
+ rx_capture_tx: qcom,msm-dai-stub-host-rx-capture-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <5>;
+ };
+
+ rx_playback_rx: qcom,msm-dai-stub-host-rx-playback-rx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <6>;
+ };
+
+ tx_capture_tx: qcom,msm-dai-stub-host-tx-capture-tx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <7>;
+ };
+
+ tx_playback_rx: qcom,msm-dai-stub-host-tx-playback-rx {
+ compatible = "qcom,msm-dai-stub-dev";
+ qcom,msm-dai-stub-dev-id = <8>;
+ };
+ };
+
+ qcom,msm-dai-q6 {
+ compatible = "qcom,msm-dai-q6";
+ afe_pcm_rx: qcom,msm-dai-q6-be-afe-pcm-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <224>;
+ };
+
+ afe_pcm_tx: qcom,msm-dai-q6-be-afe-pcm-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <225>;
+ };
+
+ afe_proxy_rx: qcom,msm-dai-q6-afe-proxy-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <241>;
+ };
+
+ afe_proxy_tx: qcom,msm-dai-q6-afe-proxy-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <240>;
+ };
+
+ incall_record_rx: qcom,msm-dai-q6-incall-record-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32771>;
+ };
+
+ incall_record_tx: qcom,msm-dai-q6-incall-record-tx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32772>;
+ };
+
+ incall_music_rx: qcom,msm-dai-q6-incall-music-rx {
+ compatible = "qcom,msm-dai-q6-dev";
+ qcom,msm-dai-q6-dev-id = <32773>;
+ };
+ };
+
+ pcm_dtmf: qcom,msm-pcm-dtmf {
+ compatible = "qcom,msm-pcm-dtmf";
+ };
+
+ cpu-pmu {
+ compatible = "arm,cortex-a7-pmu";
+ qcom,irq-is-percpu;
+ interrupts = <1 8 0x100>;
+ };
+
+ dai_pri_auxpcm: qcom,msm-pri-auxpcm {
+ compatible = "qcom,msm-auxpcm-dev";
+ qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+ qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+ qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+ qcom,msm-auxpcm-interface = "primary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
+ };
+
+ dai_sec_auxpcm: qcom,msm-sec-auxpcm {
+ compatible = "qcom,msm-auxpcm-dev";
+ qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+ qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+ qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+ qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+ qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+ qcom,msm-auxpcm-interface = "secondary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
+ };
+
+ qcom,msm-dai-mi2s {
+ compatible = "qcom,msm-dai-mi2s";
+ mi2s_prim: qcom,msm-dai-q6-mi2s-prim {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <0>;
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ };
+ mi2s_sec: qcom,msm-dai-q6-mi2s-sec {
+ compatible = "qcom,msm-dai-q6-mi2s";
+ qcom,msm-dai-q6-mi2s-dev-id = <1>;
+ qcom,msm-mi2s-rx-lines = <2>;
+ qcom,msm-mi2s-tx-lines = <1>;
+ };
+
+ };
+
+ prim_master: prim_master_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&pri_ws_active_master
+ &pri_sck_active_master
+ &pri_dout_active
+ &pri_din_active>;
+ pinctrl-1 = <&pri_ws_sleep
+ &pri_sck_sleep
+ &pri_dout_sleep
+ &pri_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ prim_slave: prim_slave_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&pri_ws_active_slave
+ &pri_sck_active_slave
+ &pri_dout_active
+ &pri_din_active>;
+ pinctrl-1 = <&pri_ws_sleep
+ &pri_sck_sleep
+ &pri_dout_sleep
+ &pri_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ sec_master: sec_master_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sec_ws_active_master
+ &sec_sck_active_master
+ &sec_dout_active
+ &sec_din_active>;
+ pinctrl-1 = <&sec_ws_sleep
+ &sec_sck_sleep
+ &sec_dout_sleep
+ &sec_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+
+ sec_slave: sec_slave_pinctrl {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&sec_ws_active_slave
+ &sec_sck_active_slave
+ &sec_dout_active
+ &sec_din_active>;
+ pinctrl-1 = <&sec_ws_sleep
+ &sec_sck_sleep
+ &sec_dout_sleep
+ &sec_din_sleep>;
+ qcom,mi2s-auxpcm-cdc-gpios;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
new file mode 100644
index 0000000..a294e6c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdx-wsa881x.dtsi
@@ -0,0 +1,45 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+ tavil_codec {
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_0211: wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0212: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+
+ wsa881x_0213: wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0214: wsa881x@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
new file mode 100644
index 0000000..f90bd7f
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxpoorwills-wcd.dtsi"
+#include "sdx-wsa881x.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&snd_934x {
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ qcom,msm-mbhc-gnd-swh = <1>;
+ qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
+ qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
+ qcom,tavil-mclk-clk-freq = <12288000>;
+
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+ <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+};
+
+&soc {
+ wcd9xxx_intc: wcd9xxx-irq {
+ status = "ok";
+ compatible = "qcom,wcd9xxx-irq";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&tlmm>;
+ qcom,gpio-connect = <&tlmm 71 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcd_intr_default>;
+ };
+
+ clock_audio_up: audio_ext_clk_up {
+ compatible = "qcom,audio-ref-clk";
+ qcom,codec-mclk-clk-freq = <12288000>;
+ pinctrl-names = "sleep", "active";
+ pinctrl-0 = <&i2s_mclk_sleep>;
+ pinctrl-1 = <&i2s_mclk_active>;
+ #clock-cells = <1>;
+ };
+
+ wcd_rst_gpio: msm_cdc_pinctrl@77 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ qcom,cdc-rst-n-gpio = <&tlmm 77 0>;
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&cdc_reset_active>;
+ pinctrl-1 = <&cdc_reset_sleep>;
+ };
+};
+
+&i2c_3 {
+ wcd934x_cdc: tavil_codec {
+ compatible = "qcom,tavil-i2c-pgd";
+ elemental-addr = [00 01 50 02 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30 31>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk";
+ clocks = <&clock_audio_up AUDIO_LPASS_MCLK>;
+
+ cdc-vdd-buck-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ cdc-buck-sido-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ cdc-vdd-tx-h-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ cdc-vdd-rx-h-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ cdc-vddpx-1-supply = <&pmxpoorwills_l6>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-buck-sido",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <12288000>;
+ qcom,cdc-dmic-sample-rate = <4800000>;
+
+ qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi
new file mode 100644
index 0000000..a3eba9a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio.dtsi
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdx-audio-lpass.dtsi"
+
+&soc {
+ snd_934x: sound-tavil {
+ compatible = "qcom,sdx-asoc-snd-tavil";
+ qcom,model = "sdx-tavil-i2s-snd-card";
+ qcom,prim_mi2s_aux_master = <&prim_master>;
+ qcom,prim_mi2s_aux_slave = <&prim_slave>;
+ qcom,sec_mi2s_aux_master = <&sec_master>;
+ qcom,sec_mi2s_aux_slave = <&sec_slave>;
+
+ asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+ <&loopback>, <&hostless>, <&afe>, <&routing>,
+ <&pcm_dtmf>, <&host_pcm>, <&compress>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-voip-dsp", "msm-pcm-voice",
+ "msm-pcm-loopback", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-pcm-routing",
+ "msm-pcm-dtmf", "msm-voice-host-pcm",
+ "msm-compress-dsp";
+ asoc-cpu = <&dai_pri_auxpcm>, <&mi2s_prim>, <&mi2s_sec>,
+ <&dtmf_tx>,
+ <&rx_capture_tx>, <&rx_playback_rx>,
+ <&tx_capture_tx>, <&tx_playback_rx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&dai_sec_auxpcm>;
+ asoc-cpu-names = "msm-dai-q6-auxpcm.1",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ "msm-dai-stub-dev.4", "msm-dai-stub-dev.5",
+ "msm-dai-stub-dev.6", "msm-dai-stub-dev.7",
+ "msm-dai-stub-dev.8", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-auxpcm.2";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi
index c06b806..a7943cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp-audio-overlay.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,12 @@
* GNU General Public License for more details.
*/
+#include "sdxpoorwills-audio-overlay.dtsi"
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+&soc {
+ sound-tavil {
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrRight";
+ };
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 6be47b4..15129c7 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -29,3 +29,79 @@
status = "ok";
};
+&qnand_1 {
+ status = "ok";
+};
+
+&pmxpoorwills_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4d {
+ label = "pa_therm1";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4e {
+ label = "pa_therm2";
+ reg = <0x4e>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "mdm_case_therm";
+ reg = <0x4f>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@52 {
+ label = "ambient_therm";
+ reg = <0x52>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
index 15ae24c..8d7e377 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
@@ -29,3 +29,79 @@
status = "ok";
};
+&qnand_1 {
+ status = "ok";
+};
+
+&pmxpoorwills_vadc {
+ chan@83 {
+ label = "vph_pwr";
+ reg = <0x83>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@4c {
+ label = "xo_therm";
+ reg = <0x4c>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <4>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4d {
+ label = "pa_therm1";
+ reg = <0x4d>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4e {
+ label = "pa_therm2";
+ reg = <0x4e>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@4f {
+ label = "mdm_case_therm";
+ reg = <0x4f>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+
+ chan@52 {
+ label = "ambient_therm";
+ reg = <0x52>;
+ qcom,decimation = <2>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "ratiometric";
+ qcom,scale-function = <2>;
+ qcom,hw-settle-time = <2>;
+ qcom,fast-avg-setup = <0>;
+ qcom,vadc-thermal-node;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 8181fa8..b6c04ec 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -919,5 +919,369 @@
};
};
};
+
+ wcd9xxx_intr {
+ wcd_intr_default: wcd_intr_default{
+ mux {
+ pins = "gpio71";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio71";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-enable;
+ };
+ };
+ };
+
+ cdc_reset_ctrl {
+ cdc_reset_sleep: cdc_reset_sleep {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ cdc_reset_active:cdc_reset_active {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio77";
+ drive-strength = <8>;
+ bias-pull-down;
+ output-high;
+ };
+ };
+ };
+
+ i2s_mclk {
+ i2s_mclk_sleep: i2s_mclk_sleep {
+ mux {
+ pins = "gpio62";
+ function = "i2s_mclk";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ i2s_mclk_active: i2s_mclk_active {
+ mux {
+ pins = "gpio62";
+ function = "i2s_mclk";
+ };
+
+ config {
+ pins = "gpio62";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pmx_pri_mi2s_aux {
+ pri_ws_sleep: pri_ws_sleep {
+ mux {
+ pins = "gpio12";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_sck_sleep: pri_sck_sleep {
+ mux {
+ pins = "gpio15";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_dout_sleep: pri_dout_sleep {
+ mux {
+ pins = "gpio14";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio14";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_ws_active_master: pri_ws_active_master {
+ mux {
+ pins = "gpio12";
+ function = "pri_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+
+ pri_sck_active_master: pri_sck_active_master {
+ mux {
+ pins = "gpio15";
+ function = "pri_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+
+ pri_ws_active_slave: pri_ws_active_slave {
+ mux {
+ pins = "gpio12";
+ function = "pri_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio12";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ };
+ };
+
+ pri_sck_active_slave: pri_sck_active_slave {
+ mux {
+ pins = "gpio15";
+ function = "pri_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio15";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ };
+ };
+
+ pri_dout_active: pri_dout_active {
+ mux {
+ pins = "gpio14";
+ function = "pri_mi2s_data1_a";
+ };
+
+ config {
+ pins = "gpio14";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pmx_pri_mi2s_aux_din {
+ pri_din_sleep: pri_din_sleep {
+ mux {
+ pins = "gpio13";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio13";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ pri_din_active: pri_din_active {
+ mux {
+ pins = "gpio13";
+ function = "pri_mi2s_data0_a";
+ };
+
+ config {
+ pins = "gpio13";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+
+ pmx_sec_mi2s_aux {
+ sec_ws_sleep: sec_ws_sleep {
+ mux {
+ pins = "gpio16";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_sck_sleep: sec_sck_sleep {
+ mux {
+ pins = "gpio19";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_dout_sleep: sec_dout_sleep {
+ mux {
+ pins = "gpio18";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_ws_active_master: sec_ws_active_master {
+ mux {
+ pins = "gpio16";
+ function = "sec_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+
+ sec_sck_active_master: sec_sck_active_master {
+ mux {
+ pins = "gpio19";
+ function = "sec_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+
+ sec_ws_active_slave: sec_ws_active_slave {
+ mux {
+ pins = "gpio16";
+ function = "sec_mi2s_ws_a";
+ };
+
+ config {
+ pins = "gpio16";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ };
+ };
+
+ sec_sck_active_slave: sec_sck_active_slave {
+ mux {
+ pins = "gpio19";
+ function = "sec_mi2s_sck_a";
+ };
+
+ config {
+ pins = "gpio19";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ };
+ };
+
+ sec_dout_active: sec_dout_active {
+ mux {
+ pins = "gpio18";
+ function = "sec_mi2s_data1_a";
+ };
+
+ config {
+ pins = "gpio18";
+ drive-strength = <8>; /* 8 mA */
+				bias-disable; /* NO PULL */
+ output-high;
+ };
+ };
+ };
+
+ pmx_sec_mi2s_aux_din {
+ sec_din_sleep: sec_din_sleep {
+ mux {
+ pins = "gpio17";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio17";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* PULL DOWN */
+ input-enable;
+ };
+ };
+
+ sec_din_active: sec_din_active {
+ mux {
+ pins = "gpio17";
+ function = "sec_mi2s_data0_a";
+ };
+
+ config {
+ pins = "gpio17";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable; /* NO PULL */
+ };
+ };
+ };
+ };
+};
+
+&pmxpoorwills_gpios {
+ ambient_therm {
+ ambient_therm_default: ambient_therm_default {
+ pins = "gpio2";
+ bias-high-impedance;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
index cc126f6..9947594 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -12,103 +12,324 @@
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
-/* Stub regulators */
-/ {
- pmxpoorwills_s1: regualtor-pmxpoorwills-s1 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s1";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <752000>;
- regulator-max-microvolt = <752000>;
+&soc {
+ /* RPMh regulators */
+
+ /* pmxpoorwills S1 - VDD_MODEM supply */
+ rpmh-regulator-modemlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mss.lvl";
+ pmxpoorwills_s1_level: regualtor-pmxpoorwills-s1 {
+ regulator-name = "pmxpoorwills_s1_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
};
- /* VDD CX supply */
- pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s5_level";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ rpmh-regulator-smpa4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "smpa4";
+ pmxpoorwills_s4: regulator-pmxpoorwills-s4 {
+ regulator-name = "pmxpoorwills_s4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ };
};
- pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_s5_level_ao";
- qcom,hpm-min-load = <100000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ /* pmxpoorwills S5 - VDD_CX supply */
+ rpmh-regulator-cxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "cx.lvl";
+ pmxpoorwills_s5_level-parent-supply = <&pmxpoorwills_l9_level>;
+ pmxpoorwills_s5_level_ao-parent-supply =
+ <&pmxpoorwills_l9_level_ao>;
+ pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
+ regulator-name = "pmxpoorwills_s5_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
+
+ pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
+ regulator-name = "pmxpoorwills_s5_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ qcom,min-dropout-voltage-level = <(-1)>;
+ };
};
- pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l1";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ rpmh-regulator-ldoa1 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa1";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
+ regulator-name = "pmxpoorwills_l1";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,init-voltage = <1200000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l3";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
+ rpmh-regulator-ldoa2 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa2";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
+ regulator-name = "pmxpoorwills_l2";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ qcom,init-voltage = <1128000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ regulator-always-on;
+ };
};
- pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l4";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <872000>;
- regulator-max-microvolt = <872000>;
+ rpmh-regulator-ldoa3 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa3";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
+ regulator-name = "pmxpoorwills_l3";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ qcom,init-voltage = <800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l5";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ rpmh-regulator-ldoa4 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa4";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
+ regulator-name = "pmxpoorwills_l4";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ qcom,init-voltage = <872000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l6: regualtor-pmxpoorwills-l6 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l6";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ rpmh-regulator-ldoa5 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa5";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
+ regulator-name = "pmxpoorwills_l5";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1704000>;
+ qcom,init-voltage = <1704000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l8";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <800000>;
+ rpmh-regulator-ldoa7 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa7";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
+ regulator-name = "pmxpoorwills_l7";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2952000>;
+ regulator-max-microvolt = <2952000>;
+ qcom,init-voltage = <2952000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- /* VDD MX supply */
- pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l9_level";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ rpmh-regulator-ldoa8 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa8";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
+ regulator-name = "pmxpoorwills_l8";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ qcom,init-voltage = <800000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
};
- pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level_ao {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l9_level_ao";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
- regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ /* pmxpoorwills L9 - VDD_MX supply */
+ rpmh-regulator-mxlvl {
+ compatible = "qcom,rpmh-arc-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "mx.lvl";
+ pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
+ regulator-name = "pmxpoorwills_l9_level";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level-ao {
+ regulator-name = "pmxpoorwills_l9_level_ao";
+ qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
};
- pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
- compatible = "qcom,stub-regulator";
- regulator-name = "pmxpoorwills_l10";
- qcom,hpm-min-load = <10000>;
- regulator-min-microvolt = <3088000>;
- regulator-max-microvolt = <3088000>;
+ rpmh-regulator-ldoa10 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa10";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
+ regulator-name = "pmxpoorwills_l10";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ qcom,init-voltage = <3088000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa11 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa11";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
+ regulator-name = "pmxpoorwills_l11";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <1808000>;
+ qcom,init-voltage = <1808000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa12 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa12";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
+ regulator-name = "pmxpoorwills_l12";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ qcom,init-voltage = <2704000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa13 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa13";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
+ regulator-name = "pmxpoorwills_l13";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <1808000>;
+ qcom,init-voltage = <1808000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa14 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa14";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
+ regulator-name = "pmxpoorwills_l14";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <620000>;
+ regulator-max-microvolt = <620000>;
+ qcom,init-voltage = <620000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ };
+ };
+
+ rpmh-regulator-ldoa16 {
+ compatible = "qcom,rpmh-vrm-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "ldoa16";
+ qcom,supported-modes =
+ <RPMH_REGULATOR_MODE_LDO_LPM
+ RPMH_REGULATOR_MODE_LDO_HPM>;
+ qcom,mode-threshold-currents = <0 1>;
+ pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
+ regulator-name = "pmxpoorwills_l16";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ qcom,init-voltage = <752000>;
+ qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+ regulator-always-on;
+ };
+ };
+
+ /* VREF_RGMII */
+ rpmh-regulator-rgmii {
+ compatible = "qcom,rpmh-xob-regulator";
+ mboxes = <&apps_rsc 0>;
+ qcom,resource-name = "vrefa2";
+ vreg_rgmii: regulator-rgmii {
+ regulator-name = "vreg_rgmii";
+ qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
index b3103cd..aa9e7f2 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts
@@ -23,6 +23,30 @@
qcom,board-id = <15 0>;
};
+&soc {
+ /* Delete rpmh regulators */
+ /delete-node/ rpmh-regulator-modemlvl;
+ /delete-node/ rpmh-regulator-smpa4;
+ /delete-node/ rpmh-regulator-cxlvl;
+ /delete-node/ rpmh-regulator-ldoa1;
+ /delete-node/ rpmh-regulator-ldoa2;
+ /delete-node/ rpmh-regulator-ldoa3;
+ /delete-node/ rpmh-regulator-ldoa4;
+ /delete-node/ rpmh-regulator-ldoa5;
+ /delete-node/ rpmh-regulator-ldoa7;
+ /delete-node/ rpmh-regulator-ldoa8;
+ /delete-node/ rpmh-regulator-mxlvl;
+ /delete-node/ rpmh-regulator-ldoa10;
+ /delete-node/ rpmh-regulator-ldoa11;
+ /delete-node/ rpmh-regulator-ldoa12;
+ /delete-node/ rpmh-regulator-ldoa13;
+ /delete-node/ rpmh-regulator-ldoa14;
+ /delete-node/ rpmh-regulator-ldoa16;
+ /delete-node/ rpmh-regulator-rgmii;
+};
+
+#include "sdxpoorwills-stub-regulator.dtsi"
+
&blsp1_uart2 {
pinctrl-names = "default";
pinctrl-0 = <&uart2_console_active>;
@@ -68,3 +92,7 @@
&usb3_qmp_phy {
status = "disabled";
};
+
+&qnand_1 {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi
new file mode 100644
index 0000000..7c6b7b0
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-stub-regulator.dtsi
@@ -0,0 +1,176 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/* Stub regulators */
+/ {
+ pmxpoorwills_s1: regualtor-pmxpoorwills-s1 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s1";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ pmxpoorwills_s4: regualtor-pmxpoorwills-s4 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s4";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* VDD CX supply */
+ pmxpoorwills_s5_level: regualtor-pmxpoorwills-s5-level {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s5_level";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_s5_level_ao: regualtor-pmxpoorwills-s5-level-ao {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_s5_level_ao";
+ qcom,hpm-min-load = <100000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l1";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l2";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ };
+
+ pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l3";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l4";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ };
+
+ pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l5";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l7";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l8";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ /* VDD MX supply */
+ pmxpoorwills_l9_level: regualtor-pmxpoorwills-l9-level {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l9_level";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l9_level_ao: regualtor-pmxpoorwills-l9-level_ao {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l9_level_ao";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+ regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+ };
+
+ pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l10";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ };
+
+ pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l11";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2848000>;
+ };
+
+ pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l12";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ };
+
+ pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l13";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2848000>;
+ };
+
+ pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l14";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <620000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
+ compatible = "qcom,stub-regulator";
+ regulator-name = "pmxpoorwills_l16";
+ qcom,hpm-min-load = <10000>;
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+ };
+
+ /* VREF_RGMII */
+ vreg_rgmii: rgmii-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_rgmii";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
new file mode 100644
index 0000000..5a4810a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-thermal.dtsi
@@ -0,0 +1,293 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&soc {
+ qmi-tmd-devices {
+ compatible = "qcom,qmi_cooling_devices";
+
+ modem {
+ qcom,instance-id = <0x0>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_proc: modem_proc {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_skin: modem_skin {
+ qcom,qmi-dev-name = "modem_skin";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+
+ adsp {
+ qcom,instance-id = <0x1>;
+
+ adsp_vdd: adsp_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+ };
+};
+
+&thermal_zones {
+ aoss-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 0>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-q6-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 1>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ ddrss-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 2>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ cpu-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 3>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-core-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 4>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mdm-vpe-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&tsens0 5>;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ aoss-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 0>;
+ tracks-low;
+ trips {
+ aoss_trip: aoss-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&aoss_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&aoss_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+
+ mdm-q6-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 1>;
+ tracks-low;
+ trips {
+ mdm_q6_trip: mdm-q6-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&mdm_q6_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mdm_q6_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+
+ ddrss-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 2>;
+ tracks-low;
+ trips {
+ ddrss_trip: ddrss-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&ddrss_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&ddrss_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+
+ cpu-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 3>;
+ tracks-low;
+ trips {
+ cpu_trip: cpu-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&cpu_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&cpu_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+
+ mdm-core-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 4>;
+ tracks-low;
+ trips {
+ mdm_trip: mdm-trip {
+ temperature = <5000>;
+ hysteresis = <5000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mdm_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+
+ mdm-vpe-lowf {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "low_limits_floor";
+ thermal-sensors = <&tsens0 5>;
+ tracks-low;
+ trips {
+ mdm_vpe_trip: mdm-vpe-trip {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ modem_vdd_cdev {
+ trip = <&mdm_vpe_trip>;
+ cooling-device = <&modem_vdd 0 0>;
+ };
+ adsp_vdd_cdev {
+ trip = <&mdm_vpe_trip>;
+ cooling-device = <&adsp_vdd 0 0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi
new file mode 100644
index 0000000..9303ed1
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-wcd.dtsi
@@ -0,0 +1,80 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&i2c_3 {
+ tavil_codec {
+ wcd: wcd_pinctrl@5 {
+ compatible = "qcom,wcd-pinctrl";
+ qcom,num-gpios = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ output-high;
+ };
+ };
+
+ spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ input-enable;
+ };
+ };
+
+ spkr_2_wcd_en_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ output-high;
+ };
+ };
+
+ spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ input-enable;
+ };
+ };
+ };
+
+ wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_wcd_en_active>;
+ pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+ };
+
+ wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_wcd_en_active>;
+ pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 146fc9c..b0be698 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -10,12 +10,12 @@
* GNU General Public License for more details.
*/
-
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
#include "skeleton.dtsi"
-
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. SDX POORWILLS";
@@ -41,6 +41,12 @@
reg = <0x87800000 0x8000000>;
label = "mss_mem";
};
+
+ audio_mem: audio_region@0 {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x400000>;
+ };
};
cpus {
@@ -51,9 +57,14 @@
device-type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x0>;
+ #cooling-cells = <2>;
};
};
+ aliases {
+ qpic_nand1 = &qnand_1;
+ };
+
soc: soc { };
};
@@ -147,22 +158,40 @@
};
clock_gcc: qcom,gcc@100000 {
- compatible = "qcom,dummycc";
- clock-output-names = "gcc_clocks";
+ compatible = "qcom,gcc-sdxpoorwills";
+ reg = <0x100000 0x1f0000>;
+ reg-names = "cc_base";
+ vdd_cx-supply = <&pmxpoorwills_s5_level>;
+ vdd_cx_ao-supply = <&pmxpoorwills_s5_level_ao>;
#clock-cells = <1>;
#reset-cells = <1>;
};
- clock_cpu: qcom,clock-a7@17810008 {
- compatible = "qcom,dummycc";
- clock-output-names = "cpu_clocks";
+ clock_cpu: qcom,clock-a7@17808100 {
+ compatible = "qcom,cpu-sdxpoorwills";
+ clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+ clock-names = "xo_ao";
+ qcom,a7cc-init-rate = <1497600000>;
+ reg = <0x17808100 0x7F10>;
+ reg-names = "apcs_pll";
+ qcom,rcg-reg-offset = <0x7F08>;
+
+ vdd_dig_ao-supply = <&pmxpoorwills_s5_level_ao>;
+ cpu-vdd-supply = <&pmxpoorwills_s5_level_ao>;
+ qcom,speed0-bin-v0 =
+ < 0 RPMH_REGULATOR_LEVEL_OFF>,
+ < 345600000 RPMH_REGULATOR_LEVEL_LOW_SVS>,
+ < 576000000 RPMH_REGULATOR_LEVEL_SVS>,
+ < 1094400000 RPMH_REGULATOR_LEVEL_NOM>,
+ < 1497600000 RPMH_REGULATOR_LEVEL_TURBO>;
#clock-cells = <1>;
};
clock_rpmh: qcom,rpmhclk {
- compatible = "qcom,dummycc";
- clock-output-names = "rpmh_clocks";
+ compatible = "qcom,rpmh-clk-sdxpoorwills";
#clock-cells = <1>;
+ mboxes = <&apps_rsc 0>;
+ mbox-names = "apps";
};
blsp1_uart2: serial@831000 {
@@ -170,7 +199,7 @@
reg = <0x831000 0x200>;
interrupts = <0 26 0>;
status = "disabled";
- clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+ clocks = <&clock_gcc GCC_BLSP1_UART3_APPS_CLK>,
<&clock_gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
};
@@ -179,7 +208,6 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_usb30";
reg = <0x0010b004 0x4>;
- status = "ok";
};
qcom,sps {
@@ -191,7 +219,36 @@
compatible = "qcom,gdsc";
regulator-name = "gdsc_pcie";
reg = <0x00137004 0x4>;
- status = "ok";
+ };
+
+ gdsc_emac: qcom,gdsc@147004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_emac";
+ reg = <0x00147004 0x4>;
+ };
+
+ qnand_1: nand@1b00000 {
+ compatible = "qcom,msm-nand";
+ reg = < 0x01b00000 0x10000>,
+ <0x01b04000 0x1a000>;
+ reg-names = "nand_phys",
+ "bam_phys";
+ qcom,reg-adjustment-offset = <0x4000>;
+ qcom,qpic-clk-rpmh;
+
+ interrupts = <0 135 0>;
+ interrupt-names = "bam_irq";
+
+ qcom,msm-bus,name = "qpic_nand";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+
+ qcom,msm-bus,vectors-KBps =
+ <91 512 0 0>,
+ /* Voting for max b/w on PNOC bus for now */
+ <91 512 400000 400000>;
+
+ status = "disabled";
};
qcom,msm-imem@8600000 {
@@ -235,77 +292,7 @@
#thermal-sensor-cells = <1>;
};
- thermal_zones: thermal-zones {
- mpm-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&tsens0 0>;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- q6-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&tsens0 1>;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- ctile-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&tsens0 2>;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- cpu-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&tsens0 3>;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- mdm-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&tsens0 4>;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
- };
+ thermal_zones: thermal-zones { };
qcom,ipa_fws {
compatible = "qcom,pil-tz-generic";
@@ -353,8 +340,8 @@
reg = <0x8fe40000 0xc0000>,
<0x17811008 0x4>;
reg-names = "smem", "irq-reg-base";
- qcom,irq-mask = <0x1000>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_EDGE_RISING>;
+ qcom,irq-mask = <0x8000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_EDGE_RISING>;
label = "mpss";
};
@@ -456,6 +443,65 @@
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
};
+
+ apps_rsc: mailbox@17840000 {
+ compatible = "qcom,tcs-drv";
+ label = "apps_rsc";
+ reg = <0x17840000 0x100>, <0x17840d00 0x3000>;
+ interrupts = <0 17 0>;
+ #mbox-cells = <1>;
+ qcom,drv-id = <1>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 2>,
+ <WAKE_TCS 2>,
+ <CONTROL_TCS 1>;
+ };
+
+ cmd_db: qcom,cmd-db@ca0000c {
+ compatible = "qcom,cmd-db";
+ reg = <0xca0000c 8>;
+ };
+
+ system_pm {
+ compatible = "qcom,system-pm";
+ mboxes = <&apps_rsc 0>;
+ };
+
+ emac_hw: qcom,emac@00020000 {
+ compatible = "qcom,emac-dwc-eqos";
+ reg = <0x20000 0x10000>,
+ <0x36000 0x100>;
+ reg-names = "emac-base", "rgmii-base";
+ interrupts = <0 62 4>, <0 60 4>,
+ <0 45 4>, <0 49 4>,
+ <0 50 4>, <0 51 4>,
+ <0 52 4>, <0 53 4>,
+ <0 54 4>, <0 55 4>,
+ <0 56 4>, <0 57 4>;
+ interrupt-names = "sbd-intr", "lpi-intr",
+ "wol-intr", "tx-ch0-intr",
+ "tx-ch1-intr", "tx-ch2-intr",
+ "tx-ch3-intr", "tx-ch4-intr",
+ "rx-ch0-intr", "rx-ch1-intr",
+ "rx-ch2-intr", "rx-ch3-intr";
+ io-macro-info {
+ io-macro-bypass-mode = <0>;
+ io-interface = "rgmii";
+ };
+ };
+
+ qmp_aop: qcom,qmp-aop@c300000 {
+ compatible = "qcom,qmp-mbox";
+ label = "aop";
+ reg = <0xc300000 0x400>,
+ <0x17811008 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+ qcom,irq-mask = <0x1>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_EDGE_RISING>;
+ priority = <0>;
+ mbox-desc-offset = <0x0>;
+ #mbox-cells = <1>;
+ };
};
#include "pmxpoorwills.dtsi"
@@ -464,3 +510,5 @@
#include "sdxpoorwills-smp2p.dtsi"
#include "sdxpoorwills-usb.dtsi"
#include "sdxpoorwills-bus.dtsi"
+#include "sdxpoorwills-thermal.dtsi"
+#include "sdxpoorwills-audio.dtsi"
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 351fcc2..b6c6410 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1493,7 +1493,8 @@
};
msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
@@ -1507,7 +1508,8 @@
};
msiof1: spi@e6e10000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e10000 0 0x0064>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
@@ -1521,7 +1523,8 @@
};
msiof2: spi@e6e00000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e00000 0 0x0064>;
interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
@@ -1535,7 +1538,8 @@
};
msiof3: spi@e6c90000 {
- compatible = "renesas,msiof-r8a7790";
+ compatible = "renesas,msiof-r8a7790",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6c90000 0 0x0064>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index a3ef734..4d329b2 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -131,7 +131,7 @@
<&clk_s_d2_quadfs 0>;
assigned-clock-rates = <297000000>,
- <108000000>,
+ <297000000>,
<0>,
<400000000>,
<400000000>;
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 53e1a88..66d7196 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -216,6 +216,7 @@
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 877406f..28a0c38 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -217,9 +217,16 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_SMB138X_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR=y
@@ -227,6 +234,9 @@
CONFIG_REGULATOR_QPNP=y
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
@@ -265,9 +275,11 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
@@ -281,6 +293,8 @@
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
CONFIG_GSI=y
CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
@@ -291,8 +305,11 @@
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
CONFIG_USB_BAM=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_SMEM=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index d860595..6c3ebc7 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -210,9 +210,16 @@
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_SUPPLY=y
CONFIG_SMB138X_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_QPNP=y
CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_SYSCON=y
@@ -225,6 +232,9 @@
CONFIG_FB=y
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
CONFIG_SND_SOC=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
@@ -263,9 +273,11 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC1=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
CONFIG_MMC_PARANOID_SD_INIT=y
CONFIG_MMC_BLOCK_MINORS=32
@@ -278,6 +290,8 @@
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
CONFIG_GSI=y
CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
@@ -287,8 +301,11 @@
CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_QPNP_REVID=y
+CONFIG_MDM_GCC_SDXPOORWILLS=y
+CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
CONFIG_QCOM_SCM=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_SMEM=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 55e0e3e..bd12b98 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -37,4 +37,3 @@
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
-generic-y += unaligned.h
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 9edea10..41e9107 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -32,6 +32,9 @@ unsigned long arch_get_cpu_efficiency(int cpu);
#define arch_scale_cpu_capacity scale_cpu_capacity
extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+#define arch_update_cpu_capacity update_cpu_power_capacity
+extern void update_cpu_power_capacity(int cpu);
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
new file mode 100644
index 0000000..ab905ff
--- /dev/null
+++ b/arch/arm/include/asm/unaligned.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index bbf60e3..ab509d6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -466,17 +466,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-DEFINE_PER_CPU(bool, pending_ipi);
-static void smp_cross_call_common(const struct cpumask *cpumask,
- unsigned int func)
-{
- unsigned int cpu;
-
- for_each_cpu(cpu, cpumask)
- per_cpu(pending_ipi, cpu) = true;
-
- __smp_cross_call(cpumask, func);
-}
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
@@ -501,6 +490,18 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
__smp_cross_call(target, ipinr);
}
+DEFINE_PER_CPU(bool, pending_ipi);
+static void smp_cross_call_common(const struct cpumask *cpumask,
+ unsigned int func)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cpumask)
+ per_cpu(pending_ipi, cpu) = true;
+
+ smp_cross_call(cpumask, func);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
@@ -539,7 +540,7 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_single_ipi(int cpu)
{
- smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 2b6c530..28dcd44 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,6 +42,16 @@
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
#ifdef CONFIG_CPU_FREQ
@@ -397,6 +407,23 @@ const struct cpumask *cpu_corepower_mask(int cpu)
return &cpu_topology[cpu].thread_sibling;
}
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+void update_cpu_power_capacity(int cpu)
+{
+ update_cpu_power(cpu);
+ update_cpu_capacity(cpu);
+}
+
static void update_siblings_masks(unsigned int cpuid)
{
struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 9688ec0..1b30489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -152,30 +152,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -186,8 +182,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86..30a13647 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
u32 return_offset = (is_thumb) ? 2 : 4;
kvm_update_psr(vcpu, UND_MODE);
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
- unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = (is_pabt) ? 4 : 8;
bool is_lpae;
kvm_update_psr(vcpu, ABT_MODE);
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 8679405..92eab1d 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 31dde8b..8ba0e2e 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -335,7 +335,7 @@ static void at91sam9_sdram_standby(void)
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
-static const struct of_device_id const ramc_ids[] __initconst = {
+static const struct of_device_id ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c
index cf3f865..a55a7ec 100644
--- a/arch/arm/mach-bcm/bcm_kona_smc.c
+++ b/arch/arm/mach-bcm/bcm_kona_smc.c
@@ -33,7 +33,7 @@ struct bcm_kona_smc_data {
unsigned result;
};
-static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
+static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
{.compatible = "brcm,kona-smc"},
{.compatible = "bcm,kona-smc"}, /* deprecated name */
{},
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 03da381..7d5a44a 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -346,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = {
.power_off = csn3xxx_usb_power_off,
};
-static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
+static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 5b2f513..f1ca947 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -713,7 +713,7 @@ static struct omap_prcm_init_data scrm_data __initdata = {
};
#endif
-static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = {
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
#ifdef CONFIG_SOC_AM33XX
{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
#endif
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 2028167f..d76b1e5 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -559,7 +559,7 @@ struct i2c_init_data {
u8 hsscll_12;
};
-static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = {
+static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
{
.load = 50,
.loadbits = 0x3,
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index f4d7965..4761bc5 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -51,5 +51,28 @@
select COMMON_CLK
select COMMON_CLK_QCOM
select QCOM_GDSC
+
+config ARCH_MSM8953
+ bool "Enable support for MSM8953"
+ select CPU_V7
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL
+ select QCOM_SCM if SMP
+ select PM_DEVFREQ
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
+
+config ARCH_SDM450
+ bool "Enable support for SDM450"
+ select CPU_V7
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL
+ select QCOM_SCM if SMP
+ select PM_DEVFREQ
+ select COMMON_CLK
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
+
endmenu
endif
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index d893b27..828e9c9 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_USE_OF) += board-dt.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_ARCH_SDXPOORWILLS) += board-poorwills.o
+obj-$(CONFIG_ARCH_MSM8953) += board-msm8953.o
+obj-$(CONFIG_ARCH_SDM450) += board-sdm450.o
diff --git a/arch/arm/mach-qcom/board-msm8953.c b/arch/arm/mach-qcom/board-msm8953.c
new file mode 100644
index 0000000..cae3bf7
--- /dev/null
+++ b/arch/arm/mach-qcom/board-msm8953.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *msm8953_dt_match[] __initconst = {
+ "qcom,msm8953",
+ NULL
+};
+
+static void __init msm8953_init(void)
+{
+ board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(MSM8953_DT,
+ "Qualcomm Technologies, Inc. MSM8953 (Flattened Device Tree)")
+ .init_machine = msm8953_init,
+ .dt_compat = msm8953_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-qcom/board-sdm450.c b/arch/arm/mach-qcom/board-sdm450.c
new file mode 100644
index 0000000..5f68ede
--- /dev/null
+++ b/arch/arm/mach-qcom/board-sdm450.c
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "board-dt.h"
+#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
+
+static const char *sdm450_dt_match[] __initconst = {
+ "qcom,sdm450",
+ NULL
+};
+
+static void __init sdm450_init(void)
+{
+ board_dt_populate(NULL);
+}
+
+DT_MACHINE_START(SDM450_DT,
+ "Qualcomm Technologies, Inc. SDM450 (Flattened Device Tree)")
+ .init_machine = sdm450_init,
+ .dt_compat = sdm450_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 9ccffc1..aaaa678 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -204,7 +204,7 @@ static void __init spear_clockevent_init(int irq)
setup_irq(irq, &spear_timer_irq);
}
-static const struct of_device_id const timer_of_match[] __initconst = {
+static const struct of_device_id timer_of_match[] __initconst = {
{ .compatible = "st,spear-timer", },
{ },
};
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 5d73327..0bb7673 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -150,7 +150,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
}
#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
-static void kmap_remove_unused_cpu(int cpu)
+int kmap_remove_unused_cpu(unsigned int cpu)
{
int start_idx, idx, type;
@@ -167,6 +167,7 @@ static void kmap_remove_unused_cpu(int cpu)
set_top_pte(vaddr, __pte(0));
}
pagefault_enable();
+ return 0;
}
static void kmap_remove_unused(void *unused)
@@ -179,27 +180,4 @@ void kmap_atomic_flush_unused(void)
on_each_cpu(kmap_remove_unused, NULL, 1);
}
-static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- switch (action & (~CPU_TASKS_FROZEN)) {
- case CPU_DYING:
- kmap_remove_unused_cpu((int)hcpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block hotplug_kmap_atomic_notifier = {
- .notifier_call = hotplug_kmap_atomic_callback,
-};
-
-static int __init init_kmap_atomic(void)
-{
- return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
-}
-early_initcall(init_kmap_atomic);
#endif
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index d062f08..4b24964 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
+ .mmap = xen_swiotlb_dma_mmap,
};
int __init xen_mm_init(void)
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index e1454fb..8edfbf2 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -148,6 +148,15 @@
This enables support for the MSM8953 chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
+config ARCH_SDM450
+ bool "Enable support for Qualcomm Technologies Inc. SDM450"
+ depends on ARCH_QCOM
+ select COMMON_CLK_QCOM
+ select QCOM_GDSC
+ help
+ This enables support for the SDM450 chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index aec9930..3df7439 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -7,10 +7,10 @@
sdm845-cdp-overlay.dtbo \
sdm845-mtp-overlay.dtbo \
sdm845-qrd-overlay.dtbo \
- sdm845-qvr-overlay.dtbo \
sdm845-4k-panel-mtp-overlay.dtbo \
sdm845-4k-panel-cdp-overlay.dtbo \
sdm845-4k-panel-qrd-overlay.dtbo \
+ sdm845-v2-qvr-overlay.dtbo \
sdm845-v2-cdp-overlay.dtbo \
sdm845-v2-mtp-overlay.dtbo \
sdm845-v2-qrd-overlay.dtbo \
@@ -35,16 +35,21 @@
sda845-v2-hdk-overlay.dtbo \
sda845-v2-4k-panel-mtp-overlay.dtbo \
sda845-v2-4k-panel-cdp-overlay.dtbo \
- sda845-v2-4k-panel-qrd-overlay.dtbo
+ sda845-v2-4k-panel-qrd-overlay.dtbo \
+ sda845-v2.1-cdp-overlay.dtbo \
+ sda845-v2.1-mtp-overlay.dtbo \
+ sda845-v2.1-qrd-overlay.dtbo \
+ sda845-v2.1-4k-panel-cdp-overlay.dtbo \
+ sda845-v2.1-4k-panel-mtp-overlay.dtbo \
+ sda845-v2.1-4k-panel-qrd-overlay.dtbo
sdm845-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-qrd-overlay.dtbo-base := sdm845.dtb
-sdm845-qvr-overlay.dtbo-base := sdm845-v2.dtb
-sdm845-qvr-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-mtp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-cdp-overlay.dtbo-base := sdm845.dtb
sdm845-4k-panel-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-v2-qvr-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
@@ -70,6 +75,12 @@
sda845-v2-4k-panel-mtp-overlay.dtbo-base := sda845-v2.dtb
sda845-v2-4k-panel-cdp-overlay.dtbo-base := sda845-v2.dtb
sda845-v2-4k-panel-qrd-overlay.dtbo-base := sda845-v2.dtb
+sda845-v2.1-cdp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-mtp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-qrd-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-cdp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-mtp-overlay.dtbo-base := sda845-v2.1.dtb
+sda845-v2.1-4k-panel-qrd-overlay.dtbo-base := sda845-v2.1.dtb
else
dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
sdm845-rumi.dtb \
@@ -80,7 +91,7 @@
sdm845-v2-cdp.dtb \
sdm845-qrd.dtb \
sdm845-v2-qrd.dtb \
- sdm845-qvr.dtb \
+ sdm845-v2-qvr.dtb \
sdm845-4k-panel-mtp.dtb \
sdm845-4k-panel-cdp.dtb \
sdm845-4k-panel-qrd.dtb \
@@ -168,6 +179,7 @@
sda670-cdp.dtb \
sda670-pm660a-mtp.dtb \
sda670-pm660a-cdp.dtb \
+ qcs605-360camera.dtb \
qcs605-mtp.dtb \
qcs605-cdp.dtb \
qcs605-external-codec-mtp.dtb
@@ -175,7 +187,36 @@
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
else
-dtb-$(CONFIG_ARCH_MSM8953) += msm8953-mtp.dtb
+dtb-$(CONFIG_ARCH_MSM8953) += msm8953-cdp.dtb \
+ msm8953-mtp.dtb \
+ msm8953-ext-codec-mtp.dtb \
+ msm8953-qrd-sku3.dtb \
+ msm8953-rcm.dtb \
+ apq8053-rcm.dtb \
+ msm8953-ext-codec-rcm.dtb \
+ apq8053-cdp.dtb \
+ apq8053-ipc.dtb \
+ msm8953-ipc.dtb \
+ apq8053-mtp.dtb \
+ apq8053-ext-audio-mtp.dtb \
+ apq8053-ext-codec-rcm.dtb \
+ msm8953-cdp-1200p.dtb \
+ msm8953-iot-mtp.dtb \
+ apq8053-iot-mtp.dtb \
+ msm8953-pmi8940-cdp.dtb \
+ msm8953-pmi8940-mtp.dtb \
+ msm8953-pmi8937-cdp.dtb \
+ msm8953-pmi8937-mtp.dtb \
+ msm8953-pmi8940-ext-codec-mtp.dtb \
+ msm8953-pmi8937-ext-codec-mtp.dtb
+
+dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \
+ sdm450-cdp.dtb \
+ sdm450-mtp.dtb \
+ sdm450-qrd.dtb \
+ sdm450-pmi8940-mtp.dtb \
+ sdm450-pmi8937-mtp.dtb \
+ sdm450-iot-mtp.dtb
endif
always := $(dtb-y)
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-cdp.dts
index c06b806..5e89e4f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 CDP";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
new file mode 100644
index 0000000..2c7b228
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-audio-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
new file mode 100644
index 0000000..d026734
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8053-ext-codec-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 Ext Codec RCM";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <21 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
index c06b806..177e105 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-iot-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IOT MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 2>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-ipc.dts
index c06b806..3381b2a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 IPC";
+ compatible = "qcom,apq8053-ipc", "qcom,apq8053", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-mtp.dts
index c06b806..be544af 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 MTP";
+ compatible = "qcom,apq8053-mtp", "qcom,apq8053", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053-rcm.dts
index c06b806..cc5bdaa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053-rcm.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "apq8053.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ8053 + PMI8950 RCM";
+ compatible = "qcom,apq8053-cdp", "qcom,apq8053", "qcom,cdp";
+ qcom,board-id= <21 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/apq8053.dtsi
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/apq8053.dtsi
index c06b806..15a1595 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/apq8053.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,15 +10,14 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
+#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. APQ 8953";
+ compatible = "qcom,apq8053";
+ qcom,msm-id = <304 0x0>;
};
+
+&secure_mem {
+ status = "disabled";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index 5b5fbb8..1a8ce91 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -210,6 +210,11 @@
15 01 00 00 00 00 02 E5 01
/* CMD mode(10) VDO mode(03) */
15 01 00 00 00 00 02 BB 10
+ /* NVT SDC */
+ 15 01 00 00 00 00 02 C0 00
+ /* GRAM Slide Parameter */
+ 29 01 00 00 00 00 0C C9 01 01 70
+ 00 0A 06 67 04 C5 12 18
/* Non Reload MTP */
15 01 00 00 00 00 02 FB 01
/* SlpOut + DispOn */
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..c059443
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,92 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_dual_nt36850_truly_cmd: qcom,mdss_dsi_nt36850_truly_wqhd_cmd {
+ qcom,mdss-dsi-panel-name =
+ "Dual nt36850 cmd mode dsi truly panel without DSC";
+ qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,cmd-sync-wait-broadcast;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-wr-mem-start = <0x2c>;
+ qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+ qcom,mdss-dsi-te-pin-select = <1>;
+ qcom,mdss-dsi-te-dcs-command = <1>;
+ qcom,mdss-dsi-te-check-enable;
+ qcom,mdss-dsi-te-using-te-pin;
+ qcom,mdss-dsi-panel-timings =
+ [da 34 24 00 64 68 28 38 2a 03 04 00];
+ qcom,mdss-dsi-t-clk-pre = <0x29>;
+ qcom,mdss-dsi-t-clk-post = <0x03>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-lp11-init;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+ qcom,mdss-dsi-display-timings {
+ timing@0 {
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <140>;
+ qcom,mdss-dsi-h-pulse-width = <20>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <20>;
+ qcom,mdss-dsi-v-front-porch = <8>;
+ qcom,mdss-dsi-v-pulse-width = <4>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-on-command = [
+ 15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 36 00
+ 15 01 00 00 00 00 02 35 00
+ 39 01 00 00 00 00 03 44 03 e8
+ 15 01 00 00 00 00 02 51 ff
+ 15 01 00 00 00 00 02 53 2c
+ 15 01 00 00 00 00 02 55 01
+ 05 01 00 00 0a 00 02 20 00
+ 15 01 00 00 00 00 02 bb 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 14 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 14 00 02
+ 28 00 05 01 00 00 78 00 02 10 00
+ ];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 50da1bf..45ac042 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -41,6 +41,7 @@
qcom,mdss-dsi-te-using-wd;
qcom,mdss-dsi-te-using-te-pin;
qcom,mdss-dsi-panel-hdr-enabled;
+ qcom,ulps-enabled;
qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
17000 15500 30000 8000 3000>;
qcom,mdss-dsi-panel-peak-brightness = <4200000>;
@@ -49,26 +50,22 @@
qcom,mdss-dsi-display-timings {
timing@0{
- qcom,mdss-dsi-panel-width = <640>;
- qcom,mdss-dsi-panel-height = <480>;
- qcom,mdss-dsi-h-front-porch = <20>;
- qcom,mdss-dsi-h-back-porch = <20>;
- qcom,mdss-dsi-h-pulse-width = <16>;
+ qcom,mdss-dsi-panel-width = <1440>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <100>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <16>;
- qcom,mdss-dsi-v-front-porch = <4>;
- qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-v-back-porch = <100>;
+ qcom,mdss-dsi-v-front-porch = <100>;
+ qcom,mdss-dsi-v-pulse-width = <40>;
qcom,mdss-dsi-h-left-border = <0>;
qcom,mdss-dsi-h-right-border = <0>;
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
- qcom,mdss-dsi-hor-line-idle = <0 40 256>,
- <40 120 128>,
- <120 240 64>;
qcom,mdss-dsi-panel-timings =
- [cd 32 22 00 60 64 26 34 29 03 04 00];
+ [00 21 09 09 24 23 08 08 08 03 04 00];
qcom,mdss-dsi-on-command =
[29 01 00 00 00 00 02 b0 03
05 01 00 00 0a 00 01 00
@@ -98,6 +95,124 @@
[05 01 00 00 32 00 02 28 00
05 01 00 00 78 00 02 10 00];
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <40>;
+ qcom,mdss-dsc-slice-width = <720>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ timing@1{
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <1920>;
+ qcom,mdss-dsi-h-front-porch = <120>;
+ qcom,mdss-dsi-h-back-porch = <460>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <100>;
+ qcom,mdss-dsi-v-front-porch = <740>;
+ qcom,mdss-dsi-v-pulse-width = <40>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-timings =
+ [00 21 09 09 24 23 08 08 08 03 04 00];
+ qcom,mdss-dsi-on-command =
+ [29 01 00 00 00 00 02 b0 03
+ 05 01 00 00 0a 00 01 00
+ /* Soft reset, wait 10ms */
+ 15 01 00 00 0a 00 02 3a 77
+ /* Set Pixel format (24 bpp) */
+ 39 01 00 00 0a 00 05 2a 00 00 04 ff
+ /* Set Column address */
+ 39 01 00 00 0a 00 05 2b 00 00 05 9f
+ /* Set page address */
+ 15 01 00 00 0a 00 02 35 00
+ /* Set tear on */
+ 39 01 00 00 0a 00 03 44 00 00
+ /* Set tear scan line */
+ 15 01 00 00 0a 00 02 51 ff
+ /* write display brightness */
+ 15 01 00 00 0a 00 02 53 24
+ /* write control brightness */
+ 15 01 00 00 0a 00 02 55 00
+ /* CABC brightness */
+ 05 01 00 00 78 00 01 11
+ /* exit sleep mode, wait 120ms */
+ 05 01 00 00 10 00 01 29];
+ /* Set display on, wait 16ms */
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <40>;
+ qcom,mdss-dsc-slice-width = <540>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
+ };
+ timing@2{
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <1280>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <840>;
+ qcom,mdss-dsi-h-pulse-width = <40>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <100>;
+ qcom,mdss-dsi-v-front-porch = <1380>;
+ qcom,mdss-dsi-v-pulse-width = <40>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-panel-timings =
+ [00 21 09 09 24 23 08 08 08 03 04 00];
+ qcom,mdss-dsi-on-command =
+ [29 01 00 00 00 00 02 b0 03
+ 05 01 00 00 0a 00 01 00
+ /* Soft reset, wait 10ms */
+ 15 01 00 00 0a 00 02 3a 77
+ /* Set Pixel format (24 bpp) */
+ 39 01 00 00 0a 00 05 2a 00 00 04 ff
+ /* Set Column address */
+ 39 01 00 00 0a 00 05 2b 00 00 05 9f
+ /* Set page address */
+ 15 01 00 00 0a 00 02 35 00
+ /* Set tear on */
+ 39 01 00 00 0a 00 03 44 00 00
+ /* Set tear scan line */
+ 15 01 00 00 0a 00 02 51 ff
+ /* write display brightness */
+ 15 01 00 00 0a 00 02 53 24
+ /* write control brightness */
+ 15 01 00 00 0a 00 02 55 00
+ /* CABC brightness */
+ 05 01 00 00 78 00 01 11
+ /* exit sleep mode, wait 120ms */
+ 05 01 00 00 10 00 01 29];
+ /* Set display on, wait 16ms */
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command =
+ [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+ qcom,compression-mode = "dsc";
+ qcom,mdss-dsc-slice-height = <40>;
+ qcom,mdss-dsc-slice-width = <360>;
+ qcom,mdss-dsc-slice-per-pkt = <1>;
+ qcom,mdss-dsc-bit-per-component = <8>;
+ qcom,mdss-dsc-bit-per-pixel = <8>;
+ qcom,mdss-dsc-block-prediction-enable;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
index 895cbc5..9a4e318 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -71,20 +71,20 @@
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
};
timing@1{
- qcom,mdss-dsi-panel-width = <1280>;
- qcom,mdss-dsi-panel-height = <1440>;
- qcom,mdss-dsi-h-front-porch = <120>;
- qcom,mdss-dsi-h-back-porch = <44>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <2560>;
+ qcom,mdss-dsi-h-front-porch = <100>;
+ qcom,mdss-dsi-h-back-porch = <32>;
qcom,mdss-dsi-h-pulse-width = <16>;
qcom,mdss-dsi-h-sync-skew = <0>;
- qcom,mdss-dsi-v-back-porch = <4>;
+ qcom,mdss-dsi-v-back-porch = <7>;
qcom,mdss-dsi-v-front-porch = <8>;
- qcom,mdss-dsi-v-pulse-width = <4>;
+ qcom,mdss-dsi-v-pulse-width = <1>;
+ qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-h-left-border = <0>;
qcom,mdss-dsi-h-right-border = <0>;
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
- qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-on-command =
[/* exit sleep mode, wait 0ms */
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
index 98dbf1c..4720238 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp356477-2800mah.dtsi
@@ -14,9 +14,15 @@
/* #mlp356477_2800mah_averaged_MasterSlave_Aug14th2017*/
qcom,max-voltage-uv = <4400000>;
qcom,fg-cc-cv-threshold-mv = <4390>;
- qcom,fastchg-current-ma = <2800>;
+ qcom,fastchg-current-ma = <4200>;
qcom,batt-id-kohm = <82>;
qcom,battery-beta = <4250>;
+ qcom,jeita-fcc-ranges = <0 150 560000
+ 151 450 4200000
+ 451 550 2380000>;
+ qcom,jeita-fv-ranges = <0 150 4150000
+ 151 450 4400000
+ 451 550 4150000>;
qcom,battery-type =
"mlp356477_2800mah_averaged_masterslave_aug14th2017";
qcom,checksum = <0x71B8>;
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
index ca43a45..75504d4 100644
--- a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-mlp446579-3800mah.dtsi
@@ -17,6 +17,12 @@
qcom,fastchg-current-ma = <3800>;
qcom,batt-id-kohm = <91>;
qcom,battery-beta = <4250>;
+ qcom,jeita-fcc-ranges = <0 150 760000
+ 151 450 3800000
+ 451 550 1900000>;
+ qcom,jeita-fv-ranges = <0 150 4150000
+ 151 450 4400000
+ 451 550 4150000>;
qcom,battery-type = "mlp446579_3800mah_averaged_masterslave_oct9th2017";
qcom,checksum = <0x3F0A>;
qcom,gui-version = "PMI8998GUI - 2.0.0.58";
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
index ca856c8..ae22a36 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
@@ -21,6 +21,7 @@
#iommu-cells = <1>;
qcom,dynamic;
qcom,use-3-lvl-tables;
+ qcom,disable-atos;
#global-interrupts = <2>;
qcom,regulator-names = "vdd";
vdd-supply = <&gpu_cx_gdsc>;
@@ -34,12 +35,8 @@
<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
- clock-names = "gcc_ddrss_gpu_axi_clk",
- "gcc_gpu_memnoc_gfx_clk",
- "gpu_cc_cx_gmu_clk";
- clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
- <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
- <&clock_gpucc GPU_CC_CX_GMU_CLK>;
+ clock-names = "gcc_gpu_memnoc_gfx_clk";
+ clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
attach-impl-defs =
<0x6000 0x2378>,
<0x6060 0x1055>,
@@ -63,6 +60,8 @@
#iommu-cells = <2>;
qcom,skip-init;
qcom,use-3-lvl-tables;
+ qcom,no-asid-retention;
+ qcom,disable-atos;
#global-interrupts = <1>;
#size-cells = <1>;
#address-cells = <1>;
@@ -307,9 +306,19 @@
apps_iommu_test_device {
compatible = "iommu-debug-test";
/*
- * This SID belongs to QUP1-GSI. We can't use a fake SID for
+ * This SID belongs to TSIF. We can't use a fake SID for
* the apps_smmu device.
*/
- iommus = <&apps_smmu 0x16 0x0>;
+ iommus = <&apps_smmu 0x20 0xf>;
+ };
+
+ apps_iommu_coherent_test_device {
+ compatible = "iommu-debug-test";
+ /*
+ * This SID belongs to TSIF. We can't use a fake SID for
+ * the apps_smmu device.
+ */
+ iommus = <&apps_smmu 0x20 0xf>;
+ dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 0a8fb4a..e4fe2e3 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -346,15 +346,19 @@
};
&apps_smmu {
- qcom,actlr = <0x0000 0x3ff 0x3>,
- <0x0400 0x3ff 0x3>,
- <0x0800 0x3ff 0x103>,
- <0x0c00 0x3ff 0x103>,
- <0x1000 0x3ff 0x103>,
- <0x1400 0x3ff 0x3>,
- <0x1800 0x3ff 0x3>,
- <0x1c00 0x3ff 0x3>;
-
+ qcom,actlr = <0x0880 0x8 0x103>,
+ <0x0881 0x8 0x103>,
+ <0x0c80 0x8 0x103>,
+ <0x0c81 0x8 0x103>,
+ <0x1090 0x0 0x103>,
+ <0x1091 0x0 0x103>,
+ <0x10a0 0x8 0x103>,
+ <0x10b0 0x0 0x103>,
+ <0x10a1 0x8 0x103>,
+ <0x10a3 0x8 0x103>,
+ <0x10a4 0x8 0x103>,
+ <0x10b4 0x0 0x103>,
+ <0x10a5 0x8 0x103>;
qcom,mmu500-errata-1 = <0x800 0x3ff>,
<0xc00 0x3ff>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 2fd1bc4..b20feef8 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -117,7 +117,7 @@
dai_mi2s4: qcom,msm-dai-q6-mi2s-quin {
compatible = "qcom,msm-dai-q6-mi2s";
- qcom,msm-dai-q6-mi2s-dev-id = <5>;
+ qcom,msm-dai-q6-mi2s-dev-id = <4>;
qcom,msm-mi2s-rx-lines = <1>;
qcom,msm-mi2s-tx-lines = <2>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
new file mode 100644
index 0000000..a685380
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp-1200p.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP 1200P";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp.dts
index c06b806..1f78902 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
index c06b806..243aaf5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-cdp.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,8 @@
* GNU General Public License for more details.
*/
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+&blsp1_uart0 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
new file mode 100644
index 0000000..3dfd848
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
new file mode 100644
index 0000000..a81e212
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-rcm.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 Ext Codec RCM";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <21 1>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
index c06b806..524e7ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-iot-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IOT MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 2>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ipc.dts
index c06b806..89a54af 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IPC";
+ compatible = "qcom,msm8953-ipc", "qcom,msm8953", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi
index c06b806..26f4338 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ipc.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,8 @@
* GNU General Public License for more details.
*/
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+&blsp1_uart0 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
index c06b806..a751d5d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
new file mode 100644
index 0000000..13aba62
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
index c06b806..9d6be47 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8937-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8937 MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
index c06b806..d2bb465 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 CDP";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <1 0>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
new file mode 100644
index 0000000..dbbb6b8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-ext-codec-mtp.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 Ext Codec MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 1>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
index c06b806..0fb793b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-pmi8940-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8940 MTP";
+ compatible = "qcom,msm8953-mtp", "qcom,msm8953", "qcom,mtp";
+ qcom,board-id= <8 0>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
new file mode 100644
index 0000000..5d892fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8953.dtsi"
+#include "msm8953-qrd-sku3.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 QRD SKU3";
+ compatible = "qcom,msm8953-qrd-sku3",
+ "qcom,msm8953-qrd", "qcom,msm8953", "qcom,qrd";
+ qcom,board-id= <0x2000b 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
index c06b806..96e185b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,5 @@
* GNU General Public License for more details.
*/
+#include "msm8953-qrd.dtsi"
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
index c06b806..243aaf5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,8 @@
* GNU General Public License for more details.
*/
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+&blsp1_uart0 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/msm8953-rcm.dts
index c06b806..a3117ed 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-rcm.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,15 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 RCM";
+ compatible = "qcom,msm8953-cdp", "qcom,msm8953", "qcom,cdp";
+ qcom,board-id= <21 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index f17ac32..e90c30b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -13,6 +13,7 @@
#include "skeleton64.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
model = "Qualcomm Technologies, Inc. MSM 8953";
@@ -347,6 +348,32 @@
};
};
+ cpubw_compute: qcom,cpubw-compute {
+ compatible = "qcom,arm-cpu-mon";
+ qcom,cpulist = < &CPU0 &CPU1 &CPU2 &CPU3
+ &CPU4 &CPU5 &CPU6 &CPU7 >;
+ qcom,target-dev = <&cpubw>;
+ qcom,core-dev-table =
+ < 652800 1611>,
+ < 1036800 3221>,
+ < 1401600 5859>,
+ < 1689600 6445>,
+ < 1804800 7104>,
+ < 1958400 7104>,
+ < 2208000 7104>;
+ };
+
+ mincpubw_compute: qcom,mincpubw-compute {
+ compatible = "qcom,arm-cpu-mon";
+ qcom,cpulist = < &CPU0 &CPU1 &CPU2 &CPU3
+ &CPU4 &CPU5 &CPU6 &CPU7 >;
+ qcom,target-dev = <&mincpubw>;
+ qcom,core-dev-table =
+ < 652800 1611 >,
+ < 1401600 3221 >,
+ < 2208000 5859 >;
+ };
+
qcom,ipc-spinlock@1905000 {
compatible = "qcom,ipc-spinlock-sfpb";
reg = <0x1905000 0x8000>;
@@ -601,5 +628,22 @@
status = "disabled";
};
+ spmi_bus: qcom,spmi@200f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x200f000 0x1000>,
+ <0x2400000 0x800000>,
+ <0x2c00000 0x800000>,
+ <0x3800000 0x200000>,
+ <0x200a000 0x2100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ cell-index = <0>;
+ };
};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
index c06b806..481e576 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/mtp8953-ipc.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
+#include "msm8953-ipc.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 IPC";
+ compatible = "qcom,msm8953-ipc", "qcom,msm8953", "qcom,ipc";
+ qcom,board-id= <12 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index df5a970..502b2fe 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -116,7 +116,6 @@
#size-cells = <0>;
interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "eoc-int-en-set";
- qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1875>;
chan@6 {
@@ -280,7 +279,6 @@
#size-cells = <0>;
interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "eoc-int-en-set";
- qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1875>;
qcom,adc_tm-vadc = <&pm660_vadc>;
qcom,decimation = <0>;
@@ -338,7 +336,7 @@
qcom,scale-function = <2>;
qcom,hw-settle-time = <2>;
qcom,btm-channel-number = <0x80>;
- qcom,vadc-thermal-node;
+ qcom,thermal-node;
};
chan@4f {
@@ -349,7 +347,7 @@
qcom,scale-function = <2>;
qcom,hw-settle-time = <2>;
qcom,btm-channel-number = <0x88>;
- qcom,vadc-thermal-node;
+ qcom,thermal-node;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 013ac48..dc3ffda 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -138,7 +138,6 @@
#size-cells = <0>;
interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "eoc-int-en-set";
- qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1875>;
chan@6 {
@@ -185,7 +184,6 @@
#size-cells = <0>;
interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "eoc-int-en-set";
- qcom,adc-bit-resolution = <15>;
qcom,adc-vdd-reference = <1875>;
qcom,adc_tm-vadc = <&pm8998_vadc>;
qcom,decimation = <0>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 8d8bd63..c65430b1 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -102,6 +102,7 @@
qcom,thermal-mitigation
= <3000000 1500000 1000000 500000>;
+ qcom,auto-recharge-soc;
qcom,chgr@1000 {
reg = <0x1000 0x100>;
@@ -282,6 +283,9 @@
qcom,fg-esr-timer-asleep = <256 256>;
qcom,fg-esr-timer-charging = <0 96>;
qcom,cycle-counter-en;
+ qcom,hold-soc-while-full;
+ qcom,fg-auto-recharge-soc;
+ qcom,fg-recharge-soc-thr = <98>;
status = "okay";
qcom,fg-batt-soc@4000 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
new file mode 100644
index 0000000..8caad4b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+#include "qcs605-360camera.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L 360camera";
+ compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+ qcom,board-id = <0x0000000b 1>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0102001a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
new file mode 100644
index 0000000..87e2e03
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670-mtp.dtsi"
+#include "sdm670-camera-sensor-360camera.dtsi"
+#include "sdm670-audio-overlay.dtsi"
+
+&qupv3_se3_i2c {
+ status = "disabled";
+};
+
+&qupv3_se10_i2c {
+ status = "okay";
+};
+
+&qupv3_se12_2uart {
+ status = "okay";
+};
+
+&qupv3_se6_4uart {
+ status = "okay";
+};
+
+&qupv3_se13_i2c {
+ status = "disabled";
+};
+
+&qupv3_se13_spi {
+ status = "disabled";
+};
+
+&int_codec {
+ qcom,model = "sdm670-360cam-snd-card";
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC2", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic2",
+ "DMIC3", "MIC BIAS External2",
+ "MIC BIAS External2", "Digital Mic3",
+ "DMIC4", "MIC BIAS External2",
+ "MIC BIAS External2", "Digital Mic4",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
+ qcom,wsa-max-devs = <0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
index fe7a027..01471b6 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
@@ -21,6 +21,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
index 7b38a58..ea10fa0 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
@@ -16,6 +16,7 @@
#include "qcs605.dtsi"
#include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
index 1f439ae..44fae6a 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
@@ -21,6 +21,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm670-mtp.dtsi"
+#include "sdm670-external-codec.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
index 7327440..7955242 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
@@ -21,6 +21,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
index bc7b376..dc3c7ce 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
@@ -16,6 +16,7 @@
#include "qcs605.dtsi"
#include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
index 141ed59..12a130c 100644
--- a/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
@@ -21,6 +21,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
index fcb340e..9cd9960 100644
--- a/arch/arm64/boot/dts/qcom/sda670-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
@@ -16,6 +16,7 @@
#include "sda670.dtsi"
#include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
index af8e8f1..b3f5a0b 100644
--- a/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
@@ -21,6 +21,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
index 2123b44..253ec0c 100644
--- a/arch/arm64/boot/dts/qcom/sda670-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
@@ -16,6 +16,7 @@
#include "sda670.dtsi"
#include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
index 3e1365d..7701c0b 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
@@ -22,6 +22,7 @@
#include "sdm670-cdp.dtsi"
#include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
index 6cbf224..e6f8d50 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
@@ -17,6 +17,7 @@
#include "sda670.dtsi"
#include "sdm670-cdp.dtsi"
#include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
index 9855b11..0b355ab 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
@@ -22,6 +22,7 @@
#include "sdm670-mtp.dtsi"
#include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
index ffb6aa3..0d7e34a 100644
--- a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
@@ -17,6 +17,7 @@
#include "sda670.dtsi"
#include "sdm670-mtp.dtsi"
#include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index f836f50..813c198 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -19,7 +19,9 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "sdm845-sde-display.dtsi"
#include "sda845-v2-hdk.dtsi"
+#include "sdm845-hdk-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDA845 v2 HDK";
@@ -27,3 +29,28 @@
qcom,msm-id = <341 0x20000>;
qcom,board-id = <0x01001F 0x00>;
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_dual_nt36850_truly_cmd_display {
+ qcom,dsi-display-active;
+};
+
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "lcd";
+};
+
+&pmi8998_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
index 53617dc..d212554 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk.dtsi
@@ -18,3 +18,7 @@
#include "fg-gen3-batterydata-mlp356477-2800mah.dtsi"
};
};
+
+&sdhc_2 {
+ cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts
new file mode 100644
index 0000000..d49fdb6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-cdp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel CDP";
+ compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <1 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts
new file mode 100644
index 0000000..c797492
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-mtp-overlay.dts
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel MTP";
+ compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <8 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts
new file mode 100644
index 0000000..221a1d7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-4k-panel-qrd-overlay.dts
@@ -0,0 +1,64 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-sde-display.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sda845 v2.1 4K Display Panel QRD";
+ compatible = "qcom,sda845-qrd", "qcom,sda845", "qcom,qrd";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <11 1>;
+};
+
+&dsi_nt35597_truly_dsc_cmd_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_sharp_4k_dsc_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,mdss-dsi-panel-orientation = "180";
+};
+
+&dsi_sharp_4k_dsc_video_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts
index fb99157..64af617 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-cdp-overlay.dts
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+
/dts-v1/;
/plugin/;
@@ -20,11 +21,12 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm845-cdp.dtsi"
+#include "sdm845-cdp-audio-overlay.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. sda845 v2.1 CDP";
+ compatible = "qcom,sda845-cdp", "qcom,sda845", "qcom,cdp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <1 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts
index fb99157..931f0e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-mtp-overlay.dts
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+
/dts-v1/;
/plugin/;
@@ -20,11 +21,12 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm845-mtp.dtsi"
+#include "sdm845-audio-overlay.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. sda845 v2.1 MTP";
+ compatible = "qcom,sda845-mtp", "qcom,sda845", "qcom,mtp";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <8 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts
index fb99157..d279fce 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1-qrd-overlay.dts
@@ -20,11 +20,12 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sdm845-sde-display.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm845-qrd.dtsi"
+#include "sdm845-qrd-audio-overlay.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,msm-id = <321 0x20000>;
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA845 v2.1 QRD";
+ compatible = "qcom,sda845-qrd", "qcom,sda845", "qcom,qrd";
+ qcom,msm-id = <341 0x20001>;
+ qcom,board-id = <11 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1.dts
similarity index 73%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1.dts
index c06b806..9706587 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1.dts
@@ -10,14 +10,12 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sda845-v2.1.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA845 v2.1 SoC";
+ compatible = "qcom,sda845";
+ qcom,board-id = <0 0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi
similarity index 71%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi
index c06b806..fe70be1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2.1.dtsi
@@ -10,14 +10,9 @@
* GNU General Public License for more details.
*/
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm845-v2.1.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDA 845 V2.1";
+ qcom,msm-id = <341 0x20001>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-cdp.dts
index c06b806..3e06872 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-cdp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 CDP";
+ compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+ qcom,board-id = <1 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
index c06b806..7fac030 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-iot-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 IOT MTP";
+ compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+ qcom,board-id = <8 2>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-mtp.dts
index c06b806..2524b80 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 MTP";
+ compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+ qcom,board-id = <8 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
index c06b806..6a6a09e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8937-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8937 MTP";
+ compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+ qcom,board-id = <8 0>;
+ qcom,pmic-id = <0x010016 0x020037 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
index c06b806..3c4e802 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi8940-mtp.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-mtp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8940 MTP";
+ compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+ qcom,board-id = <8 0>;
+ qcom,pmic-id = <0x010016 0x020040 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd.dts
similarity index 60%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-qrd.dts
index c06b806..3c2e25b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-qrd.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 QRD";
+ compatible = "qcom,sdm450-qrd", "qcom,sdm450", "qcom,qrd";
+ qcom,board-id = <0x5000b 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450-rcm.dts
index c06b806..4ab131a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-rcm.dts
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,14 @@
* GNU General Public License for more details.
*/
-
/dts-v1/;
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "sdm450.dtsi"
+#include "msm8953-cdp.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450 + PMI8950 RCM";
+ compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+ qcom,board-id = <21 0>;
+ qcom,pmic-id = <0x010016 0x010011 0x0 0x0>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm450.dtsi
similarity index 63%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to arch/arm64/boot/dts/qcom/sdm450.dtsi
index c06b806..8087399 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450.dtsi
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,11 @@
* GNU General Public License for more details.
*/
-
-/dts-v1/;
-
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "msm8953.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
+ model = "Qualcomm Technologies, Inc. SDM450";
+ compatible = "qcom,sdm450";
+ qcom,msm-id = <338 0x0>;
};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index dfb8142..5dd5c0d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -50,8 +50,8 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,msm-mclk-freq = <9600000>;
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
<&wsa881x_0213>, <&wsa881x_0214>;
@@ -64,6 +64,8 @@
"RX_BIAS", "INT_MCLK0",
"SPK_RX_BIAS", "INT_MCLK0",
"INT_LDO_H", "INT_MCLK0",
+ "RX_I2S_CLK", "INT_MCLK0",
+ "TX_I2S_CLK", "INT_MCLK0",
"MIC BIAS External", "Handset Mic",
"MIC BIAS External2", "Headset Mic",
"MIC BIAS External", "Secondary Mic",
@@ -98,9 +100,11 @@
qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
asoc-codec = <&stub_codec>, <&msm_digital_codec>,
- <&pmic_analog_codec>, <&msm_sdw_codec>;
+ <&pmic_analog_codec>, <&msm_sdw_codec>,
+ <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
- "analog-codec", "msm_sdw_codec";
+ "analog-codec", "msm_sdw_codec",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index bbf6683..bda44cc 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -39,6 +39,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -50,7 +51,7 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -70,9 +71,10 @@
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608",
+ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
- "msm-dai-q6-mi2s.5",
+ "msm-dai-q6-mi2s.4",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
"msm-dai-q6-auxpcm.5",
@@ -102,6 +104,7 @@
compatible = "qcom,sdm670-asoc-snd";
qcom,model = "sdm670-mtp-snd-card";
qcom,wcn-btfm;
+ qcom,ext-disp-audio-rx;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
@@ -115,7 +118,7 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
<&dai_int_mi2s0>, <&dai_int_mi2s1>,
<&dai_int_mi2s2>, <&dai_int_mi2s3>,
@@ -134,9 +137,10 @@
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608",
+ "msm-dai-q6-mi2s.0","msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
- "msm-dai-q6-mi2s.5",
+ "msm-dai-q6-mi2s.4",
"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
"msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
"msm-dai-q6-mi2s.11", "msm-dai-q6-mi2s.12",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
index 6f22264..4f5a9b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
@@ -12,6 +12,7 @@
#include <dt-bindings/msm/msm-bus-ids.h>
#include <dt-bindings/soc/qcom,tcs-mbox.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
&soc {
ad_hoc_bus: ad-hoc-bus {
@@ -309,7 +310,6 @@
qcom,bcm-dev;
};
-
/*Buses*/
fab_aggre1_noc: fab-aggre1_noc {
cell-id = <MSM_BUS_FAB_A1_NOC>;
@@ -318,7 +318,6 @@
qcom,base-name = "aggre1_noc-base";
qcom,qos-off = <4096>;
qcom,base-offset = <16384>;
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
@@ -330,7 +329,6 @@
qcom,base-name = "aggre2_noc-base";
qcom,qos-off = <2048>;
qcom,base-offset = <12288>;
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
@@ -340,8 +338,6 @@
label = "fab-camnoc_virt";
qcom,fab-dev;
qcom,base-name = "camnoc_virt-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
clocks = <>;
};
@@ -351,8 +347,6 @@
label = "fab-config_noc";
qcom,fab-dev;
qcom,base-name = "config_noc-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
@@ -363,8 +357,6 @@
label = "fab-dc_noc";
qcom,fab-dev;
qcom,base-name = "dc_noc-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
@@ -375,8 +367,6 @@
label = "fab-gladiator_noc";
qcom,fab-dev;
qcom,base-name = "gladiator_noc-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
@@ -387,8 +377,6 @@
label = "fab-ipa_virt";
qcom,fab-dev;
qcom,base-name = "ipa_virt-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
clocks = <>;
};
@@ -398,8 +386,6 @@
label = "fab-mc_virt";
qcom,fab-dev;
qcom,base-name = "mc_virt-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
clocks = <>;
};
@@ -411,7 +397,6 @@
qcom,base-name = "mem_noc-base";
qcom,qos-off = <4096>;
qcom,base-offset = <65536>;
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
@@ -423,7 +408,6 @@
qcom,base-name = "mmss_noc-base";
qcom,qos-off = <4096>;
qcom,base-offset = <36864>;
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
@@ -435,7 +419,6 @@
qcom,base-name = "system_noc-base";
qcom,qos-off = <4096>;
qcom,base-offset = <36864>;
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
@@ -445,8 +428,6 @@
label = "fab-mc_virt_display";
qcom,fab-dev;
qcom,base-name = "mc_virt-base";
- qcom,qos-off = <0>;
- qcom,base-offset = <0>;
qcom,bypass-qos-prg;
clocks = <>;
};
@@ -468,14 +449,11 @@
label = "fab-mmss_noc_display";
qcom,fab-dev;
qcom,base-name = "mmss_noc-base";
- qcom,qos-off = <4096>;
- qcom,base-offset = <36864>;
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clocks = <>;
};
-
/*Masters*/
mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg {
@@ -492,12 +470,9 @@
label = "mas-qhm-qup1";
qcom,buswidth = <4>;
qcom,agg-ports = <1>;
- qcom,qport = <16>;
qcom,connections = <&slv_qns_a1noc_snoc>;
qcom,bus-dev = <&fab_aggre1_noc>;
qcom,bcms = <&bcm_qup0>;
- qcom,ap-owned;
- qcom,prio = <0>;
};
mas_qhm_tsif: mas-qhm-tsif {
@@ -571,11 +546,8 @@
label = "mas-qhm-qdss-bam";
qcom,buswidth = <4>;
qcom,agg-ports = <1>;
- qcom,qport = <17>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
- qcom,ap-owned;
- qcom,prio = <0>;
};
mas_qhm_qup2: mas-qhm-qup2 {
@@ -583,12 +555,9 @@
label = "mas-qhm-qup2";
qcom,buswidth = <4>;
qcom,agg-ports = <1>;
- qcom,qport = <0>;
qcom,connections = <&slv_qns_a2noc_snoc>;
qcom,bus-dev = <&fab_aggre2_noc>;
qcom,bcms = <&bcm_qup0>;
- qcom,ap-owned;
- qcom,prio = <0>;
};
mas_qnm_cnoc: mas-qnm-cnoc {
@@ -626,6 +595,8 @@
qcom,bus-dev = <&fab_aggre2_noc>;
qcom,ap-owned;
qcom,prio = <2>;
+ qcom,defer-init-qos;
+ qcom,node-qos-bcms = <7035 0 1>;
};
mas_xm_qdss_etr: mas-xm-qdss-etr {
@@ -650,6 +621,12 @@
qcom,bus-dev = <&fab_aggre2_noc>;
qcom,ap-owned;
qcom,prio = <2>;
+ qcom,node-qos-clks {
+ clocks =
+ <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>;
+ clock-names =
+ "clk-usb3-prim-axi-no-rate";
+ };
};
mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
@@ -837,7 +814,7 @@
qcom,bus-dev = <&fab_mem_noc>;
qcom,bcms = <&bcm_sh3>;
qcom,ap-owned;
- qcom,prio = <6>;
+ qcom,prio = <7>;
};
mas_qhm_memnoc_cfg: mas-qhm-memnoc-cfg {
@@ -874,6 +851,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qnm_mnoc_sf: mas-qnm-mnoc-sf {
@@ -888,6 +866,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qnm_snoc_gc: mas-qnm-snoc-gc {
@@ -950,6 +929,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
@@ -964,6 +944,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_camnoc_sf: mas-qxm-camnoc-sf {
@@ -978,6 +959,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_mdp0: mas-qxm-mdp0 {
@@ -992,6 +974,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_mdp1: mas-qxm-mdp1 {
@@ -1006,6 +989,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_rot: mas-qxm-rot {
@@ -1020,6 +1004,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_venus0: mas-qxm-venus0 {
@@ -1034,6 +1019,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_venus1: mas-qxm-venus1 {
@@ -1048,6 +1034,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qxm_venus_arm9: mas-qxm-venus-arm9 {
@@ -1062,6 +1049,7 @@
qcom,ap-owned;
qcom,prio = <0>;
qcom,forwarding;
+ qcom,node-qos-bcms = <7012 0 1>;
};
mas_qhm_snoc_cfg: mas-qhm-snoc-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
new file mode 100644
index 0000000..18b0cd8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash2>;
+ torch-source = <&pm660l_torch2>;
+ switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_vio_ldo: gpio-regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "camera_vio_ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vio>;
+ vin-supply = <&pm660_s4>;
+ };
+
+ camera_vana_ldo: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pm660l_bob>;
+ };
+};
+
+&cam_cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+		rgltr-load-current = <105000 0 80000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+		rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
new file mode 100644
index 0000000..8b94ca2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-cdp.dtsi
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash2>;
+ torch-source = <&pm660l_torch2>;
+ switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 3 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_vio_ldo: gpio-regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "camera_vio_ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vio>;
+ vin-supply = <&pm660_s4>;
+ };
+
+ camera_vana_ldo: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pm660l_bob>;
+ };
+};
+
+&cam_cci {
+ qcom,cam-res-mgr {
+ compatible = "qcom,cam-res-mgr";
+ status = "ok";
+ };
+
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+ rgltr-load-current = <105000 0 80000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
new file mode 100644
index 0000000..8b94ca2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-mtp.dtsi
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash2>;
+ torch-source = <&pm660l_torch2>;
+ switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 3 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ camera_vio_ldo: gpio-regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "camera_vio_ldo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vio>;
+ vin-supply = <&pm660_s4>;
+ };
+
+ camera_vana_ldo: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "camera_vana_ldo";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam_sensor_rear_vana>;
+ vin-supply = <&pm660l_bob>;
+ };
+};
+
+&cam_cci {
+ qcom,cam-res-mgr {
+ compatible = "qcom,cam-res-mgr";
+ status = "ok";
+ };
+
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_front: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+		eeprom_rear_aux: qcom,eeprom@1 {
+			cell-index = <1>;
+			reg = <0x1>;
+			compatible = "qcom,eeprom";
+			cam_vio-supply = <&camera_vio_ldo>;
+			cam_vana-supply = <&camera_vana_ldo>;
+			cam_vdig-supply = <&camera_ldo>;
+			cam_clk-supply = <&titan_top_gdsc>;
+			cam_vaf-supply = <&actuator_regulator>;
+			regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+				"cam_clk", "cam_vaf";
+			rgltr-cntrl-support;
+			rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+			rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+			rgltr-load-current = <105000 0 80000 0 0>;
+			gpio-no-mux = <0>;
+			pinctrl-names = "cam_default", "cam_suspend";
+			pinctrl-0 = <&cam_sensor_mclk1_active
+					&cam_sensor_rear2_active>;
+			pinctrl-1 = <&cam_sensor_mclk1_suspend
+					&cam_sensor_rear2_suspend>;
+			gpios = <&tlmm 14 0>,
+				<&tlmm 28 0>;
+			gpio-reset = <1>;
+			gpio-req-tbl-num = <0 1>;
+			gpio-req-tbl-flags = <1 0>;
+			gpio-req-tbl-label = "CAMIF_MCLK1",
+						"CAM_RESET1";
+			sensor-position = <0>;
+			sensor-mode = <0>;
+			cci-master = <1>;
+			status = "ok";
+			/* Fix: "clocks" was missing although clock-names,
+			 * clock-cntl-level and clock-rates were present; the
+			 * MCLK1 source (matching the mclk1 pinctrl above and
+			 * the sibling eeprom/sensor nodes) is required for
+			 * the "cam_clk" lookup to succeed at probe time.
+			 */
+			clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+			clock-names = "cam_clk";
+			clock-cntl-level = "turbo";
+			clock-rates = <24000000>;
+		};
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+ rgltr-load-current = <0 80000 105000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1352000 1800000 2850000 0>;
+ rgltr-max-voltage = <1352000 1800000 2850000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&camera_vio_ldo>;
+ cam_vana-supply = <&camera_vana_ldo>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1800000 2850000 1352000 0>;
+ rgltr-max-voltage = <1800000 2850000 1352000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..6506f98
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ vin-supply = <&pm660l_bob>;
+ };
+
+ cam_avdd_gpio_regulator: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "cam_avdd_gpio_regulator";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&tlmm 100 0>;
+ vin-supply = <&pm660l_bob>;
+ };
+
+ cam_dvdd_gpio_regulator: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "cam_dvdd_gpio_regulator";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 4 0>;
+ vin-supply = <&pm660_s6>;
+ };
+
+ cam_iovdd_gpio_regulator: gpio-regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <0x03 0x00>;
+ regulator-name = "cam_iovdd_gpio_regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 29 0>;
+ vin-supply = <&pm660_s4>;
+ };
+
+ cam_rear_avdd_gpio_regulator: gpio-regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <0x04 0x00>;
+ regulator-name = "cam_rear_avdd_gpio_regulator";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&tlmm 8 0>;
+ vin-supply = <&pm660l_bob>;
+ };
+
+ cam_rear_dvdd_gpio_regulator: gpio-regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <0x05 0x00>;
+ regulator-name = "cam_rear_dvdd_gpio_regulator";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm660l_gpios 3 0>;
+ vin-supply = <&pm660_s6>;
+ };
+};
+
+&tlmm {
+ cam_sensor_rear_active: cam_sensor_rear_active {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_active: cam_sensor_rear2_active {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_active: cam_sensor_front_active {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_suspend: cam_sensor_front_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+};
+
+&cam_cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_rear_aux: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_front: qcom,actuator@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+ cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 0 0 0>;
+ rgltr-max-voltage = <0 0 0 0>;
+ rgltr-load-current = <0 0 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+	eeprom_rear_aux: qcom,eeprom@1 {	/* EEPROM for the second (aux) rear sensor, CCI master 1 */
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;	/* fixed GPIO regulators: no voltage vote needed */
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+			&cam_sensor_front_active>;	/* NOTE(review): rear-aux reset uses the "front" pin group (gpio28) on QRD; consistent with qcom,cam-sensor@1 below, but confirm against board schematics */
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+			&cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,	/* MCLK1 */
+			<&tlmm 28 0>;	/* RESET */
+		gpio-reset = <1>;	/* index into "gpios" of the reset line */
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;	/* 24 MHz MCLK */
+	};
+
+	eeprom_front: qcom,eeprom@2 {	/* EEPROM for the front sensor, CCI master 1 */
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;	/* fixed GPIO regulators: no voltage vote needed */
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+			&cam_sensor_rear2_active>;	/* NOTE(review): front reset uses the "rear2" pin group (gpio9) on QRD; consistent with qcom,cam-sensor@2 below, but confirm against board schematics */
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+			&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,	/* MCLK2 */
+			<&tlmm 9 0>;	/* RESET */
+		gpio-reset = <1>;	/* index into "gpios" of the reset line */
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;	/* front-facing */
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;	/* 24 MHz MCLK */
+	};
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+ cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 0 0 0>;
+ rgltr-max-voltage = <0 0 0 0>;
+ rgltr-load-current = <0 0 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear_aux>;
+ eeprom-src = <&eeprom_rear_aux>;
+ cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 0 0 0>;
+ rgltr-max-voltage = <0 0 0 0>;
+ rgltr-load-current = <0 0 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ actuator-src = <&actuator_front>;
+ cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+ cam_vana-supply = <&cam_avdd_gpio_regulator>;
+ cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 0 0 0>;
+ rgltr-max-voltage = <0 0 0 0>;
+ rgltr-load-current = <0 0 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
+
+&pm660l_gpios {
+ gpio@c300 { /* GPIO4 -CAMERA SENSOR 1/2 VDIG*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+		qcom,invert = <0>;	/* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@c200 { /* GPIO3 -CAMERA SENSOR 0 VDIG*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+		qcom,invert = <0>;	/* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
new file mode 100644
index 0000000..34b8740
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -0,0 +1,1065 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,cam-req-mgr {	/* camera request manager (top-level CRM device) */
+		compatible = "qcom,cam-req-mgr";
+		status = "ok";
+	};
+
+ cam_csiphy0: qcom,csiphy@ac65000 {
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0x0ac65000 0x1000>;
+ reg-names = "csiphy";
+ reg-cam-base = <0x65000>;
+ interrupts = <0 477 0>;
+ interrupt-names = "csiphy";
+ regulator-names = "gdscr", "refgen";
+ gdscr-supply = <&titan_top_gdsc>;
+ refgen-supply = <&refgen>;
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm660_l1>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY0_CLK>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy0_clk",
+ "csi0phytimer_clk_src",
+ "csi0phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 384000000 0 269333333 0>;
+ status = "ok";
+ };
+
+ cam_csiphy1: qcom,csiphy@ac66000{
+ cell-index = <1>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0xac66000 0x1000>;
+ reg-names = "csiphy";
+ reg-cam-base = <0x66000>;
+ interrupts = <0 478 0>;
+ interrupt-names = "csiphy";
+ regulator-names = "gdscr", "refgen";
+ gdscr-supply = <&titan_top_gdsc>;
+ refgen-supply = <&refgen>;
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm660_l1>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY1_CLK>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy1_clk",
+ "csi1phytimer_clk_src",
+ "csi1phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 384000000 0 269333333 0>;
+
+ status = "ok";
+ };
+
+ cam_csiphy2: qcom,csiphy@ac67000 {
+ cell-index = <2>;
+ compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+ reg = <0xac67000 0x1000>;
+ reg-names = "csiphy";
+ reg-cam-base = <0x67000>;
+ interrupts = <0 479 0>;
+ interrupt-names = "csiphy";
+ regulator-names = "gdscr", "refgen";
+ gdscr-supply = <&titan_top_gdsc>;
+ refgen-supply = <&refgen>;
+ csi-vdd-voltage = <1200000>;
+ mipi-csi-vdd-supply = <&pm660_l1>;
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY2_CLK>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cphy_rx_clk_src",
+ "csiphy2_clk",
+ "csi2phytimer_clk_src",
+ "csi2phytimer_clk";
+ clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 384000000 0 269333333 0>;
+ status = "ok";
+ };
+
+ cam_cci: qcom,cci@ac4a000 {
+ cell-index = <0>;
+ compatible = "qcom,cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xac4a000 0x4000>;
+ reg-names = "cci";
+ reg-cam-base = <0x4a000>;
+ interrupt-names = "cci";
+ interrupts = <0 460 0>;
+ status = "ok";
+ gdscr-supply = <&titan_top_gdsc>;
+ regulator-names = "gdscr";
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK_SRC>;
+ clock-names = "camnoc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_src_clk",
+ "cpas_ahb_clk",
+ "cci_clk",
+ "cci_clk_src";
+ src-clock-name = "cci_clk_src";
+ clock-cntl-level = "lowsvs";
+ clock-rates = <0 0 0 0 0 37500000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cci0_active &cci1_active>;
+ pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+ gpios = <&tlmm 17 0>,
+ <&tlmm 18 0>,
+ <&tlmm 19 0>,
+ <&tlmm 20 0>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 1 1 1>;
+ gpio-req-tbl-label = "CCI_I2C_DATA0",
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ hw-thigh = <201>;
+ hw-tlow = <174>;
+ hw-tsu-sto = <204>;
+ hw-tsu-sta = <231>;
+ hw-thd-dat = <22>;
+ hw-thd-sta = <162>;
+ hw-tbuf = <227>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ hw-thigh = <38>;
+ hw-tlow = <56>;
+ hw-tsu-sto = <40>;
+ hw-tsu-sta = <40>;
+ hw-thd-dat = <22>;
+ hw-thd-sta = <35>;
+ hw-tbuf = <62>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ hw-thigh = <38>;
+ hw-tlow = <56>;
+ hw-tsu-sto = <40>;
+ hw-tsu-sta = <40>;
+ hw-thd-dat = <22>;
+ hw-thd-sta = <35>;
+ hw-tbuf = <62>;
+ hw-scl-stretch-en = <1>;
+ hw-trdhld = <6>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
+ status = "ok";
+ };
+
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ hw-thigh = <16>;
+ hw-tlow = <22>;
+ hw-tsu-sto = <17>;
+ hw-tsu-sta = <18>;
+ hw-thd-dat = <16>;
+ hw-thd-sta = <15>;
+ hw-tbuf = <24>;
+ hw-scl-stretch-en = <0>;
+ hw-trdhld = <3>;
+ hw-tsp = <3>;
+ cci-clk-src = <37500000>;
+ status = "ok";
+ };
+ };
+
+ qcom,cam_smmu {
+ compatible = "qcom,msm-cam-smmu";
+ status = "ok";
+
+ msm_cam_smmu_ife {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x808 0x0>,
+ <&apps_smmu 0x810 0x8>,
+ <&apps_smmu 0xc08 0x0>,
+ <&apps_smmu 0xc10 0x8>;
+ label = "ife";
+ ife_iova_mem_map: iova-mem-map {
+ /* IO region is approximately 3.4 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_smmu_jpeg {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1060 0x8>,
+ <&apps_smmu 0x1068 0x8>;
+ label = "jpeg";
+ jpeg_iova_mem_map: iova-mem-map {
+ /* IO region is approximately 3.4 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_icp_fw {
+ compatible = "qcom,msm-cam-smmu-fw-dev";
+ label="icp";
+ memory-region = <&pil_camera_mem>;
+ };
+
+ msm_cam_smmu_icp {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x107A 0x0>,
+ <&apps_smmu 0x1020 0x8>,
+ <&apps_smmu 0x1040 0x8>,
+ <&apps_smmu 0x1030 0x0>,
+ <&apps_smmu 0x1050 0x0>;
+ label = "icp";
+ icp_iova_mem_map: iova-mem-map {
+ iova-mem-region-firmware {
+ /* Firmware region is 5MB */
+ iova-region-name = "firmware";
+ iova-region-start = <0x0>;
+ iova-region-len = <0x500000>;
+ iova-region-id = <0x0>;
+ status = "ok";
+ };
+
+ iova-mem-region-shared {
+ /* Shared region is 100MB long */
+ iova-region-name = "shared";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0x6400000>;
+ iova-region-id = <0x1>;
+ iova-granularity = <0x15>;
+ status = "ok";
+ };
+
+ iova-mem-region-secondary-heap {
+ /* Secondary heap region is 1MB long */
+ iova-region-name = "secheap";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0x100000>;
+ iova-region-id = <0x4>;
+ status = "ok";
+ };
+
+ iova-mem-region-io {
+ /* IO region is approximately 3.3 GB */
+ iova-region-name = "io";
+ iova-region-start = <0xd900000>;
+ iova-region-len = <0xd2700000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+ msm_cam_smmu_cpas_cdm {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1000 0x0>;
+ label = "cpas-cdm0";
+ cpas_cdm_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ /* IO region is approximately 3.4 GB */
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
+		msm_cam_smmu_secure {	/* secure camera context bank */
+			compatible = "qcom,msm-cam-smmu-cb";
+			label = "cam-secure";
+			qcom,secure-cb;	/* NOTE(review): no iommus/SID list here; SIDs for the secure CB are presumably assigned elsewhere (cf. commit subject) — confirm */
+		};
+
+ msm_cam_smmu_fd {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1070 0x0>;
+ label = "fd";
+ fd_iova_mem_map: iova-mem-map {
+ iova-mem-region-io {
+ /* IO region is approximately 3.4 GB */
+ iova-region-name = "io";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0xd8c00000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+ };
+
+ qcom,cam-cpas@ac40000 {
+ cell-index = <0>;
+ compatible = "qcom,cam-cpas";
+ label = "cpas";
+ arch-compat = "cpas_top";
+ status = "ok";
+ reg-names = "cam_cpas_top", "cam_camnoc";
+ reg = <0xac40000 0x1000>,
+ <0xac42000 0x5000>;
+ reg-cam-base = <0x40000 0x42000>;
+ interrupt-names = "cpas_camnoc";
+ interrupts = <0 459 0>;
+ qcom,cpas-hw-ver = <0x170110>; /* Titan v170 v1.1.0 */
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "gcc_ahb_clk",
+ "gcc_axi_clk",
+ "soc_ahb_clk",
+ "slow_ahb_clk_src",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+ src-clock-name = "slow_ahb_clk_src";
+ clock-rates = <0 0 0 0 0 0>,
+ <0 0 0 19200000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>,
+ <0 0 0 80000000 0 0>;
+ clock-cntl-level = "suspend", "minsvs", "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
+ qcom,msm-bus,name = "cam_ahb";
+ qcom,msm-bus,num-cases = <7>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 120000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
+ vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
+ RPMH_REGULATOR_LEVEL_RETENTION
+ RPMH_REGULATOR_LEVEL_MIN_SVS
+ RPMH_REGULATOR_LEVEL_LOW_SVS
+ RPMH_REGULATOR_LEVEL_SVS
+ RPMH_REGULATOR_LEVEL_SVS_L1
+ RPMH_REGULATOR_LEVEL_NOM
+ RPMH_REGULATOR_LEVEL_NOM_L1
+ RPMH_REGULATOR_LEVEL_NOM_L2
+ RPMH_REGULATOR_LEVEL_TURBO
+ RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ vdd-corner-ahb-mapping = "suspend", "suspend",
+ "minsvs", "lowsvs", "svs", "svs_l1",
+ "nominal", "nominal", "nominal",
+ "turbo", "turbo";
+ client-id-based;
+ client-names =
+ "csiphy0", "csiphy1", "csiphy2", "cci0",
+ "csid0", "csid1", "csid2",
+ "ife0", "ife1", "ife2", "ipe0",
+ "ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+ client-axi-port-names =
+ "cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+ "cam_hf_1", "cam_hf_2", "cam_hf_2",
+ "cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+ client-bus-camnoc-based;
+ qcom,axi-port-list {
+ qcom,axi-port1 {
+ qcom,axi-port-name = "cam_hf_1";
+ qcom,axi-port-mnoc {
+ qcom,msm-bus,name = "cam_hf_1_mnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ };
+ qcom,axi-port-camnoc {
+ qcom,msm-bus,name = "cam_hf_1_camnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+ };
+ };
+ qcom,axi-port2 {
+ qcom,axi-port-name = "cam_hf_2";
+ qcom,axi-port-mnoc {
+ qcom,msm-bus,name = "cam_hf_2_mnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ };
+ qcom,axi-port-camnoc {
+ qcom,msm-bus,name = "cam_hf_2_camnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+ };
+ };
+ qcom,axi-port3 {
+ qcom,axi-port-name = "cam_sf_1";
+ qcom,axi-port-mnoc {
+ qcom,msm-bus,name = "cam_sf_1_mnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF
+ MSM_BUS_SLAVE_EBI_CH0 0 0>;
+ };
+ qcom,axi-port-camnoc {
+ qcom,msm-bus,name = "cam_sf_1_camnoc";
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+ <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+ MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
+ };
+ };
+ };
+ };
+
+ qcom,cam-cdm-intf {
+ compatible = "qcom,cam-cdm-intf";
+ cell-index = <0>;
+ label = "cam-cdm-intf";
+ num-hw-cdm = <1>;
+ cdm-client-names = "vfe",
+ "jpegdma",
+ "jpegenc",
+ "fd";
+ status = "ok";
+ };
+
+ qcom,cpas-cdm0@ac48000 {
+ cell-index = <0>;
+ compatible = "qcom,cam170-cpas-cdm0";
+ label = "cpas-cdm";
+ reg = <0xac48000 0x1000>;
+ reg-names = "cpas-cdm";
+ reg-cam-base = <0x48000>;
+ interrupts = <0 461 0>;
+ interrupt-names = "cpas-cdm";
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "gcc_camera_ahb",
+ "gcc_camera_axi",
+ "cam_cc_soc_ahb_clk",
+ "cam_cc_cpas_ahb_clk",
+ "cam_cc_camnoc_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+ clock-rates = <0 0 0 0 0>;
+ clock-cntl-level = "svs";
+ cdm-client-names = "ife";
+ status = "ok";
+ };
+
+ qcom,cam-isp {
+ compatible = "qcom,cam-isp";
+ arch-compat = "ife";
+ status = "ok";
+ };
+
+ cam_csid0: qcom,csid0@acb3000 {
+ cell-index = <0>;
+ compatible = "qcom,csid170";
+ reg-names = "csid";
+ reg = <0xacb3000 0x1000>;
+ reg-cam-base = <0xb3000>;
+ interrupt-names = "csid";
+ interrupts = <0 464 0>;
+ regulator-names = "camss", "ife0";
+ camss-supply = <&titan_top_gdsc>;
+ ife0-supply = <&ife_0_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_csid_clk",
+ "ife_csid_clk_src",
+ "ife_cphy_rx_clk",
+ "cphy_rx_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk",
+ "ife_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_0_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "turbo";
+ src-clock-name = "ife_csid_clk_src";
+ status = "ok";
+ };
+
+ cam_vfe0: qcom,vfe0@acaf000 {
+ cell-index = <0>;
+ compatible = "qcom,vfe170";
+ reg-names = "ife";
+ reg = <0xacaf000 0x4000>;
+ reg-cam-base = <0xaf000>;
+ interrupt-names = "ife";
+ interrupts = <0 465 0>;
+ regulator-names = "camss", "ife0";
+ camss-supply = <&titan_top_gdsc>;
+ ife0-supply = <&ife_0_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk",
+ "ife_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_0_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 480000000 0 0>,
+ <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
+ src-clock-name = "ife_clk_src";
+ clock-names-option = "ife_dsp_clk";
+ clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+ clock-rates-option = <600000000>;
+ status = "ok";
+ };
+
+ cam_csid1: qcom,csid1@acba000 {
+ cell-index = <1>;
+ compatible = "qcom,csid170";
+ reg-names = "csid";
+ reg = <0xacba000 0x1000>;
+ reg-cam-base = <0xba000>;
+ interrupt-names = "csid";
+ interrupts = <0 466 0>;
+ regulator-names = "camss", "ife1";
+ camss-supply = <&titan_top_gdsc>;
+ ife1-supply = <&ife_1_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_csid_clk",
+ "ife_csid_clk_src",
+ "ife_cphy_rx_clk",
+ "cphy_rx_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk",
+ "ife_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "turbo";
+ src-clock-name = "ife_csid_clk_src";
+ status = "ok";
+ };
+
+ cam_vfe1: qcom,vfe1@acb6000 {
+ cell-index = <1>;
+ compatible = "qcom,vfe170";
+ reg-names = "ife";
+ reg = <0xacb6000 0x4000>;
+ reg-cam-base = <0xb6000>;
+ interrupt-names = "ife";
+ interrupts = <0 467 0>;
+ regulator-names = "camss", "ife1";
+ camss-supply = <&titan_top_gdsc>;
+ ife1-supply = <&ife_1_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk",
+ "ife_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 480000000 0 0>,
+ <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
+ src-clock-name = "ife_clk_src";
+ clock-names-option = "ife_dsp_clk";
+ clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
+ clock-rates-option = <600000000>;
+ status = "ok";
+ };
+
+ cam_csid_lite: qcom,csid-lite@acc8000 {
+ cell-index = <2>;
+ compatible = "qcom,csid-lite170";
+ reg-names = "csid-lite";
+ reg = <0xacc8000 0x1000>;
+ reg-cam-base = <0xc8000>;
+ interrupt-names = "csid-lite";
+ interrupts = <0 468 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_csid_clk",
+ "ife_csid_clk_src",
+ "ife_cphy_rx_clk",
+ "cphy_rx_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0>;
+ clock-cntl-level = "svs", "turbo";
+ src-clock-name = "ife_csid_clk_src";
+ status = "ok";
+ };
+
+ cam_vfe_lite: qcom,vfe-lite@acc4000 {
+ cell-index = <2>;
+ compatible = "qcom,vfe-lite170";
+ reg-names = "ife-lite";
+ reg = <0xacc4000 0x4000>;
+ reg-cam-base = <0xc4000>;
+ interrupt-names = "ife-lite";
+ interrupts = <0 469 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "slow_ahb_clk_src",
+ "ife_clk",
+ "ife_clk_src",
+ "camnoc_axi_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0>,
+ <0 0 0 0 0 0 480000000 0>,
+ <0 0 0 0 0 0 600000000 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
+ src-clock-name = "ife_clk_src";
+ status = "ok";
+ };
+
+ qcom,cam-icp {
+ compatible = "qcom,cam-icp";
+ compat-hw-name = "qcom,a5",
+ "qcom,ipe0",
+ "qcom,ipe1",
+ "qcom,bps";
+ num-a5 = <1>;
+ num-ipe = <2>;
+ num-bps = <1>;
+ status = "ok";
+ };
+
+ cam_a5: qcom,a5@ac00000 {
+ cell-index = <0>;
+ compatible = "qcom,cam-a5";
+ reg = <0xac00000 0x6000>,
+ <0xac10000 0x8000>,
+ <0xac18000 0x3000>;
+ reg-names = "a5_qgic", "a5_sierra", "a5_csr";
+ reg-cam-base = <0x00000 0x10000 0x18000>;
+ interrupts = <0 463 0>;
+ interrupt-names = "a5";
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "gcc_cam_ahb_clk",
+ "gcc_cam_axi_clk",
+ "soc_fast_ahb",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "icp_apb_clk",
+ "icp_clk",
+ "icp_clk_src";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_FAST_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_ICP_APB_CLK>,
+ <&clock_camcc CAM_CC_ICP_CLK>,
+ <&clock_camcc CAM_CC_ICP_CLK_SRC>;
+
+ clock-rates =
+ <0 0 200000000 0 0 0 0 400000000>,
+ <0 0 200000000 0 0 0 0 600000000>;
+ clock-cntl-level = "svs", "turbo";
+ fw_name = "CAMERA_ICP.elf";
+ ubwc-cfg = <0x77 0x1DF>;
+ status = "ok";
+ };
+
+ cam_ipe0: qcom,ipe0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-ipe";
+ regulator-names = "ipe0-vdd";
+ ipe0-vdd-supply = <&ipe_0_gdsc>;
+ clock-names = "ipe_0_ahb_clk",
+ "ipe_0_areg_clk",
+ "ipe_0_axi_clk",
+ "ipe_0_clk",
+ "ipe_0_clk_src";
+ src-clock-name = "ipe_0_clk_src";
+ clocks = <&clock_camcc CAM_CC_IPE_0_AHB_CLK>,
+ <&clock_camcc CAM_CC_IPE_0_AREG_CLK>,
+ <&clock_camcc CAM_CC_IPE_0_AXI_CLK>,
+ <&clock_camcc CAM_CC_IPE_0_CLK>,
+ <&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
+
+ clock-rates = <0 0 0 0 240000000>,
+ <0 0 0 0 404000000>,
+ <0 0 0 0 480000000>,
+ <0 0 0 0 538000000>,
+ <0 0 0 0 600000000>;
+ clock-cntl-level = "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
+ status = "ok";
+ };
+
+ cam_ipe1: qcom,ipe1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-ipe";
+ regulator-names = "ipe1-vdd";
+ ipe1-vdd-supply = <&ipe_1_gdsc>;
+ clock-names = "ipe_1_ahb_clk",
+ "ipe_1_areg_clk",
+ "ipe_1_axi_clk",
+ "ipe_1_clk",
+ "ipe_1_clk_src";
+ src-clock-name = "ipe_1_clk_src";
+ clocks = <&clock_camcc CAM_CC_IPE_1_AHB_CLK>,
+ <&clock_camcc CAM_CC_IPE_1_AREG_CLK>,
+ <&clock_camcc CAM_CC_IPE_1_AXI_CLK>,
+ <&clock_camcc CAM_CC_IPE_1_CLK>,
+ <&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
+
+ clock-rates = <0 0 0 0 240000000>,
+ <0 0 0 0 404000000>,
+ <0 0 0 0 480000000>,
+ <0 0 0 0 538000000>,
+ <0 0 0 0 600000000>;
+ clock-cntl-level = "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
+ status = "ok";
+ };
+
+ cam_bps: qcom,bps {
+ cell-index = <0>;
+ compatible = "qcom,cam-bps";
+ regulator-names = "bps-vdd";
+ bps-vdd-supply = <&bps_gdsc>;
+ clock-names = "bps_ahb_clk",
+ "bps_areg_clk",
+ "bps_axi_clk",
+ "bps_clk",
+ "bps_clk_src";
+ src-clock-name = "bps_clk_src";
+ clocks = <&clock_camcc CAM_CC_BPS_AHB_CLK>,
+ <&clock_camcc CAM_CC_BPS_AREG_CLK>,
+ <&clock_camcc CAM_CC_BPS_AXI_CLK>,
+ <&clock_camcc CAM_CC_BPS_CLK>,
+ <&clock_camcc CAM_CC_BPS_CLK_SRC>;
+
+ clock-rates = <0 0 0 0 200000000>,
+ <0 0 0 0 404000000>,
+ <0 0 0 0 480000000>,
+ <0 0 0 0 600000000>,
+ <0 0 0 0 600000000>;
+ clock-cntl-level = "lowsvs", "svs",
+ "svs_l1", "nominal", "turbo";
+ status = "ok";
+ };
+
+ qcom,cam-jpeg {
+ compatible = "qcom,cam-jpeg";
+ compat-hw-name = "qcom,jpegenc",
+ "qcom,jpegdma";
+ num-jpeg-enc = <1>;
+ num-jpeg-dma = <1>;
+ status = "ok";
+ };
+
+ cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_enc";
+ reg-names = "jpege_hw";
+ reg = <0xac4e000 0x4000>;
+ reg-cam-base = <0x4e000>;
+ interrupt-names = "jpeg";
+ interrupts = <0 474 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegenc_clk_src",
+ "jpegenc_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegenc_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+ cell-index = <0>;
+ compatible = "qcom,cam_jpeg_dma";
+ reg-names = "jpegdma_hw";
+ reg = <0xac52000 0x4000>;
+ reg-cam-base = <0x52000>;
+ interrupt-names = "jpegdma";
+ interrupts = <0 475 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "jpegdma_clk_src",
+ "jpegdma_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+ <&clock_camcc CAM_CC_JPEG_CLK>;
+
+ clock-rates = <0 0 0 0 0 600000000 0>;
+ src-clock-name = "jpegdma_clk_src";
+ clock-cntl-level = "nominal";
+ status = "ok";
+ };
+
+ qcom,cam-fd {
+ compatible = "qcom,cam-fd";
+ compat-hw-name = "qcom,fd";
+ num-fd = <1>;
+ status = "ok";
+ };
+
+ cam_fd: qcom,fd@ac5a000 {
+ cell-index = <0>;
+ compatible = "qcom,fd41";
+ reg-names = "fd_core", "fd_wrapper";
+ reg = <0xac5a000 0x1000>,
+ <0xac5b000 0x400>;
+ reg-cam-base = <0x5a000 0x5b000>;
+ interrupt-names = "fd";
+ interrupts = <0 462 0>;
+ regulator-names = "camss-vdd";
+ camss-vdd-supply = <&titan_top_gdsc>;
+ clock-names = "gcc_ahb_clk",
+ "gcc_axi_clk",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "fd_core_clk_src",
+ "fd_core_clk",
+ "fd_core_uar_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_FD_CORE_CLK_SRC>,
+ <&clock_camcc CAM_CC_FD_CORE_CLK>,
+ <&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
+ src-clock-name = "fd_core_clk_src";
+ clock-cntl-level = "svs", "svs_l1", "turbo";
+ clock-rates =
+ <0 0 0 0 0 400000000 0 0>,
+ <0 0 0 0 0 538000000 0 0>,
+ <0 0 0 0 0 600000000 0 0>;
+ status = "ok";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index 8e152b0..521b048 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/gpio/gpio.h>
#include "sdm670-pmic-overlay.dtsi"
#include "sdm670-sde-display.dtsi"
+#include "sdm670-camera-sensor-cdp.dtsi"
&ufsphy_mem {
compatible = "qcom,ufs-phy-qmp-v3";
@@ -40,6 +41,12 @@
status = "ok";
};
+&pm660l_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
&qupv3_se9_2uart {
status = "disabled";
};
@@ -53,7 +60,27 @@
};
&qupv3_se3_i2c {
- status = "disabled";
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 44 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 43 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <44 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+ clock-names = "ref_clk";
+ };
};
&qupv3_se10_i2c {
@@ -246,9 +273,7 @@
};
&dsi_rm67195_amoled_fhd_cmd {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
qcom,platform-reset-gpio = <&tlmm 75 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index d054164..a885495 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -537,6 +537,58 @@
<&funnel_apss_merg_out_funnel_in2>;
};
};
+ port@4 {
+ reg = <6>;
+ funnel_in2_in_funnel_gfx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_gfx_out_funnel_in2>;
+ };
+ };
+ };
+ };
+
+ funnel_gfx: funnel@0x6943000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6943000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-gfx";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel_gfx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_funnel_gfx>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_in2_in_gfx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&gfx_out_funnel_in2>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_in2_in_gfx_cx: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&gfx_cx_out_funnel_in2>;
+ };
+ };
};
};
@@ -562,6 +614,7 @@
<13 32>;
qcom,cmb-elem-size = <3 64>,
<7 64>,
+ <9 64>,
<13 64>;
clocks = <&clock_aop QDSS_CLK>;
@@ -625,17 +678,17 @@
};
port@6 {
- reg = <10>;
- tpda_in_tpdm_qm: endpoint {
+ reg = <9>;
+ tpda_in_tpdm_prng: endpoint {
slave-mode;
remote-endpoint =
- <&tpdm_qm_out_tpda>;
+ <&tpdm_prng_out_tpda>;
};
};
port@7 {
reg = <11>;
- tpda_in_tpdm_north: endpoint {
+ tpda_in_tpdm_north: endpoint {
slave-mode;
remote-endpoint =
<&tpdm_north_out_tpda>;
@@ -643,6 +696,15 @@
};
port@8 {
+ reg = <12>;
+ tpda_in_tpdm_qm: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_qm_out_tpda>;
+ };
+ };
+
+ port@9 {
reg = <13>;
tpda_in_tpdm_pimem: endpoint {
slave-mode;
@@ -743,6 +805,24 @@
};
};
+ tpdm_prng: tpdm@684c000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x684c000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-prng";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpdm_prng_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_prng>;
+ };
+ };
+ };
+
tpdm_center: tpdm@6c28000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b968>;
@@ -1308,7 +1388,7 @@
reg = <0x69e1000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ddr0";
+ coresight-name = "coresight-cti-ddr_dl_0_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1320,7 +1400,7 @@
reg = <0x69e4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ddr1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1332,7 +1412,7 @@
reg = <0x69e5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-ddr1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1344,7 +1424,7 @@
reg = <0x6c09000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-dlmm";
+ coresight-name = "coresight-cti-dlmm_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1356,7 +1436,7 @@
reg = <0x6c0a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-dlmm";
+ coresight-name = "coresight-cti-dlmm_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1368,7 +1448,7 @@
reg = <0x6c29000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-dlct";
+ coresight-name = "coresight-cti-dlct_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1380,47 +1460,51 @@
reg = <0x6c2a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-dlct";
+ coresight-name = "coresight-cti-dlct_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti0_wcss: cti@69a4000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x69a4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-wcss";
+ coresight-name = "coresight-cti-wcss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti1_wcss: cti@69a5000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x69a5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-wcss";
+ coresight-name = "coresight-cti-wcss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti2_wcss: cti@69a6000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x69a6000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-wcss";
+ coresight-name = "coresight-cti-wcss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti_mss_q6: cti@683b000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x683b000 0x1000>;
reg-names = "cti-base";
@@ -1431,7 +1515,8 @@
};
cti_turing: cti@6867000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6867000 0x1000>;
reg-names = "cti-base";
@@ -1442,106 +1527,116 @@
};
cti2_ssc_sdc: cti@6b10000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b10000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-ssc_sdc";
+ coresight-name = "coresight-cti-ssc_sdc_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti1_ssc: cti@6b11000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b11000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-ssc";
+ coresight-name = "coresight-cti-ssc_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti0_ssc_q6: cti@6b1b000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b1b000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-ssc-q6";
+ coresight-name = "coresight-cti-ssc_q6_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti_ssc_noc: cti@6b1e000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b1e000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-ssc-noc";
+ coresight-name = "coresight-cti-ssc_noc";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti6_ssc_noc: cti@6b1f000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b1f000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti6-ssc-noc";
+ coresight-name = "coresight-cti-ssc_noc_cti6";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti0_swao: cti@6b04000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b04000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-swao";
+ coresight-name = "coresight-cti-swao_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti1_swao: cti@6b05000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b05000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-swao";
+ coresight-name = "coresight-cti-swao_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti2_swao: cti@6b06000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b06000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-swao";
+ coresight-name = "coresight-cti-swao_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti3_swao: cti@6b07000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b07000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti3-swao";
+ coresight-name = "coresight-cti-swao_cti3";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti_aop_m3: cti@6b21000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6b21000 0x1000>;
reg-names = "cti-base";
@@ -1552,7 +1647,8 @@
};
cti_titan: cti@6c13000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6c13000 0x1000>;
reg-names = "cti-base";
@@ -1563,7 +1659,8 @@
};
cti_venus_arm9: cti@6c20000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x6c20000 0x1000>;
reg-names = "cti-base";
@@ -1574,33 +1671,36 @@
};
cti0_apss: cti@78e0000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x78e0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti0-apss";
+ coresight-name = "coresight-cti-apss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti1_apss: cti@78f0000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x78f0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti1-apss";
+ coresight-name = "coresight-cti-apss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
};
cti2_apss: cti@7900000 {
- compatible = "arm,coresight-cti";
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
reg = <0x7900000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti2-apss";
+ coresight-name = "coresight-cti-apss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1951,7 +2051,7 @@
compatible = "qcom,coresight-remote-etm";
coresight-name = "coresight-audio-etm0";
- qcom,inst-id = <2>;
+ qcom,inst-id = <5>;
port {
audio_etm0_out_funnel_in1: endpoint {
@@ -1995,7 +2095,7 @@
};
port@2 {
- reg = <1>;
+ reg = <2>;
funnel_apss_merg_in_tpda_olc: endpoint {
slave-mode;
remote-endpoint =
@@ -2004,7 +2104,7 @@
};
port@3 {
- reg = <3>;
+ reg = <4>;
funnel_apss_merg_in_tpda_apss: endpoint {
slave-mode;
remote-endpoint =
@@ -2182,6 +2282,22 @@
};
};
+ ipcb_tgu: tgu@6b0c000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b999>;
+ reg = <0x6b0c000 0x1000>;
+ reg-names = "tgu-base";
+ tgu-steps = <3>;
+ tgu-conditions = <4>;
+ tgu-regs = <4>;
+ tgu-timer-counters = <8>;
+
+ coresight-name = "coresight-tgu-ipcb";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
funnel_apss: funnel@7800000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b908>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
index 775cf48..14a3e93 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
@@ -47,16 +47,6 @@
status = "okay";
};
-&soc {
- wcd_buck_vreg_gpio: msm_cdc_pinctrl@94 {
- status = "okay";
- compatible = "qcom,msm-cdc-pinctrl";
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&wcd_buck_vsel_default>;
- pinctrl-1 = <&wcd_buck_vsel_default>;
- };
-};
-
&wcd9xxx_intc {
status = "okay";
};
@@ -79,8 +69,6 @@
&wcd934x_cdc {
status = "okay";
- qcom,has-buck-vsel-gpio;
- qcom,buck-vsel-gpio-node = <&wcd_buck_vreg_gpio>;
};
&clock_audio_lnbb {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 13672bc..1a93fc2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -46,8 +46,9 @@
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
status = "ok";
- reg = <0x5000000 0x40000>;
- reg-names = "kgsl_3d0_reg_memory";
+ reg = <0x5000000 0x40000
+ 0x780000 0x6300>;
+ reg-names = "kgsl_3d0_reg_memory", "qfprom_memory";
interrupts = <0 300 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
@@ -126,6 +127,38 @@
/* Context aware jump target power level */
qcom,ca-target-pwrlevel = <1>;
+ qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
+
+ qcom,gpu-coresights {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,gpu-coresight";
+
+ qcom,gpu-coresight@0 {
+ reg = <0>;
+ coresight-name = "coresight-gfx";
+ coresight-atid = <50>;
+ port {
+ gfx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_gfx>;
+ };
+ };
+ };
+
+ qcom,gpu-coresight@1 {
+ reg = <1>;
+ coresight-name = "coresight-gfx-cx";
+ coresight-atid = <51>;
+ port {
+ gfx_cx_out_funnel_in2: endpoint {
+ remote-endpoint =
+ <&funnel_in2_in_gfx_cx>;
+ };
+ };
+ };
+ };
+
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells = <1>;
@@ -158,57 +191,209 @@
};
};
- /* Power levels */
- qcom,gpu-pwrlevels {
+ /*
+ * Speed-bin zero is default speed bin.
+ * For rest of the speed bins, speed-bin value
+ * is calulated as FMAX/4.8 MHz round up to zero
+ * decimal places.
+ */
+ qcom,gpu-pwrlevel-bins {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "qcom,gpu-pwrlevels";
+ compatible = "qcom,gpu-pwrlevel-bins";
- /* SVS_L1 */
- qcom,gpu-pwrlevel@0 {
- reg = <0>;
- qcom,gpu-freq = <430000000>;
- qcom,bus-freq = <11>;
- qcom,bus-min = <8>;
- qcom,bus-max = <11>;
+ qcom,gpu-pwrlevels-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <0>;
+
+ qcom,initial-pwrlevel = <3>;
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <9>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <8>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
};
- /* SVS */
- qcom,gpu-pwrlevel@1 {
- reg = <1>;
- qcom,gpu-freq = <355000000>;
- qcom,bus-freq = <8>;
- qcom,bus-min = <5>;
- qcom,bus-max = <9>;
+ qcom,gpu-pwrlevels-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <90>;
+
+ qcom,initial-pwrlevel = <3>;
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <9>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <8>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+
};
- /* LOW SVS */
- qcom,gpu-pwrlevel@2 {
- reg = <2>;
- qcom,gpu-freq = <267000000>;
- qcom,bus-freq = <6>;
- qcom,bus-min = <4>;
- qcom,bus-max = <8>;
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <146>;
+
+ qcom,initial-pwrlevel = <6>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <650000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <565000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <430000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <355000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <9>;
+ };
+
+ /* LOW SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <267000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <8>;
+ };
+
+ /* MIN SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <180000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <4>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <0>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+
};
- /* MIN SVS */
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
- qcom,gpu-freq = <180000000>;
- qcom,bus-freq = <4>;
- qcom,bus-min = <3>;
- qcom,bus-max = <4>;
- };
-
- /* XO */
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
- qcom,gpu-freq = <0>;
- qcom,bus-freq = <0>;
- qcom,bus-min = <0>;
- qcom,bus-max = <0>;
- };
};
};
@@ -249,12 +434,10 @@
reg =
<0x506a000 0x31000>,
- <0xb200000 0x300000>,
- <0xc200000 0x10000>;
+ <0xb200000 0x300000>;
reg-names =
"kgsl_gmu_reg",
- "kgsl_gmu_pdc_reg",
- "kgsl_gmu_cpr_reg";
+ "kgsl_gmu_pdc_reg";
interrupts = <0 304 0>, <0 305 0>;
interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 016917b..ef1fc08 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/gpio/gpio.h>
#include "sdm670-pmic-overlay.dtsi"
#include "sdm670-sde-display.dtsi"
+#include "sdm670-camera-sensor-mtp.dtsi"
#include "smb1355.dtsi"
&ufsphy_mem {
@@ -41,6 +42,12 @@
status = "ok";
};
+&pm660l_switch1 {
+ pinctrl-names = "led_enable", "led_disable";
+ pinctrl-0 = <&flash_led3_front_en>;
+ pinctrl-1 = <&flash_led3_front_dis>;
+};
+
&qupv3_se9_2uart {
status = "disabled";
};
@@ -54,7 +61,27 @@
};
&qupv3_se3_i2c {
- status = "disabled";
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 44 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 43 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <44 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+ clock-names = "ref_clk";
+ };
};
&qupv3_se10_i2c {
@@ -301,9 +328,7 @@
};
&dsi_rm67195_amoled_fhd_cmd {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
qcom,platform-reset-gpio = <&tlmm 75 0>;
qcom,platform-te-gpio = <&tlmm 10 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 2bf00fb..d4953c1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -908,7 +908,7 @@
qupv3_se12_2uart_active: qupv3_se12_2uart_active {
mux {
pins = "gpio51", "gpio52";
- function = "qup9";
+ function = "qup12";
};
config {
@@ -1371,6 +1371,70 @@
};
};
+ nfc {
+ nfc_int_active: nfc_int_active {
+ /* active state */
+ mux {
+ /* GPIO 44 NFC Read Interrupt */
+ pins = "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio44";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_int_suspend: nfc_int_suspend {
+ /* sleep state */
+ mux {
+ /* GPIO 44 NFC Read Interrupt */
+ pins = "gpio44";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio44";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_active: nfc_enable_active {
+ /* active state */
+ mux {
+ /* 12: NFC ENABLE 43: FW DNLD */
+ /* 116: ESE Enable */
+ pins = "gpio12", "gpio43", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio43", "gpio116";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_suspend: nfc_enable_suspend {
+ /* sleep state */
+ mux {
+ /* 12: NFC ENABLE 43: FW DNLD */
+ /* 116: ESE Enable */
+ pins = "gpio12", "gpio43", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio43", "gpio116";
+ drive-strength = <2>; /* 2 MA */
+ bias-disable;
+ };
+ };
+ };
+
/* WSA speaker reset pins */
spkr_1_sd_n {
spkr_1_sd_n_sleep: spkr_1_sd_n_sleep {
@@ -1494,6 +1558,36 @@
};
};
+ flash_led3_front {
+ flash_led3_front_en: flash_led3_front_en {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ drive-strength = <2>;
+ output-high;
+ bias-disable;
+ };
+ };
+
+ flash_led3_front_dis: flash_led3_front_dis {
+ mux {
+ pins = "gpio21";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio21";
+ drive-strength = <2>;
+ output-low;
+ bias-disable;
+ };
+ };
+ };
+
/* Pinctrl setting for CAMERA GPIO key */
key_cam_snapshot {
key_cam_snapshot_default: key_cam_snapshot_default {
@@ -1621,6 +1715,281 @@
drive-strength = <2>;
};
};
+
+ cci0_active: cci0_active {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio17","gpio18";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci0_suspend: cci0_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio17","gpio18";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio17","gpio18";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_active: cci1_active {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-up; /* PULL UP*/
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cci1_suspend: cci1_suspend {
+ mux {
+ /* CLK, DATA */
+ pins = "gpio19","gpio20";
+ function = "cci_i2c";
+ };
+
+ config {
+ pins = "gpio19","gpio20";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_active: cam_sensor_rear_active {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio30";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio30";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ output-low;
+ };
+ };
+
+ cam_sensor_rear_vana: cam_sensor_rear_vana {
+ /* AVDD LDO */
+ mux {
+ pins = "gpio8";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio8";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear_vio: cam_sensor_rear_vio {
+ /* DOVDD LDO */
+ mux {
+ pins = "gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio29";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk0_active: cam_sensor_mclk0_active {
+ /* MCLK0 */
+ mux {
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk0_suspend: cam_sensor_mclk0_suspend {
+ /* MCLK0 */
+ mux {
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_active: cam_sensor_front_active {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_front_suspend: cam_sensor_front_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio9";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio9";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ output-low;
+ };
+ };
+
+ cam_sensor_rear2_active: cam_sensor_rear2_active {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+ /* RESET */
+ mux {
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ output-low;
+ };
+ };
+
+ cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk1_suspend: cam_sensor_mclk1_suspend {
+ /* MCLK1 */
+ mux {
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+ /* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_sensor_mclk2_suspend: cam_sensor_mclk2_suspend {
+ /* MCLK2 */
+ mux {
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+ };
+};
+
+&pm660l_gpios {
+ camera_rear_dvdd_en {
+ camera_rear_dvdd_en_default: camera_rear_dvdd_en_default {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <0>;
+ output-low;
+ };
+ };
+
+ camera_dvdd_en {
+ camera_dvdd_en_default: camera_dvdd_en_default {
+ pins = "gpio3";
+ function = "normal";
+ power-source = <0>;
+ output-low;
+ };
};
};
@@ -1651,6 +2020,14 @@
};
&pm660_gpios {
+ nfc_clk {
+ nfc_clk_default: nfc_clk_default {
+ pins = "gpio4";
+ function = "normal";
+ input-enable;
+ power-source = <1>;
+ };
+ };
smb_shutdown_default: smb_shutdown_default {
pins = "gpio11";
function = "normal";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
index 5b67765..0ea4b1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
@@ -33,3 +33,10 @@
<0x0001001b 0x0202001a 0x0 0x0>;
};
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
index 26f5e78..1cf52f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
@@ -26,3 +26,11 @@
<0x0001001b 0x0002001a 0x0 0x0>,
<0x0001001b 0x0202001a 0x0 0x0>;
};
+
+&dsi_dual_nt35597_truly_video_display {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_rm67195_amoled_fhd_cmd_display {
+ qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index c39978e..220487a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -35,6 +35,7 @@
qcom,thermal-mitigation
= <3000000 2500000 2000000 1500000
1000000 500000>;
+ qcom,auto-recharge-soc;
qcom,chgr@1000 {
reg = <0x1000 0x100>;
@@ -178,6 +179,9 @@
qcom,fg-esr-timer-asleep = <256 256>;
qcom,fg-esr-timer-charging = <0 96>;
qcom,cycle-counter-en;
+ qcom,hold-soc-while-full;
+ qcom,fg-auto-recharge-soc;
+ qcom,fg-recharge-soc-thr = <98>;
status = "okay";
qcom,fg-batt-soc@4000 {
@@ -376,5 +380,5 @@
};
&usb0 {
- extcon = <&pm660_pdphy>, <&pm660_pdphy>, <0> /* <&eud> */;
+ extcon = <&pm660_pdphy>, <&pm660_pdphy>, <&eud>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 1925989..93e4c51 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -11,9 +11,11 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include "sdm670-camera-sensor-qrd.dtsi"
#include "sdm670-pmic-overlay.dtsi"
#include "sdm670-audio-overlay.dtsi"
#include "smb1355.dtsi"
+#include "sdm670-sde-display.dtsi"
&qupv3_se9_2uart {
status = "disabled";
@@ -28,7 +30,27 @@
};
&qupv3_se3_i2c {
- status = "disabled";
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 44 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 43 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <44 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active
+ &nfc_enable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+ clock-names = "ref_clk";
+ };
};
&qupv3_se10_i2c {
@@ -51,6 +73,11 @@
qcom,fg-bmd-en-delay-ms = <300>;
};
+&pm660_charger {
+ qcom,battery-data = <&qrd_batterydata>;
+ qcom,sw-jeita-enable;
+};
+
&tlmm {
smb_int_default: smb_int_default {
mux {
@@ -123,7 +150,7 @@
};
&int_codec {
- qcom,model = "sdm660-skuw-snd-card";
+ qcom,model = "sdm670-skuw-snd-card";
qcom,audio-routing =
"RX_BIAS", "INT_MCLK0",
"SPK_RX_BIAS", "INT_MCLK0",
@@ -145,3 +172,116 @@
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
};
+
+&sdhc_1 {
+ vdd-supply = <&pm660l_l4>;
+ qcom,vdd-voltage-level = <2960000 2960000>;
+ qcom,vdd-current-level = <200 570000>;
+
+ vdd-io-supply = <&pm660_l8>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 325000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm660l_l5>;
+ qcom,vdd-voltage-level = <2960000 2960000>;
+ qcom,vdd-current-level = <200 800000>;
+
+ vdd-io-supply = <&pm660l_l2>;
+ qcom,vdd-io-voltage-level = <1800000 2960000>;
+ qcom,vdd-io-current-level = <200 22000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ cd-gpios = <&tlmm 96 0>;
+
+ status = "ok";
+};
+
+&tlmm {
+ pmx_ts_rst_active {
+ ts_rst_active: ts_rst_active {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ pmx_ts_rst_suspend {
+ ts_rst_suspend: ts_rst_suspend {
+ mux {
+ pins = "gpio99";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio99";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+};
+
+&soc {
+ hbtp {
+ compatible = "qcom,hbtp-input";
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_rst_active>;
+ pinctrl-1 = <&ts_rst_suspend>;
+ vcc_ana-supply = <&pm660l_l3>;
+ vcc_dig-supply = <&pm660_l13>;
+ qcom,afe-load = <20000>;
+ qcom,afe-vtg-min = <3000000>;
+ qcom,afe-vtg-max = <3000000>;
+ qcom,dig-load = <40000>;
+ qcom,dig-vtg-min = <1800000>;
+ qcom,dig-vtg-max = <1800000>;
+ qcom,fb-resume-delay-us = <1000>;
+ qcom,afe-force-power-on;
+ qcom,afe-power-on-delay-us = <6>;
+ qcom,afe-power-off-delay-us = <6>;
+ };
+};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-mode-gpio = <&tlmm 76 0>;
+ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_dual_nt36850_truly_cmd_display {
+ qcom,dsi-display-active;
+};
+
+&pm660l_wled {
+ status = "okay";
+ qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+ #cooling-cells = <2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index 657363f..c388f4a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -78,6 +78,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 0 3 64 0>,
+ <&gpi_dma0 1 0 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se0_i2c_active>;
pinctrl-1 = <&qupv3_se0_i2c_sleep>;
@@ -95,6 +98,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 1 3 64 0>,
+ <&gpi_dma0 1 1 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se1_i2c_active>;
pinctrl-1 = <&qupv3_se1_i2c_sleep>;
@@ -112,6 +118,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 2 3 64 0>,
+ <&gpi_dma0 1 2 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se2_i2c_active>;
pinctrl-1 = <&qupv3_se2_i2c_sleep>;
@@ -129,6 +138,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 3 3 64 0>,
+ <&gpi_dma0 1 3 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se3_i2c_active>;
pinctrl-1 = <&qupv3_se3_i2c_sleep>;
@@ -146,6 +158,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 4 3 64 0>,
+ <&gpi_dma0 1 4 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se4_i2c_active>;
pinctrl-1 = <&qupv3_se4_i2c_sleep>;
@@ -163,6 +178,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 5 3 64 0>,
+ <&gpi_dma0 1 5 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se5_i2c_active>;
pinctrl-1 = <&qupv3_se5_i2c_sleep>;
@@ -180,6 +198,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 6 3 64 0>,
+ <&gpi_dma0 1 6 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se6_i2c_active>;
pinctrl-1 = <&qupv3_se6_i2c_sleep>;
@@ -197,6 +218,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ dmas = <&gpi_dma0 0 7 3 64 0>,
+ <&gpi_dma0 1 7 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se7_i2c_active>;
pinctrl-1 = <&qupv3_se7_i2c_sleep>;
@@ -435,6 +459,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 0 3 64 0>,
+ <&gpi_dma1 1 0 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se8_i2c_active>;
pinctrl-1 = <&qupv3_se8_i2c_sleep>;
@@ -452,6 +479,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 1 3 64 0>,
+ <&gpi_dma1 1 1 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se9_i2c_active>;
pinctrl-1 = <&qupv3_se9_i2c_sleep>;
@@ -469,6 +499,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 2 3 64 0>,
+ <&gpi_dma1 1 2 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se10_i2c_active>;
pinctrl-1 = <&qupv3_se10_i2c_sleep>;
@@ -486,6 +519,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 3 3 64 0>,
+ <&gpi_dma1 1 3 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se11_i2c_active>;
pinctrl-1 = <&qupv3_se11_i2c_sleep>;
@@ -503,6 +539,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 4 3 64 0>,
+ <&gpi_dma1 1 4 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se12_i2c_active>;
pinctrl-1 = <&qupv3_se12_i2c_sleep>;
@@ -520,6 +559,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 5 3 64 0>,
+ <&gpi_dma1 1 5 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se13_i2c_active>;
pinctrl-1 = <&qupv3_se13_i2c_sleep>;
@@ -537,6 +579,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 6 3 64 0>,
+ <&gpi_dma1 1 6 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se14_i2c_active>;
pinctrl-1 = <&qupv3_se14_i2c_sleep>;
@@ -554,6 +599,9 @@
clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ dmas = <&gpi_dma1 0 7 3 64 0>,
+ <&gpi_dma1 1 7 3 64 0>;
+ dma-names = "tx", "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qupv3_se15_i2c_active>;
pinctrl-1 = <&qupv3_se15_i2c_sleep>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 24b8dd6..3c84314 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -46,9 +46,9 @@
pm660_s4: regulator-pm660-s4 {
regulator-name = "pm660_s4";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <2040000>;
+ regulator-min-microvolt = <1808000>;
regulator-max-microvolt = <2040000>;
- qcom,init-voltage = <2040000>;
+ qcom,init-voltage = <1808000>;
};
};
@@ -72,9 +72,9 @@
pm660_s6: regulator-pm660-s6 {
regulator-name = "pm660_s6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1352000>;
+ regulator-min-microvolt = <1224000>;
regulator-max-microvolt = <1352000>;
- qcom,init-voltage = <1352000>;
+ qcom,init-voltage = <1224000>;
};
};
@@ -162,11 +162,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660_l1>;
pm660_l1: regulator-pm660-l1 {
regulator-name = "pm660_l1";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1250000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <43600>;
qcom,init-voltage = <1200000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -237,9 +240,9 @@
pm660_l6: regulator-pm660-l6 {
regulator-name = "pm660_l6";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <1304000>;
+ regulator-min-microvolt = <1248000>;
regulator-max-microvolt = <1304000>;
- qcom,init-voltage = <1304000>;
+ qcom,init-voltage = <1248000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
};
@@ -324,11 +327,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660_l11>;
pm660_l11: regulator-pm660-l11 {
regulator-name = "pm660_l11";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <115000>;
qcom,init-voltage = <1800000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
@@ -468,11 +474,14 @@
<RPMH_REGULATOR_MODE_LDO_LPM
RPMH_REGULATOR_MODE_LDO_HPM>;
qcom,mode-threshold-currents = <0 1>;
+ proxy-supply = <&pm660l_l1>;
pm660l_l1: regulator-pm660l-l1 {
regulator-name = "pm660l_l1";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
regulator-min-microvolt = <880000>;
regulator-max-microvolt = <900000>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <72000>;
qcom,init-voltage = <880000>;
qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index e137705..6201488 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -16,7 +16,6 @@
#include "sdm670.dtsi"
#include "sdm670-rumi.dtsi"
-#include "sdm670-audio-overlay.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM670 RUMI";
compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 2a61e18..8dbd063 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -25,6 +25,7 @@
#include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
#include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
+#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
@@ -108,9 +109,9 @@
qcom,panel-supply-entry@0 {
reg = <0>;
- qcom,supply-name = "wqhd-vddio";
+ qcom,supply-name = "vddio";
qcom,supply-min-voltage = <1800000>;
- qcom,supply-max-voltage = <1950000>;
+ qcom,supply-max-voltage = <1800000>;
qcom,supply-enable-load = <32000>;
qcom,supply-disable-load = <80>;
};
@@ -420,8 +421,10 @@
qcom,dsi-panel = <&dsi_rm67195_amoled_fhd_cmd>;
vddio-supply = <&pm660_l11>;
- lab-supply = <&lcdb_ldo_vreg>;
- ibb-supply = <&lcdb_ncp_vreg>;
+ vdda-3p3-supply = <&pm660l_l6>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ oledb-supply = <&pm660a_oledb>;
};
dsi_nt35695b_truly_fhd_video_display: qcom,dsi-display@13 {
@@ -469,6 +472,29 @@
ibb-supply = <&lcdb_ncp_vreg>;
};
+ dsi_dual_nt36850_truly_cmd_display: qcom,dsi-display@15 {
+ compatible = "qcom,dsi-display";
+ label = "dsi_dual_nt36850_truly_cmd_display";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+ pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+
+ qcom,dsi-panel = <&dsi_dual_nt36850_truly_cmd>;
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+ };
+
sde_wb: qcom,wb-display@0 {
compatible = "qcom,wb-display";
cell-index = <0>;
@@ -482,109 +508,12 @@
compatible = "qcom,msm-ext-disp-audio-codec-rx";
};
};
-
- sde_dp: qcom,dp_display@0{
- cell-index = <0>;
- compatible = "qcom,dp-display";
-
- gdsc-supply = <&mdss_core_gdsc>;
- vdda-1p2-supply = <&pm660_l1>;
- vdda-0p9-supply = <&pm660l_l1>;
-
- reg = <0xae90000 0xa84>,
- <0x88eaa00 0x200>,
- <0x88ea200 0x200>,
- <0x88ea600 0x200>,
- <0xaf02000 0x1a0>,
- <0x780000 0x621c>,
- <0x88ea030 0x10>,
- <0x88e8000 0x20>,
- <0x0aee1000 0x034>;
- reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
- "dp_mmss_cc", "qfprom_physical", "dp_pll",
- "usb3_dp_com", "hdcp_physical";
-
- interrupt-parent = <&mdss_mdp>;
- interrupts = <12 0>;
-
- clocks = <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
- <&clock_rpmh RPMH_CXO_CLK>,
- <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
- <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
- <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
- <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
- clock-names = "core_aux_clk", "core_usb_ref_clk_src",
- "core_usb_ref_clk", "core_usb_cfg_ahb_clk",
- "core_usb_pipe_clk", "ctrl_link_clk",
- "ctrl_link_iface_clk", "ctrl_pixel_clk",
- "crypto_clk", "pixel_clk_rcg", "pixel_parent";
-
- qcom,dp-usbpd-detection = <&pm660_pdphy>;
- qcom,ext-disp = <&ext_disp>;
-
- qcom,aux-cfg0-settings = [20 00];
- qcom,aux-cfg1-settings = [24 13 23 1d];
- qcom,aux-cfg2-settings = [28 24];
- qcom,aux-cfg3-settings = [2c 00];
- qcom,aux-cfg4-settings = [30 0a];
- qcom,aux-cfg5-settings = [34 26];
- qcom,aux-cfg6-settings = [38 0a];
- qcom,aux-cfg7-settings = [3c 03];
- qcom,aux-cfg8-settings = [40 bb];
- qcom,aux-cfg9-settings = [44 03];
-
- qcom,max-pclk-frequency-khz = <675000>;
-
- qcom,core-supply-entries {
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,core-supply-entry@0 {
- reg = <0>;
- qcom,supply-name = "gdsc";
- qcom,supply-min-voltage = <0>;
- qcom,supply-max-voltage = <0>;
- qcom,supply-enable-load = <0>;
- qcom,supply-disable-load = <0>;
- };
- };
-
- qcom,ctrl-supply-entries {
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,ctrl-supply-entry@0 {
- reg = <0>;
- qcom,supply-name = "vdda-1p2";
- qcom,supply-min-voltage = <1200000>;
- qcom,supply-max-voltage = <1200000>;
- qcom,supply-enable-load = <21800>;
- qcom,supply-disable-load = <4>;
- };
- };
-
- qcom,phy-supply-entries {
- #address-cells = <1>;
- #size-cells = <0>;
-
- qcom,phy-supply-entry@0 {
- reg = <0>;
- qcom,supply-name = "vdda-0p9";
- qcom,supply-min-voltage = <880000>;
- qcom,supply-max-voltage = <880000>;
- qcom,supply-enable-load = <36000>;
- qcom,supply-disable-load = <32>;
- };
- };
- };
};
&sde_dp {
+ qcom,dp-usbpd-detection = <&pm660_pdphy>;
+ qcom,ext-disp = <&ext_disp>;
+
pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
@@ -600,6 +529,18 @@
&dsi_dual_nt35597_truly_video {
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
+ qcom,mdss-dsi-min-refresh-rate = <53>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -614,6 +555,16 @@
&dsi_dual_nt35597_truly_cmd {
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
+ qcom,ulps-enabled;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -628,6 +579,14 @@
&dsi_nt35597_truly_dsc_cmd {
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,ulps-enabled;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -643,6 +602,18 @@
&dsi_nt35597_truly_dsc_video {
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,mdss-dsi-min-refresh-rate = <53>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update =
+ "dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -780,6 +751,9 @@
&dsi_dual_nt35597_cmd {
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,ulps-enabled;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
qcom,mdss-dsi-display-timings {
timing@0 {
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
@@ -820,6 +794,7 @@
&dsi_nt35695b_truly_fhd_cmd {
qcom,mdss-dsi-t-clk-post = <0x07>;
qcom,mdss-dsi-t-clk-pre = <0x1c>;
+ qcom,ulps-enabled;
qcom,mdss-dsi-display-timings {
timing@0 {
qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
@@ -829,3 +804,17 @@
};
};
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,mdss-dsi-t-clk-post = <0x0E>;
+ qcom,mdss-dsi-t-clk-pre = <0x30>;
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
+ 08 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index 2878184e..2b80c22 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -9,6 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
mdss_mdp: qcom,mdss_mdp@ae00000 {
@@ -45,6 +46,8 @@
#address-cells = <1>;
#size-cells = <0>;
+ #power-domain-cells = <0>;
+
/* hw blocks */
qcom,sde-off = <0x1000>;
qcom,sde-len = <0x45C>;
@@ -52,10 +55,13 @@
qcom,sde-ctl-off = <0x2000 0x2200 0x2400
0x2600 0x2800>;
qcom,sde-ctl-size = <0xE4>;
+ qcom,sde-ctl-display-pref = "primary", "primary", "none",
+ "none", "none";
- qcom,sde-mixer-off = <0x45000 0x46000 0x47000
- 0x48000 0x49000 0x4a000>;
+ qcom,sde-mixer-off = <0x45000 0x46000 0x47000 0x0 0x0 0x4a000>;
qcom,sde-mixer-size = <0x320>;
+ qcom,sde-mixer-display-pref = "primary", "primary", "none",
+ "none", "none", "none";
qcom,sde-dspp-top-off = <0x1300>;
qcom,sde-dspp-top-size = <0xc>;
@@ -63,6 +69,11 @@
qcom,sde-dspp-off = <0x55000 0x57000>;
qcom,sde-dspp-size = <0x17e0>;
+ qcom,sde-dest-scaler-top-off = <0x00061000>;
+ qcom,sde-dest-scaler-top-size = <0xc>;
+ qcom,sde-dest-scaler-off = <0x800 0x1000>;
+ qcom,sde-dest-scaler-size = <0x800>;
+
qcom,sde-wb-off = <0x66000>;
qcom,sde-wb-size = <0x2c8>;
qcom,sde-wb-xin-id = <6>;
@@ -120,11 +131,15 @@
qcom,sde-mixer-blendstages = <0xb>;
qcom,sde-highest-bank-bit = <0x1>;
qcom,sde-ubwc-version = <0x200>;
+ qcom,sde-smart-panel-align-mode = <0xc>;
qcom,sde-panic-per-pipe;
qcom,sde-has-cdp;
qcom,sde-has-src-split;
qcom,sde-has-dim-layer;
qcom,sde-has-idle-pc;
+ qcom,sde-has-dest-scaler;
+ qcom,sde-max-dest-scaler-input-linewidth = <2048>;
+ qcom,sde-max-dest-scaler-output-linewidth = <2560>;
qcom,sde-max-bw-low-kbps = <9600000>;
qcom,sde-max-bw-high-kbps = <9600000>;
qcom,sde-dram-channels = <2>;
@@ -143,7 +158,18 @@
qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
0x00000000>;
- qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+ qcom,sde-safe-lut-linear =
+ <4 0xfff8>,
+ <0 0xfff0>;
+ qcom,sde-safe-lut-macrotile =
+ <10 0xfe00>,
+ <11 0xfc00>,
+ <12 0xf800>,
+ <0 0xf000>;
+ qcom,sde-safe-lut-nrt =
+ <0 0xffff>;
+ qcom,sde-safe-lut-cwb =
+ <0 0xffff>;
qcom,sde-qos-lut-linear =
<4 0x00000000 0x00000357>,
<5 0x00000000 0x00003357>,
@@ -354,6 +380,8 @@
interrupt-parent = <&mdss_mdp>;
interrupts = <2 0>;
+ power-domains = <&mdss_mdp>;
+
/* Offline rotator QoS setting */
qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
qcom,mdss-rot-vbif-memtype = <3 3>;
@@ -419,6 +447,7 @@
"pixel_clk", "pixel_clk_rcg",
"esc_clk";
+ qcom,null-insertion-enabled;
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -452,6 +481,7 @@
<&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
"pixel_clk", "pixel_clk_rcg", "esc_clk";
+ qcom,null-insertion-enabled;
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -533,4 +563,90 @@
};
};
+ sde_dp: qcom,dp_display@0{
+ cell-index = <0>;
+ compatible = "qcom,dp-display";
+
+ vdda-1p2-supply = <&pm660_l1>;
+ vdda-0p9-supply = <&pm660l_l1>;
+
+ reg = <0xae90000 0x0dc>,
+ <0xae90200 0x0c0>,
+ <0xae90400 0x508>,
+ <0xae90a00 0x094>,
+ <0x88eaa00 0x200>,
+ <0x88ea200 0x200>,
+ <0x88ea600 0x200>,
+ <0xaf02000 0x1a0>,
+ <0x780000 0x621c>,
+ <0x88ea030 0x10>,
+ <0x88e8000 0x20>,
+ <0x0aee1000 0x034>;
+ /* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */
+ reg-names = "dp_ahb", "dp_aux", "dp_link",
+ "dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+ "dp_mmss_cc", "qfprom_physical", "dp_pll",
+ "usb3_dp_com", "hdcp_physical";
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <12 0>;
+
+ clocks = <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+ <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+ <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+ clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+ "core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+ "core_usb_pipe_clk", "ctrl_link_clk",
+ "ctrl_link_iface_clk", "ctrl_pixel_clk",
+ "crypto_clk", "pixel_clk_rcg", "pixel_parent";
+
+ qcom,aux-cfg0-settings = [20 00];
+ qcom,aux-cfg1-settings = [24 13 23 1d];
+ qcom,aux-cfg2-settings = [28 24];
+ qcom,aux-cfg3-settings = [2c 00];
+ qcom,aux-cfg4-settings = [30 0a];
+ qcom,aux-cfg5-settings = [34 26];
+ qcom,aux-cfg6-settings = [38 0a];
+ qcom,aux-cfg7-settings = [3c 03];
+ qcom,aux-cfg8-settings = [40 bb];
+ qcom,aux-cfg9-settings = [44 03];
+
+ qcom,max-pclk-frequency-khz = <675000>;
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-1p2";
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <21800>;
+ qcom,supply-disable-load = <4>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <36000>;
+ qcom,supply-disable-load = <32>;
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
index a74f9d8..01d4057 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
@@ -139,7 +139,7 @@
"bus_clk", "core0_clk", "core0_bus_clk";
qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0>;
qcom,allowed-clock-rates = <100000000 200000000 320000000
- 364800000>;
+ 364700000>;
/* Buses */
bus_cnoc {
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 2be63a1..95f831b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -347,23 +347,14 @@
CPU_COST_0: core-cost0 {
busy-cost-data = <
300000 14
- 403200 18
- 480000 21
576000 25
- 652800 27
748800 31
- 825600 40
- 902400 43
- 979200 46
- 1056000 50
- 1132800 53
- 1228800 57
+ 998400 46
+ 1209600 57
1324800 84
- 1420800 90
1516800 96
1612800 114
- 1689600 135
- 1766400 141
+ 1708000 139
>;
idle-cost-data = <
12 10 8 6
@@ -372,32 +363,23 @@
CPU_COST_1: core-cost1 {
busy-cost-data = <
300000 256
- 403200 271
- 480000 282
- 576000 296
652800 307
- 748800 321
825600 332
- 902400 369
979200 382
- 1056000 395
1132800 408
- 1209600 421
- 1286400 434
1363200 448
- 1459200 567
1536000 586
- 1612800 604
- 1689600 622
- 1766400 641
+ 1747200 641
1843200 659
- 1920000 678
1996800 696
- 2092800 876
+ 2054400 876
2169600 900
- 2246400 924
- 2323200 948
+ 2208000 924
+ 2361600 948
2400000 1170
+ 2457600 1200
+ 2515200 1300
+ 2611200 1400
>;
idle-cost-data = <
100 80 60 40
@@ -406,23 +388,14 @@
CLUSTER_COST_0: cluster-cost0 {
busy-cost-data = <
300000 5
- 403200 7
- 480000 7
576000 7
- 652800 8
748800 8
- 825600 9
- 902400 9
- 979200 9
- 1056000 10
- 1132800 10
- 1228800 10
+ 998400 9
+ 1209600 10
1324800 13
- 1420800 14
1516800 15
1612800 16
- 1689600 19
- 1766400 19
+ 1708000 19
>;
idle-cost-data = <
4 3 2 1
@@ -431,32 +404,23 @@
CLUSTER_COST_1: cluster-cost1 {
busy-cost-data = <
300000 25
- 403200 27
- 480000 28
- 576000 29
652800 30
- 748800 32
825600 33
- 902400 36
979200 38
- 1056000 39
1132800 40
- 1209600 42
- 1286400 43
1363200 44
- 1459200 56
1536000 58
- 1612800 60
- 1689600 62
- 1766400 64
+ 1747200 64
1843200 65
- 1920000 67
1996800 69
- 2092800 87
+ 2054400 87
2169600 90
- 2246400 92
- 2323200 94
+ 2208000 92
+ 2361600 94
2400000 117
+ 2457600 120
+ 2515200 130
+ 2611200 140
>;
idle-cost-data = <
4 3 2 1
@@ -489,7 +453,7 @@
dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,avb";
};
};
};
@@ -553,7 +517,7 @@
qseecom_mem: qseecom_region {
compatible = "shared-dma-pool";
alloc-ranges = <0 0x00000000 0 0xffffffff>;
- reusable;
+ no-map;
alignment = <0 0x400000>;
size = <0 0x1400000>;
};
@@ -574,6 +538,11 @@
size = <0 0x5c00000>;
};
+ cont_splash_memory: cont_splash_region@9d400000 {
+ reg = <0x0 0x9d400000 0x0 0x02400000>;
+ label = "cont_splash_region";
+ };
+
dump_mem: mem_dump_region {
compatible = "shared-dma-pool";
reusable;
@@ -721,6 +690,32 @@
clock-frequency = <19200000>;
};
+ qcom,memshare {
+ compatible = "qcom,memshare";
+
+ qcom,client_1 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <0>;
+ qcom,allocate-boot-time;
+ label = "modem";
+ };
+
+ qcom,client_2 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <2>;
+ label = "modem";
+ };
+
+ mem_client_3_size: qcom,client_3 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x500000>;
+ qcom,client-id = <1>;
+ label = "modem";
+ };
+ };
+
qcom,sps {
compatible = "qcom,msm_sps_4k";
qcom,pipe-attr-ee;
@@ -751,9 +746,9 @@
<&clock_gcc GCC_CE1_AXI_CLK>;
qcom,ce-opp-freq = <171430000>;
qcom,request-bw-before-clk;
- qcom,smmu-s1-bypass;
- iommus = <&apps_smmu 0x706 0x3>,
- <&apps_smmu 0x716 0x3>;
+ qcom,smmu-s1-enable;
+ iommus = <&apps_smmu 0x706 0x1>,
+ <&apps_smmu 0x716 0x1>;
};
qcom_crypto: qcrypto@1de0000 {
@@ -788,9 +783,9 @@
qcom,use-sw-aead-algo;
qcom,use-sw-ahash-algo;
qcom,use-sw-hmac-algo;
- qcom,smmu-s1-bypass;
- iommus = <&apps_smmu 0x704 0x3>,
- <&apps_smmu 0x714 0x3>;
+ qcom,smmu-s1-enable;
+ iommus = <&apps_smmu 0x704 0x1>,
+ <&apps_smmu 0x714 0x1>;
};
qcom,qbt1000 {
@@ -811,6 +806,7 @@
qcom,disk-encrypt-pipe-pair = <2>;
qcom,support-fde;
qcom,no-clock-support;
+ qcom,fde-key-size;
qcom,appsbl-qseecom-support;
qcom,msm-bus,name = "qseecom-noc";
qcom,msm-bus,num-cases = <4>;
@@ -1038,9 +1034,14 @@
compatible = "qcom,clk-cpu-osm-sdm670";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm660l_s1_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm660l_s1_level_ao>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
l3-devs = <&l3_cpu0 &l3_cpu6>;
clock-names = "xo_ao";
@@ -1119,6 +1120,12 @@
qcom,rtb-size = <0x100000>;
};
+ qcom,mpm2-sleep-counter@c221000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x0c221000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
qcom,msm-imem@146bf000 {
compatible = "qcom,msm-imem";
reg = <0x146bf000 0x1000>;
@@ -1158,7 +1165,7 @@
};
gpi_dma0: qcom,gpi-dma@0x800000 {
- #dma-cells = <6>;
+ #dma-cells = <5>;
compatible = "qcom,gpi-dma";
reg = <0x800000 0x60000>;
reg-names = "gpi-top";
@@ -1170,11 +1177,13 @@
qcom,gpii-mask = <0xfa>;
qcom,ev-factor = <2>;
iommus = <&apps_smmu 0x0016 0x0>;
+ qcom,smmu-cfg = <0x1>;
+ qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
status = "ok";
};
gpi_dma1: qcom,gpi-dma@0xa00000 {
- #dma-cells = <6>;
+ #dma-cells = <5>;
compatible = "qcom,gpi-dma";
reg = <0xa00000 0x60000>;
reg-names = "gpi-top";
@@ -1185,6 +1194,8 @@
qcom,max-num-gpii = <13>;
qcom,gpii-mask = <0xfa>;
qcom,ev-factor = <2>;
+ qcom,smmu-cfg = <0x1>;
+ qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
iommus = <&apps_smmu 0x06d6 0x0>;
status = "ok";
};
@@ -1620,7 +1631,8 @@
interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x88e0000 0x2000>;
reg-names = "eud_base";
- status = "disabled";
+ clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+ clock-names = "cfg_ahb_clk";
};
qcom,llcc@1100000 {
@@ -1637,6 +1649,10 @@
qcom,dump-size = <0x80000>;
};
+ qcom,llcc-perfmon {
+ compatible = "qcom,llcc-perfmon";
+ };
+
qcom,llcc-erp {
compatible = "qcom,llcc-erp";
interrupt-names = "ecc_irq";
@@ -1839,6 +1855,7 @@
qcom,sysmon-id = <1>;
qcom,ssctl-instance-id = <0x14>;
qcom,firmware-name = "adsp";
+ qcom,signal-aop;
memory-region = <&pil_adsp_mem>;
/* GPIO inputs from lpass */
@@ -1849,6 +1866,9 @@
/* GPIO output to lpass */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "adsp-pil";
status = "ok";
};
@@ -1857,6 +1877,7 @@
reg = <0x0 0x200000>;
reg-names = "rmtfs";
qcom,client-id = <0x00000001>;
+ qcom,guard-memory;
};
qcom,msm_gsi {
@@ -1888,7 +1909,6 @@
qcom,ipa-wdi2;
qcom,use-64-bit-dma-mask;
qcom,arm-smmu;
- qcom,smmu-s1-bypass;
qcom,bandwidth-vote-for-ipa;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <4>;
@@ -2005,17 +2025,20 @@
ipa_smmu_ap: ipa_smmu_ap {
compatible = "qcom,ipa-smmu-ap-cb";
+ qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x720 0x0>;
qcom,iova-mapping = <0x20000000 0x40000000>;
};
ipa_smmu_wlan: ipa_smmu_wlan {
compatible = "qcom,ipa-smmu-wlan-cb";
+ qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x721 0x0>;
};
ipa_smmu_uc: ipa_smmu_uc {
compatible = "qcom,ipa-smmu-uc-cb";
+ qcom,smmu-s1-bypass;
iommus = <&apps_smmu 0x722 0x0>;
qcom,iova-mapping = <0x40000000 0x20000000>;
};
@@ -2062,11 +2085,14 @@
vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
vdd_mx-supply = <&pm660l_s1_level>;
vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
+ vdd_mss-supply = <&pm660_s5_level>;
+ vdd_mss-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
qcom,firmware-name = "modem";
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
qcom,ssctl-instance-id = <0x12>;
qcom,override-acc;
+ qcom,signal-aop;
qcom,qdsp6v65-1-0;
qcom,mss_pdc_offset = <9>;
status = "ok";
@@ -2082,6 +2108,9 @@
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "mss-pil";
qcom,mba-mem@0 {
compatible = "qcom,pil-mba-mem";
memory-region = <&pil_mba_mem>;
@@ -2133,6 +2162,7 @@
qcom,sysmon-id = <7>;
qcom,ssctl-instance-id = <0x17>;
qcom,firmware-name = "cdsp";
+ qcom,signal-aop;
memory-region = <&pil_cdsp_mem>;
/* GPIO inputs from turing */
@@ -2143,9 +2173,34 @@
/* GPIO output to turing*/
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+
+ mboxes = <&qmp_aop 0>;
+ mbox-names = "cdsp-pil";
status = "ok";
};
+ sdcc1_ice: sdcc1ice@7c8000 {
+ compatible = "qcom,ice";
+ reg = <0x7c8000 0x8000>;
+ qcom,enable-ice-clk;
+ clock-names = "ice_core_clk_src", "ice_core_clk",
+ "bus_clk", "iface_clk";
+ clocks = <&clock_gcc GCC_SDCC1_ICE_CORE_CLK_SRC>,
+ <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>,
+ <&clock_gcc GCC_SDCC1_APPS_CLK>,
+ <&clock_gcc GCC_SDCC1_AHB_CLK>;
+ qcom,op-freq-hz = <300000000>, <0>, <0>, <0>;
+ qcom,msm-bus,name = "sdcc_ice_noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <150 512 0 0>, /* No vote */
+ <150 512 1000 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "MAX";
+ qcom,instance-type = "sdcc";
+ };
+
sdhc_1: sdhci@7c4000 {
compatible = "qcom,sdhci-msm-v5";
reg = <0x7C4000 0x1000>, <0x7C5000 0x1000>;
@@ -2156,6 +2211,7 @@
qcom,bus-width = <8>;
qcom,large-address-bus;
+ sdhc-msm-crypto = <&sdcc1_ice>;
qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
192000000 384000000>;
@@ -2168,31 +2224,31 @@
qcom,msm-bus,num-paths = <2>;
qcom,msm-bus,vectors-KBps =
/* No vote */
- <150 512 0 0>, <1 606 0 0>,
+ <150 512 0 0>, <1 782 0 0>,
/* 400 KB/s*/
<150 512 1046 1600>,
- <1 606 1600 1600>,
+ <1 782 1600 1600>,
/* 20 MB/s */
<150 512 52286 80000>,
- <1 606 80000 80000>,
+ <1 782 80000 80000>,
/* 25 MB/s */
<150 512 65360 100000>,
- <1 606 100000 100000>,
+ <1 782 100000 100000>,
/* 50 MB/s */
<150 512 130718 200000>,
- <1 606 133320 133320>,
+ <1 782 100000 100000>,
/* 100 MB/s */
<150 512 130718 200000>,
- <1 606 150000 150000>,
+ <1 782 130000 130000>,
/* 200 MB/s */
<150 512 261438 400000>,
- <1 606 300000 300000>,
+ <1 782 300000 300000>,
/* 400 MB/s */
<150 512 261438 400000>,
- <1 606 300000 300000>,
+ <1 782 300000 300000>,
/* Max. bandwidth */
<150 512 1338562 4096000>,
- <1 606 1338562 4096000>;
+ <1 782 1338562 4096000>;
qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
100000000 200000000 400000000 4294967295>;
@@ -2255,10 +2311,10 @@
<1 608 100000 100000>,
/* 50 MB/s */
<81 512 130718 200000>,
- <1 608 133320 133320>,
+ <1 608 100000 100000>,
/* 100 MB/s */
<81 512 261438 200000>,
- <1 608 150000 150000>,
+ <1 608 130000 130000>,
/* 200 MB/s */
<81 512 261438 400000>,
<1 608 300000 300000>,
@@ -2441,6 +2497,8 @@
qcom,count-unit = <0x10000>;
qcom,hw-timer-hz = <19200000>;
qcom,target-dev = <&cpubw>;
+ qcom,byte-mid-mask = <0xe000>;
+ qcom,byte-mid-match = <0xe000>;
};
memlat_cpu0: qcom,memlat-cpu0 {
@@ -2491,8 +2549,8 @@
< 748800 MHZ_TO_MBPS( 300, 4) >,
< 998400 MHZ_TO_MBPS( 451, 4) >,
< 1209600 MHZ_TO_MBPS( 547, 4) >,
- < 1497600 MHZ_TO_MBPS( 768, 4) >,
- < 1728000 MHZ_TO_MBPS(1017, 4) >;
+ < 1516800 MHZ_TO_MBPS( 768, 4) >,
+ < 1708000 MHZ_TO_MBPS(1017, 4) >;
};
devfreq_memlat_6: qcom,cpu6-memlat-mon {
@@ -2501,11 +2559,11 @@
qcom,target-dev = <&memlat_cpu6>;
qcom,cachemiss-ev = <0x2a>;
qcom,core-dev-table =
- < 787200 MHZ_TO_MBPS( 300, 4) >,
- < 1113600 MHZ_TO_MBPS( 547, 4) >,
- < 1344000 MHZ_TO_MBPS(1017, 4) >,
- < 1900800 MHZ_TO_MBPS(1555, 4) >,
- < 2438400 MHZ_TO_MBPS(1804, 4) >;
+ < 825600 MHZ_TO_MBPS( 300, 4) >,
+ < 1132800 MHZ_TO_MBPS( 547, 4) >,
+ < 1363200 MHZ_TO_MBPS(1017, 4) >,
+ < 1996800 MHZ_TO_MBPS(1555, 4) >,
+ < 2457600 MHZ_TO_MBPS(1804, 4) >;
};
l3_cpu0: qcom,l3-cpu0 {
@@ -2528,12 +2586,13 @@
qcom,target-dev = <&l3_cpu0>;
qcom,cachemiss-ev = <0x17>;
qcom,core-dev-table =
- < 748800 566400000 >,
- < 998400 787200000 >,
+ < 576000 300000000 >,
+ < 748800 556800000 >,
+ < 998400 806400000 >,
< 1209660 940800000 >,
- < 1497600 1190400000 >,
+ < 1516800 1190400000 >,
< 1612800 1382400000 >,
- < 1728000 1440000000 >;
+ < 1708000 1440000000 >;
};
devfreq_l3lat_6: qcom,cpu6-l3lat-mon {
@@ -2542,11 +2601,11 @@
qcom,target-dev = <&l3_cpu6>;
qcom,cachemiss-ev = <0x17>;
qcom,core-dev-table =
- < 1113600 566400000 >,
- < 1344000 787200000 >,
- < 1728000 940800000 >,
- < 1900800 1190400000 >,
- < 2438400 1440000000 >;
+ < 1132800 556800000 >,
+ < 1363200 806400000 >,
+ < 1747200 940800000 >,
+ < 1996800 1190400000 >,
+ < 2457600 1440000000 >;
};
mincpubw: qcom,mincpubw {
@@ -2573,18 +2632,85 @@
target-dev = <&mincpubw>;
cpu-to-dev-map-0 =
< 748800 MHZ_TO_MBPS( 300, 4) >,
- < 1209600 MHZ_TO_MBPS( 451, 4) >,
- < 1612000 MHZ_TO_MBPS( 547, 4) >,
- < 1728000 MHZ_TO_MBPS( 768, 4) >;
+ < 1209660 MHZ_TO_MBPS( 451, 4) >,
+ < 1612800 MHZ_TO_MBPS( 547, 4) >,
+ < 1708000 MHZ_TO_MBPS( 768, 4) >;
cpu-to-dev-map-6 =
- < 1113600 MHZ_TO_MBPS( 300, 4) >,
- < 1344000 MHZ_TO_MBPS( 547, 4) >,
- < 1728000 MHZ_TO_MBPS( 768, 4) >,
- < 1900800 MHZ_TO_MBPS(1017, 4) >,
- < 2438400 MHZ_TO_MBPS(1804, 4) >;
+ < 1132800 MHZ_TO_MBPS( 300, 4) >,
+ < 1363200 MHZ_TO_MBPS( 547, 4) >,
+ < 1747200 MHZ_TO_MBPS( 768, 4) >,
+ < 1996800 MHZ_TO_MBPS(1017, 4) >,
+ < 2457600 MHZ_TO_MBPS(1804, 4) >;
};
};
+ mincpu0bw: qcom,mincpu0bw {
+ compatible = "qcom,devbw";
+ governor = "powersave";
+ qcom,src-dst-ports = <1 512>;
+ qcom,active-only;
+ qcom,bw-tbl =
+ < MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+ < MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+ < MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+ < MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+ < MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+ < MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+ < MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+ < MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+ < MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+ < MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+ < MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+ };
+
+ mincpu6bw: qcom,mincpu6bw {
+ compatible = "qcom,devbw";
+ governor = "powersave";
+ qcom,src-dst-ports = <1 512>;
+ qcom,active-only;
+ qcom,bw-tbl =
+ < MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+ < MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+ < MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+ < MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+ < MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+ < MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+ < MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+ < MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+ < MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+ < MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+ < MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+ };
+
+ devfreq_compute0: qcom,devfreq-compute0 {
+ compatible = "qcom,arm-cpu-mon";
+ qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5>;
+ qcom,target-dev = <&mincpu0bw>;
+ qcom,core-dev-table =
+ < 748800 MHZ_TO_MBPS( 300, 4) >,
+ < 1209660 MHZ_TO_MBPS( 451, 4) >,
+ < 1612800 MHZ_TO_MBPS( 547, 4) >,
+ < 1708000 MHZ_TO_MBPS( 768, 4) >;
+ };
+
+ devfreq_compute6: qcom,devfreq-compute6 {
+ compatible = "qcom,arm-cpu-mon";
+ qcom,cpulist = <&CPU6 &CPU7>;
+ qcom,target-dev = <&mincpu6bw>;
+ qcom,core-dev-table =
+ < 1132800 MHZ_TO_MBPS( 300, 4) >,
+ < 1363200 MHZ_TO_MBPS( 547, 4) >,
+ < 1747200 MHZ_TO_MBPS( 768, 4) >,
+ < 1996800 MHZ_TO_MBPS(1017, 4) >,
+ < 2457600 MHZ_TO_MBPS(1804, 4) >;
+ };
+
+ cpu_pmu: cpu-pmu {
+ compatible = "arm,armv8-pmuv3";
+ qcom,irq-is-percpu;
+ interrupts = <1 5 4>;
+ };
+
gpu_gx_domain_addr: syscon@0x5091508 {
compatible = "syscon";
reg = <0x5091508 0x4>;
@@ -2662,6 +2788,8 @@
&mdss_core_gdsc {
status = "ok";
+ proxy-supply = <&mdss_core_gdsc>;
+ qcom,proxy-consumer-enable;
};
&gpu_cx_gdsc {
@@ -2725,9 +2853,26 @@
};
};
+&sde_dp {
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "refgen";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+};
+
#include "sdm670-audio.dtsi"
#include "sdm670-usb.dtsi"
#include "sdm670-gpu.dtsi"
+#include "sdm670-camera.dtsi"
#include "sdm670-thermal.dtsi"
#include "sdm670-bus.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
index c6622d4..f9c6f65 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-qrd-overlay.dts
@@ -62,3 +62,7 @@
&dsi_sharp_4k_dsc_video_display {
qcom,dsi-display-active;
};
+
+&mdss_mdp {
+ connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 7ca2645..d8a6dc3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -22,10 +22,20 @@
status = "ok";
};
- led_flash_front: qcom,camera-flash@1 {
+ led_flash_rear_aux: qcom,camera-flash@1 {
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1 >;
+ switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@2 {
+ cell-index = <2>;
+ reg = <0x02 0x00>;
+ compatible = "qcom,camera-flash";
flash-source = <&pmi8998_flash2>;
torch-source = <&pmi8998_torch2>;
switch-source = <&pmi8998_switch1>;
@@ -74,6 +84,11 @@
};
&cam_cci {
+ qcom,cam-res-mgr {
+ compatible = "qcom,cam-res-mgr";
+ status = "ok";
+ };
+
actuator_rear: qcom,actuator@0 {
cell-index = <0>;
reg = <0x0>;
@@ -87,7 +102,7 @@
rgltr-load-current = <0>;
};
- actuator_front: qcom,actuator@1 {
+ actuator_rear_aux: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
@@ -100,6 +115,19 @@
rgltr-load-current = <0>;
};
+ actuator_front: qcom,actuator@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
@@ -289,9 +317,11 @@
compatible = "qcom,cam-sensor";
reg = <0x1>;
csiphy-sd-index = <1>;
- sensor-position-roll = <90>;
+ sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <180>;
+ actuator-src = <&actuator_rear_aux>;
+ led-flash-src = <&led_flash_rear_aux>;
eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index aa55698..952ba29 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -22,10 +22,20 @@
status = "ok";
};
- led_flash_front: qcom,camera-flash@1 {
+ led_flash_rear_aux: qcom,camera-flash@1 {
cell-index = <1>;
reg = <0x01 0x00>;
compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@2 {
+ cell-index = <2>;
+ reg = <0x02 0x00>;
+ compatible = "qcom,camera-flash";
flash-source = <&pmi8998_flash2>;
torch-source = <&pmi8998_torch2>;
switch-source = <&pmi8998_switch1>;
@@ -74,6 +84,11 @@
};
&cam_cci {
+ qcom,cam-res-mgr {
+ compatible = "qcom,cam-res-mgr";
+ status = "ok";
+ };
+
actuator_rear: qcom,actuator@0 {
cell-index = <0>;
reg = <0x0>;
@@ -87,7 +102,7 @@
rgltr-load-current = <0>;
};
- actuator_front: qcom,actuator@1 {
+ actuator_rear_aux: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
compatible = "qcom,actuator";
@@ -100,6 +115,19 @@
rgltr-load-current = <0>;
};
+ actuator_front: qcom,actuator@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
ois_rear: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
@@ -289,9 +317,11 @@
compatible = "qcom,cam-sensor";
reg = <0x1>;
csiphy-sd-index = <1>;
- sensor-position-roll = <90>;
+ sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <180>;
+ actuator-src = <&actuator_rear_aux>;
+ led-flash-src = <&led_flash_rear_aux>;
eeprom-src = <&eeprom_rear_aux>;
cam_vdig-supply = <&camera_ldo>;
cam_vio-supply = <&pm8998_lvs1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
new file mode 100644
index 0000000..8ad5f3c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash_rear: qcom,camera-flash@0 {
+ cell-index = <0>;
+ reg = <0x00 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+ torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+ switch-source = <&pmi8998_switch0>;
+ status = "ok";
+ };
+
+ led_flash_front: qcom,camera-flash@1 {
+ cell-index = <1>;
+ reg = <0x01 0x00>;
+ compatible = "qcom,camera-flash";
+ flash-source = <&pmi8998_flash2>;
+ torch-source = <&pmi8998_torch2>;
+ switch-source = <&pmi8998_switch1>;
+ status = "ok";
+ };
+
+ actuator_regulator: gpio-regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0x00 0x00>;
+ regulator-name = "actuator_regulator";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <100>;
+ enable-active-high;
+ gpio = <&tlmm 27 0>;
+ vin-supply = <&pmi8998_bob>;
+ };
+
+ camera_rear_ldo: gpio-regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <0x01 0x00>;
+ regulator-name = "camera_rear_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <135>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 12 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+
+ camera_ldo: gpio-regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <0x02 0x00>;
+ regulator-name = "camera_ldo";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <233>;
+ enable-active-high;
+ gpio = <&pm8998_gpios 9 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_dvdd_en_default>;
+ vin-supply = <&pm8998_s3>;
+ };
+};
+
+&cam_cci {
+ actuator_rear: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ actuator_rear_aux: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ };
+
+ ois_rear: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ cci-master = <0>;
+ cam_vaf-supply = <&actuator_regulator>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2800000>;
+ rgltr-max-voltage = <2800000>;
+ rgltr-load-current = <0>;
+ status = "disabled";
+ };
+
+ eeprom_rear: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>,
+ <&tlmm 27 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA0",
+ "CAM_VAF";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_rear_aux: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>,
+ <&tlmm 27 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-vaf = <3>;
+ gpio-req-tbl-num = <0 1 2 3>;
+ gpio-req-tbl-flags = <1 0 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1",
+ "CAM_VAF";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_front: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VANA2";
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@0 {
+ cell-index = <0>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x0>;
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ led-flash-src = <&led_flash_rear>;
+ actuator-src = <&actuator_rear>;
+ ois-src = <&ois_rear>;
+ eeprom-src = <&eeprom_rear>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_rear_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 80 0>,
+ <&tlmm 79 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VANA";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@1 {
+ cell-index = <1>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x1>;
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_rear_aux>;
+ actuator-src = <&actuator_rear_aux>;
+ led-flash-src = <&led_flash_front>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 3312000 1050000 0>;
+ rgltr-max-voltage = <0 3600000 1050000 0>;
+ rgltr-load-current = <0 80000 105000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&tlmm 8 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1",
+					"CAM_VANA1";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor@2 {
+ cell-index = <2>;
+ compatible = "qcom,cam-sensor";
+ reg = <0x02>;
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_front>;
+ cam_vdig-supply = <&camera_ldo>;
+ cam_vio-supply = <&pm8998_lvs1>;
+ cam_vana-supply = <&pmi8998_bob>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+ "cam_clk";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <1050000 0 3312000 0>;
+ rgltr-max-voltage = <1050000 0 3600000 0>;
+ rgltr-load-current = <105000 0 80000 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ gpio-reset = <1>;
+ gpio-vana = <2>;
+ gpio-req-tbl-num = <0 1 2>;
+ gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2",
+					"CAM_VANA2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index db57aae..35a7774 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -45,9 +45,10 @@
"csiphy0_clk",
"csi0phytimer_clk_src",
"csi0phytimer_clk";
- clock-cntl-level = "turbo";
+ clock-cntl-level = "svs", "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0>;
+ <0 0 0 0 320000000 0 269333333 0>,
+ <0 0 0 0 384000000 0 269333333 0>;
status = "ok";
};
@@ -79,9 +80,10 @@
"csiphy1_clk",
"csi1phytimer_clk_src",
"csi1phytimer_clk";
- clock-cntl-level = "turbo";
+ clock-cntl-level = "svs", "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0>;
+ <0 0 0 0 320000000 0 269333333 0>,
+ <0 0 0 0 384000000 0 269333333 0>;
status = "ok";
};
@@ -114,9 +116,10 @@
"csiphy2_clk",
"csi2phytimer_clk_src",
"csi2phytimer_clk";
- clock-cntl-level = "turbo";
+ clock-cntl-level = "svs", "turbo";
clock-rates =
- <0 0 0 0 320000000 0 269333333 0>;
+ <0 0 0 0 320000000 0 269333333 0>,
+ <0 0 0 0 384000000 0 269333333 0>;
status = "ok";
};
@@ -146,7 +149,7 @@
"cci_clk",
"cci_clk_src";
src-clock-name = "cci_clk_src";
- clock-cntl-level = "turbo";
+ clock-cntl-level = "lowsvs";
clock-rates = <0 0 0 0 0 37500000>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cci0_active &cci1_active>;
@@ -296,11 +299,20 @@
status = "ok";
};
+ iova-mem-region-secondary-heap {
+ /* Secondary heap region is 1MB long */
+ iova-region-name = "secheap";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0x100000>;
+ iova-region-id = <0x4>;
+ status = "ok";
+ };
+
iova-mem-region-io {
/* IO region is approximately 3.3 GB */
iova-region-name = "io";
- iova-region-start = <0xd800000>;
- iova-region-len = <0xd2800000>;
+ iova-region-start = <0xd900000>;
+ iova-region-len = <0xd2700000>;
iova-region-id = <0x3>;
status = "ok";
};
@@ -390,17 +402,17 @@
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 180000>,
+ MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
- <MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
- <MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
RPMH_REGULATOR_LEVEL_RETENTION
RPMH_REGULATOR_LEVEL_MIN_SVS
@@ -422,13 +434,14 @@
"csid0", "csid1", "csid2",
"ife0", "ife1", "ife2", "ipe0",
"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
- "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas";
client-axi-port-names =
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_hf_1", "cam_hf_2", "cam_hf_2",
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
- "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+ "cam_sf_1";
client-bus-camnoc-based;
qcom,axi-port-list {
qcom,axi-port1 {
@@ -517,7 +530,8 @@
cdm-client-names = "vfe",
"jpegdma",
"jpegenc",
- "fd";
+ "fd",
+ "lrmecdm";
status = "ok";
};
@@ -591,8 +605,10 @@
<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -626,12 +642,15 @@
<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 600000000 0 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 480000000 0 0>,
+ <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
- clock-rates-option = <404000000>;
+ clock-rates-option = <600000000>;
status = "ok";
};
@@ -672,8 +691,10 @@
<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 500000000 0 0 0 600000000 0 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -707,12 +728,15 @@
<&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 600000000 0 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0 0>,
+ <0 0 0 0 0 0 480000000 0 0>,
+ <0 0 0 0 0 0 600000000 0 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_clk_src";
clock-names-option = "ife_dsp_clk";
clocks-option = <&clock_camcc CAM_CC_IFE_1_DSP_CLK>;
- clock-rates-option = <404000000>;
+ clock-rates-option = <600000000>;
status = "ok";
};
@@ -750,8 +774,10 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 384000000 0 0 0 404000000 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 384000000 0 0 0 404000000 0>,
+ <0 0 0 0 0 0 538000000 0 0 0 600000000 0>;
+ clock-cntl-level = "svs", "turbo";
src-clock-name = "ife_csid_clk_src";
status = "ok";
};
@@ -782,8 +808,11 @@
<&clock_camcc CAM_CC_IFE_LITE_CLK>,
<&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>,
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
- clock-rates = <0 0 0 0 0 0 404000000 0>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 0 0 0 0 404000000 0>,
+ <0 0 0 0 0 0 480000000 0>,
+ <0 0 0 0 0 0 600000000 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_clk_src";
status = "ok";
};
@@ -829,9 +858,12 @@
<&clock_camcc CAM_CC_ICP_CLK>,
<&clock_camcc CAM_CC_ICP_CLK_SRC>;
- clock-rates = <0 0 400000000 0 0 0 0 600000000>;
- clock-cntl-level = "turbo";
+ clock-rates =
+ <0 0 200000000 0 0 0 0 400000000>,
+ <0 0 200000000 0 0 0 0 600000000>;
+ clock-cntl-level = "svs", "turbo";
fw_name = "CAMERA_ICP.elf";
+ ubwc-cfg = <0x7F 0x1FF>;
status = "ok";
};
@@ -852,7 +884,8 @@
<&clock_camcc CAM_CC_IPE_0_CLK>,
<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
- clock-rates = <0 0 0 0 240000000>,
+ clock-rates =
+ <0 0 0 0 240000000>,
<0 0 0 0 404000000>,
<0 0 0 0 480000000>,
<0 0 0 0 538000000>,
@@ -1022,8 +1055,11 @@
<&clock_camcc CAM_CC_FD_CORE_CLK>,
<&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
src-clock-name = "fd_core_clk_src";
- clock-cntl-level = "svs";
- clock-rates = <0 0 0 0 0 400000000 0 0>;
+ clock-cntl-level = "svs", "svs_l1", "turbo";
+ clock-rates =
+ <0 0 0 0 0 400000000 0 0>,
+ <0 0 0 0 0 538000000 0 0>,
+ <0 0 0 0 0 600000000 0 0>;
status = "ok";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index a61d96e..fcfab09 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -602,6 +602,7 @@
<13 32>;
qcom,cmb-elem-size = <3 64>,
<7 64>,
+ <9 64>,
<13 64>;
clocks = <&clock_aop QDSS_CLK>;
@@ -674,6 +675,15 @@
};
port@7 {
+ reg = <9>;
+ tpda_in_tpdm_prng: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_prng_out_tpda>;
+ };
+ };
+
+ port@8 {
reg = <10>;
tpda_in_tpdm_qm: endpoint {
slave-mode;
@@ -682,7 +692,7 @@
};
};
- port@8 {
+ port@9 {
reg = <11>;
tpda_in_tpdm_north: endpoint {
slave-mode;
@@ -691,7 +701,7 @@
};
};
- port@9 {
+ port@10 {
reg = <13>;
tpda_in_tpdm_pimem: endpoint {
slave-mode;
@@ -1329,6 +1339,24 @@
};
};
+ tpdm_prng: tpdm@684c000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x684c000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-prng";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port{
+ tpdm_prng_out_tpda: endpoint {
+ remote-endpoint = <&tpda_in_tpdm_prng>;
+ };
+ };
+ };
+
tpdm_vsense: tpdm@6840000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x0003b968>;
@@ -1556,7 +1584,7 @@
reg = <0x69e1000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_0_CTI";
+ coresight-name = "coresight-cti-ddr_dl_0_cti";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1568,7 +1596,7 @@
reg = <0x69e4000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_1_CTI0";
+ coresight-name = "coresight-cti-ddr_dl_1_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1580,7 +1608,7 @@
reg = <0x69e5000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DDR_DL_1_CTI1";
+ coresight-name = "coresight-cti-ddr_dl_1_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1592,7 +1620,7 @@
reg = <0x6c09000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DLMM_CTI0";
+ coresight-name = "coresight-cti-dlmm_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1604,7 +1632,7 @@
reg = <0x6c0a000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-DLMM_CTI1";
+ coresight-name = "coresight-cti-dlmm_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1616,7 +1644,7 @@
reg = <0x78e0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI0";
+ coresight-name = "coresight-cti-apss_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1628,7 +1656,7 @@
reg = <0x78f0000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI1";
+ coresight-name = "coresight-cti-apss_cti1";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1640,7 +1668,7 @@
reg = <0x7900000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-APSS_CTI2";
+ coresight-name = "coresight-cti-apss_cti2";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
@@ -1968,7 +1996,7 @@
reg = <0x6b04000 0x1000>;
reg-names = "cti-base";
- coresight-name = "coresight-cti-SWAO_CTI0";
+ coresight-name = "coresight-cti-swao_cti0";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index eac21c8..ee0ad1f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -47,8 +47,8 @@
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
status = "ok";
- reg = <0x5000000 0x40000>;
- reg-names = "kgsl_3d0_reg_memory";
+ reg = <0x5000000 0x40000>, <0x5061000 0x800>;
+ reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_cx_dbgc_memory";
interrupts = <0 300 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi
new file mode 100644
index 0000000..492f07b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-hdk-audio-overlay.dtsi
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&snd_934x {
+ qcom,model = "sdm845-tavil-hdk-snd-card";
+
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT";
+
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index 1a8de22..10efa20 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -66,6 +66,12 @@
ibb-supply = <&lcdb_ncp_vreg>;
};
+&dsi_dual_nt36850_truly_cmd_display {
+ vddio-supply = <&pm660_l11>;
+ lab-supply = <&lcdb_ldo_vreg>;
+ ibb-supply = <&lcdb_ncp_vreg>;
+};
+
&sde_dp {
status = "disabled";
/delete-property/ vdda-1p2-supply;
@@ -154,6 +160,12 @@
/delete-property/ switch-source;
};
+&led_flash_rear_aux {
+ /delete-property/ flash-source;
+ /delete-property/ torch-source;
+ /delete-property/ switch-source;
+};
+
&led_flash_front {
/delete-property/ flash-source;
/delete-property/ torch-source;
@@ -230,6 +242,11 @@
/delete-property/ vdd_gfx-supply;
};
+&clock_cpucc {
+ /delete-property/ vdd_l3_mx_ao-supply;
+ /delete-property/ vdd_pwrcl_mx_ao-supply;
+};
+
&pil_modem {
/delete-property/ vdd_cx-supply;
/delete-property/ vdd_mx-supply;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
index 1265d2a..9313a75 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
@@ -37,3 +37,7 @@
&pcie0 {
status = "disabled";
};
+
+&eud {
+ vdda33-supply = <&pm660l_l7>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
index 9d722df..e7ff910 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
@@ -42,3 +42,7 @@
&pcie0 {
status = "disabled";
};
+
+&eud {
+ vdda33-supply = <&pm660l_l7>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 48a4a8b..d01149b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -239,9 +239,18 @@
qcom,vdd-io-voltage-level = <1808000 2960000>;
qcom,vdd-io-current-level = <200 22000>;
- pinctrl-names = "active", "sleep";
+ pinctrl-names = "active", "sleep", "ds_400KHz",
+ "ds_50MHz", "ds_100MHz", "ds_200MHz";
pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+ pinctrl-2 = <&sdc2_clk_ds_400KHz
+ &sdc2_cmd_ds_400KHz &sdc2_data_ds_400KHz>;
+ pinctrl-3 = <&sdc2_clk_ds_50MHz
+ &sdc2_cmd_ds_50MHz &sdc2_data_ds_50MHz>;
+ pinctrl-4 = <&sdc2_clk_ds_100MHz
+ &sdc2_cmd_ds_100MHz &sdc2_data_ds_100MHz>;
+ pinctrl-5 = <&sdc2_clk_ds_200MHz
+ &sdc2_cmd_ds_200MHz &sdc2_data_ds_200MHz>;
cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
@@ -277,10 +286,12 @@
&smb1355_charger_0 {
status = "ok";
+ qcom,disable-ctm;
};
&smb1355_charger_1 {
status = "ok";
+ qcom,disable-ctm;
};
&qupv3_se9_2uart {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index a0207e5..5035c9f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -142,6 +142,38 @@
};
};
+ sdc2_clk_ds_400KHz: sdc2_clk_ds_400KHz {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_ds_50MHz: sdc2_clk_ds_50MHz {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_ds_100MHz: sdc2_clk_ds_100MHz {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_ds_200MHz: sdc2_clk_ds_200MHz {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
sdc2_cmd_on: sdc2_cmd_on {
config {
pins = "sdc2_cmd";
@@ -158,6 +190,38 @@
};
};
+ sdc2_cmd_ds_400KHz: sdc2_cmd_ds_400KHz {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_ds_50MHz: sdc2_cmd_ds_50MHz {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_ds_100MHz: sdc2_cmd_ds_100MHz {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_ds_200MHz: sdc2_cmd_ds_200MHz {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
sdc2_data_on: sdc2_data_on {
config {
pins = "sdc2_data";
@@ -174,6 +238,38 @@
};
};
+ sdc2_data_ds_400KHz: sdc2_data_ds_400KHz {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_ds_50MHz: sdc2_data_ds_50MHz {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_ds_100MHz: sdc2_data_ds_100MHz {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_ds_200MHz: sdc2_data_ds_200MHz {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
pcie0 {
pcie0_clkreq_default: pcie0_clkreq_default {
mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr-audio-overlay.dtsi
new file mode 100644
index 0000000..77a89f0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr-audio-overlay.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845-audio-overlay.dtsi"
+
+&snd_934x {
+ qcom,model = "sdm845-qvr-tavil-snd-card";
+
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC1", "Handset Mic",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <0>;
+ /delete-property/ qcom,hph-en0-gpio;
+ /delete-property/ qcom,hph-en1-gpio;
+ /delete-property/ qcom,usbc-analog-en1-gpio;
+ /delete-property/ qcom,usbc-analog-en2-gpio;
+ /delete-property/ pinctrl-names;
+ /delete-property/ pinctrl-0;
+ /delete-property/ pinctrl-1;
+
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 2d701a5..00f0650 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -15,6 +15,27 @@
#include "smb1355.dtsi"
&vendor {
+ bluetooth: bt_wcn3990 {
+ compatible = "qca,wcn3990";
+ qca,bt-vdd-io-supply = <&pm8998_s3>;
+ qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+ qca,bt-vdd-core-supply = <&pm8998_l7>;
+ qca,bt-vdd-pa-supply = <&pm8998_l17>;
+ qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+ qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+ qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+ qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+ qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+ qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+ qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+ };
+
qvr_batterydata: qcom,battery-data {
qcom,batt-id-range-pct = <15>;
#include "fg-gen3-batterydata-mlp446579-3800mah.dtsi"
@@ -25,11 +46,20 @@
vbus-supply = <&smb2_vbus>;
};
+&qupv3_se6_4uart {
+ status = "ok";
+};
+
&pmi8998_fg {
qcom,battery-data = <&qvr_batterydata>;
qcom,fg-bmd-en-delay-ms = <300>;
};
+&pmi8998_charger {
+ qcom,battery-data = <&qvr_batterydata>;
+ qcom,sw-jeita-enable;
+};
+
&qupv3_se10_i2c {
status = "ok";
};
@@ -129,3 +159,7 @@
status = "ok";
};
+
+&wil6210 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 4254fcd..4ecb49a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -27,6 +27,7 @@
#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-video.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
@@ -451,6 +452,30 @@
ibb-supply = <&ibb_regulator>;
};
+ dsi_dual_nt36850_truly_cmd_display: qcom,dsi-display@16 {
+ compatible = "qcom,dsi-display";
+ label = "dsi_dual_nt36850_truly_cmd_display";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+ pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 6 0>;
+ qcom,panel-mode-gpio = <&tlmm 52 0>;
+
+ qcom,dsi-panel = <&dsi_dual_nt36850_truly_cmd>;
+ vddio-supply = <&pm8998_l14>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ };
+
sde_wb: qcom,wb-display@0 {
compatible = "qcom,wb-display";
cell-index = <0>;
@@ -518,6 +543,7 @@
&dsi_nt35597_truly_dsc_cmd {
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
+ qcom,ulps-enabled;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
@@ -633,15 +659,35 @@
};
&dsi_sim_cmd {
- qcom,mdss-dsi-t-clk-post = <0x0d>;
- qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,mdss-dsi-t-clk-post = <0x0c>;
+ qcom,mdss-dsi-t-clk-pre = <0x29>;
qcom,mdss-dsi-display-timings {
timing@0{
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
- 07 05 03 04 00];
qcom,display-topology = <1 0 1>,
- <2 0 1>;
- qcom,default-topology-index = <0>;
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
+ };
+ timing@1{
+ qcom,display-topology = <1 0 1>,
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
+ };
+ timing@2{
+ qcom,display-topology = <1 0 1>,
+ <2 2 1>;
+ qcom,default-topology-index = <1>;
+ qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+ 07 04 03 04 00];
};
};
};
@@ -657,8 +703,8 @@
qcom,default-topology-index = <0>;
};
timing@1{
- qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
- 07 05 03 04 00];
+ qcom,mdss-dsi-panel-phy-timings = [00 30 0c 0d 2a 27 0c
+ 0d 09 03 04 00];
qcom,display-topology = <2 0 2>,
<1 0 2>;
qcom,default-topology-index = <0>;
@@ -739,3 +785,17 @@
};
};
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,mdss-dsi-t-clk-post = <0x0E>;
+ qcom,mdss-dsi-t-clk-pre = <0x30>;
+ qcom,mdss-dsi-display-timings {
+ timing@0 {
+ qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 23 08
+ 08 05 03 04 00];
+ qcom,display-topology = <2 0 2>,
+ <1 0 2>;
+ qcom,default-topology-index = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index b9eac3c..967865b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -23,6 +23,8 @@
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
gdsc-supply = <&mdss_core_gdsc>;
qcom,platform-supply-entries {
#address-cells = <1>;
@@ -50,6 +52,8 @@
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
gdsc-supply = <&mdss_core_gdsc>;
qcom,platform-supply-entries {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 7c8eab4..4194e67 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -134,6 +134,7 @@
qcom,sde-mixer-blendstages = <0xb>;
qcom,sde-highest-bank-bit = <0x2>;
qcom,sde-ubwc-version = <0x200>;
+ qcom,sde-smart-panel-align-mode = <0xc>;
qcom,sde-panic-per-pipe;
qcom,sde-has-cdp;
qcom,sde-has-src-split;
@@ -202,6 +203,9 @@
qcom,sde-cdp-setting = <1 1>, <1 0>;
+ qcom,sde-qos-cpu-mask = <0x3>;
+ qcom,sde-qos-cpu-dma-latency = <300>;
+
qcom,sde-inline-rotator = <&mdss_rotator 0>;
qcom,sde-inline-rot-xin = <10 11>;
qcom,sde-inline-rot-xin-type = "sspp", "wb";
@@ -451,7 +455,7 @@
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
"pixel_clk", "pixel_clk_rcg",
"esc_clk";
-
+ qcom,null-insertion-enabled;
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -485,6 +489,7 @@
<&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
"pixel_clk", "pixel_clk_rcg", "esc_clk";
+ qcom,null-insertion-enabled;
qcom,ctrl-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -574,7 +579,10 @@
vdda-1p2-supply = <&pm8998_l26>;
vdda-0p9-supply = <&pm8998_l1>;
- reg = <0xae90000 0xa84>,
+ reg = <0xae90000 0x0dc>,
+ <0xae90200 0x0c0>,
+ <0xae90400 0x508>,
+ <0xae90a00 0x094>,
<0x88eaa00 0x200>,
<0x88ea200 0x200>,
<0x88ea600 0x200>,
@@ -583,7 +591,9 @@
<0x88ea030 0x10>,
<0x88e8000 0x20>,
<0x0aee1000 0x034>;
- reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+ /* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */
+ reg-names = "dp_ahb", "dp_aux", "dp_link",
+ "dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
"dp_mmss_cc", "qfprom_physical", "dp_pll",
"usb3_dp_com", "hdcp_physical";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index ba397e5..b9eabcf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -119,8 +119,9 @@
compatible = "qcom,qusb2phy-v2";
reg = <0x088e2000 0x400>,
<0x007801e8 0x4>,
- <0x088e0000 0x2000>;
- reg-names = "qusb_phy_base", "efuse_addr", "eud_base";
+ <0x088e7014 0x4>;
+ reg-names = "qusb_phy_base", "efuse_addr",
+ "refgen_north_bg_reg_addr";
qcom,efuse-bit-pos = <25>;
qcom,efuse-num-bits = <3>;
@@ -134,7 +135,10 @@
0x210 /* QUSB2PHY_PWR_CTRL1 */
0x230 /* QUSB2PHY_INTR_CTRL */
0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
- 0x254>; /* QUSB2PHY_TEST1 */
+ 0x254 /* QUSB2PHY_TEST1 */
+ 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x228 /* QUSB2PHY_SQ_CTRL1 */
+ 0x22c>; /* QUSB2PHY_SQ_CTRL2 */
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
@@ -222,6 +226,8 @@
0x14fc 0x80 0x00 /* RXA_RX_OFFSET_ADAPTOR_CNTRL2 */
0x1504 0x03 0x00 /* RXA_SIGDET_CNTRL */
0x150c 0x16 0x00 /* RXA_SIGDET_DEGLITCH_CNTRL */
+ 0x1564 0x05 0x00 /* RXA_RX_MODE_00 */
+ 0x14c0 0x03 0x00 /* RXA_VGA_CAL_CNTRL2 */
0x1830 0x0b 0x00 /* RXB_UCDR_FASTLOCK_FO_GAIN */
0x18d4 0x0f 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL2 */
0x18d8 0x4e 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL3 */
@@ -230,6 +236,8 @@
0x18fc 0x80 0x00 /* RXB_RX_OFFSET_ADAPTOR_CNTRL2 */
0x1904 0x03 0x00 /* RXB_SIGDET_CNTRL */
0x190c 0x16 0x00 /* RXB_SIGDET_DEGLITCH_CNTRL */
+ 0x1964 0x05 0x00 /* RXB_RX_MODE_00 */
+ 0x18c0 0x03 0x00 /* RXB_VGA_CAL_CNTRL2 */
0x1260 0x10 0x00 /* TXA_HIGHZ_DRVR_EN */
0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
@@ -270,6 +278,8 @@
0x1c48 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V4 */
0x1c4c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_LS */
0x1c50 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_LS */
+ 0x1e0c 0x21 0x00 /* PCS_REFGEN_REQ_CONFIG1 */
+ 0x1e10 0x60 0x00 /* PCS_REFGEN_REQ_CONFIG2 */
0x1c5c 0x02 0x00 /* PCS_RATE_SLEW_CNTRL */
0x1ca0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
0x1c8c 0x44 0x00 /* PCS_TSYNC_RSYNC_TIME */
@@ -280,6 +290,7 @@
0x1cb8 0x75 0x00 /* PCS_RXEQTRAINING_WAIT_TIME */
0x1cb0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
0x1cbc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+ 0x1cac 0x04 0x00 /* PCS_LFPS_DET_HIGH_COUNT_VAL */
0xffffffff 0xffffffff 0x00>;
qcom,qmp-phy-reg-offset =
@@ -396,8 +407,10 @@
/* Secondary USB port related QUSB2 PHY */
qusb_phy1: qusb@88e3000 {
compatible = "qcom,qusb2phy-v2";
- reg = <0x088e3000 0x400>;
- reg-names = "qusb_phy_base";
+ reg = <0x088e3000 0x400>,
+ <0x088e7014 0x4>;
+ reg-names = "qusb_phy_base",
+ "refgen_north_bg_reg_addr";
vdd-supply = <&pm8998_l1>;
vdda18-supply = <&pm8998_l12>;
@@ -409,7 +422,10 @@
0x210 /* QUSB2PHY_PWR_CTRL1 */
0x230 /* QUSB2PHY_INTR_CTRL */
0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */
- 0x254>; /* QUSB2PHY_TEST1 */
+ 0x254 /* QUSB2PHY_TEST1 */
+ 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x228 /* QUSB2PHY_SQ_CTRL1 */
+ 0x22c>; /* QUSB2PHY_SQ_CTRL2 */
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index ca83bed..d2ee9eb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -157,6 +157,33 @@
compatible = "qcom,msm-cam-smmu";
status = "ok";
+ msm_cam_smmu_lrme {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_smmu 0x1038 0x0>,
+ <&apps_smmu 0x1058 0x0>,
+ <&apps_smmu 0x1039 0x0>,
+ <&apps_smmu 0x1059 0x0>;
+ label = "lrme";
+ lrme_iova_mem_map: iova-mem-map {
+ iova-mem-region-shared {
+ /* Shared region is 100MB long */
+ iova-region-name = "shared";
+ iova-region-start = <0x7400000>;
+ iova-region-len = <0x6400000>;
+ iova-region-id = <0x1>;
+ status = "ok";
+ };
+ /* IO region is approximately 3.3 GB */
+ iova-mem-region-io {
+ iova-region-name = "io";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0xd2800000>;
+ iova-region-id = <0x3>;
+ status = "ok";
+ };
+ };
+ };
+
msm_cam_smmu_ife {
compatible = "qcom,msm-cam-smmu-cb";
iommus = <&apps_smmu 0x808 0x0>,
@@ -210,11 +237,20 @@
status = "ok";
};
+ iova-mem-region-secondary-heap {
+ /* Secondary heap region is 1MB long */
+ iova-region-name = "secheap";
+ iova-region-start = <0xd800000>;
+ iova-region-len = <0x100000>;
+ iova-region-id = <0x4>;
+ status = "ok";
+ };
+
iova-mem-region-io {
/* IO region is approximately 3.3 GB */
iova-region-name = "io";
- iova-region-start = <0xd800000>;
- iova-region-len = <0xd2800000>;
+ iova-region-start = <0xd900000>;
+ iova-region-len = <0xd2700000>;
iova-region-id = <0x3>;
status = "ok";
};
@@ -288,17 +324,17 @@
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 153000>,
+ MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 153000>,
+ MSM_BUS_SLAVE_CAMERA_CFG 0 76500>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
+ <MSM_BUS_MASTER_AMPSS_M0
+ MSM_BUS_SLAVE_CAMERA_CFG 0 150000>,
<MSM_BUS_MASTER_AMPSS_M0
MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
<MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
- <MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 600000>,
- <MSM_BUS_MASTER_AMPSS_M0
- MSM_BUS_SLAVE_CAMERA_CFG 0 600000>;
+ MSM_BUS_SLAVE_CAMERA_CFG 0 300000>;
vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
RPMH_REGULATOR_LEVEL_RETENTION
RPMH_REGULATOR_LEVEL_MIN_SVS
@@ -320,13 +356,14 @@
"csid0", "csid1", "csid2",
"ife0", "ife1", "ife2", "ipe0",
"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
- "icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+ "icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas0";
client-axi-port-names =
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_hf_2",
"cam_sf_1", "cam_hf_1", "cam_hf_2", "cam_hf_2",
"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
- "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+ "cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+ "cam_sf_1";
client-bus-camnoc-based;
qcom,axi-port-list {
qcom,axi-port1 {
@@ -406,4 +443,44 @@
};
};
};
+
+ qcom,cam-lrme {
+ compatible = "qcom,cam-lrme";
+ arch-compat = "lrme";
+ status = "ok";
+ };
+
+ cam_lrme: qcom,lrme@ac6b000 {
+ cell-index = <0>;
+ compatible = "qcom,lrme";
+ reg-names = "lrme";
+ reg = <0xac6b000 0xa00>;
+ reg-cam-base = <0x6b000>;
+ interrupt-names = "lrme";
+ interrupts = <0 476 0>;
+ regulator-names = "camss";
+ camss-supply = <&titan_top_gdsc>;
+ clock-names = "camera_ahb",
+ "camera_axi",
+ "soc_ahb_clk",
+ "cpas_ahb_clk",
+ "camnoc_axi_clk",
+ "lrme_clk_src",
+ "lrme_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+ <&clock_gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_LRME_CLK_SRC>,
+ <&clock_camcc CAM_CC_LRME_CLK>;
+ clock-rates = <0 0 0 0 0 200000000 200000000>,
+ <0 0 0 0 0 269000000 269000000>,
+ <0 0 0 0 0 320000000 320000000>,
+ <0 0 0 0 0 400000000 400000000>;
+
+ clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+ src-clock-name = "lrme_clk_src";
+ status = "ok";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
similarity index 87%
rename from arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
index fb99157..e1ec364 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr-overlay.dts
@@ -21,9 +21,11 @@
#include "sdm845-sde-display.dtsi"
#include "sdm845-qvr.dtsi"
+#include "sdm845-qvr-audio-overlay.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
qcom,msm-id = <321 0x20000>;
qcom,board-id = <0x01000B 0x20>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
similarity index 87%
rename from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
rename to arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
index c06b806..0a56c79 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qvr.dts
@@ -15,9 +15,10 @@
#include "sdm845-v2.dtsi"
#include "sdm845-qvr.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
+ model = "Qualcomm Technologies, Inc. SDM845 V2 QVR";
compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
qcom,board-id = <0x01000B 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.1.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.1.dtsi
index ff8c01a..b298272 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.1.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.1.dtsi
@@ -25,3 +25,7 @@
&clock_gcc {
compatible = "qcom,gcc-sdm845-v2.1", "syscon";
};
+
+&apps_smmu {
+ /delete-property/ qcom,no-asid-retention;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index cfa4517..0f6650d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -29,6 +29,32 @@
};
&soc {
+ qcom,memshare {
+ compatible = "qcom,memshare";
+
+ qcom,client_1 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <0>;
+ qcom,allocate-boot-time;
+ label = "modem";
+ };
+
+ qcom,client_2 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x0>;
+ qcom,client-id = <2>;
+ label = "modem";
+ };
+
+ mem_client_3_size: qcom,client_3 {
+ compatible = "qcom,memshare-peripheral";
+ qcom,peripheral-size = <0x500000>;
+ qcom,client-id = <1>;
+ label = "modem";
+ };
+ };
+
gpu_gx_domain_addr: syscon@0x5091508 {
compatible = "syscon";
reg = <0x5091508 0x4>;
@@ -55,6 +81,12 @@
&clock_cpucc {
compatible = "qcom,clk-cpu-osm-v2";
+ reg = <0x17d41000 0x1400>,
+ <0x17d43000 0x1400>,
+ <0x17d45800 0x1400>,
+ <0x78425c 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
};
&pcie1 {
@@ -239,9 +271,8 @@
< 1324800 1036800000 >,
< 1420800 1132800000 >,
< 1516800 1209600000 >,
- < 1612800 1401600000 >,
- < 1689600 1497600000 >,
- < 1766400 1593600000 >;
+ < 1689600 1305600000 >,
+ < 1766400 1401600000 >;
};
&devfreq_l3lat_4 {
@@ -251,8 +282,9 @@
< 1132800 748800000 >,
< 1363200 940800000 >,
< 1689600 1209600000 >,
- < 1996800 1401600000 >,
- < 2400000 1593600000 >;
+ < 1996800 1305600000 >,
+ < 2400000 1401600000 >,
+ < 2745600 1593600000 >;
};
&bwmon {
@@ -278,6 +310,13 @@
};
};
+&devfreq_compute {
+ qcom,core-dev-table =
+ < 1881600 MHZ_TO_MBPS( 200, 4) >,
+ < 2649600 MHZ_TO_MBPS(1017, 4) >,
+ < 2745600 MHZ_TO_MBPS(1804, 4) >;
+};
+
&clock_gcc {
compatible = "qcom,gcc-sdm845-v2", "syscon";
};
@@ -432,6 +471,9 @@
2553600 12045
2649600 15686
2745600 25586
+ 2764800 30000
+ 2784000 35000
+ 2803200 40000
>;
idle-cost-data = <
100 80 60 40
@@ -495,6 +537,9 @@
2553600 145
2649600 150
2745600 155
+ 2764800 160
+ 2784000 165
+ 2803200 170
>;
idle-cost-data = <
4 3 2 1
@@ -623,7 +668,7 @@
0x40 0x194 /* PLL_BIAS_CONTROL_1 */
0x20 0x198 /* PLL_BIAS_CONTROL_2 */
0x21 0x214 /* PWR_CTRL2 */
- 0x07 0x220 /* IMP_CTRL1 */
+ 0x08 0x220 /* IMP_CTRL1 */
0x58 0x224 /* IMP_CTRL2 */
0x45 0x240 /* TUNE1 */
0x29 0x244 /* TUNE2 */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index f162015..97904e3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -25,6 +25,7 @@
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/soc/qcom,dcc_v2.h>
#define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024))
@@ -591,7 +592,7 @@
alloc-ranges = <0 0x00000000 0 0xffffffff>;
reusable;
alignment = <0 0x400000>;
- size = <0 0xc00000>;
+ size = <0 0x1000000>;
};
qseecom_mem: qseecom_region {
@@ -1088,6 +1089,13 @@
< 1958400 1305600000 >;
};
+ l3_cdsp: qcom,l3-cdsp {
+ compatible = "devfreq-simple-dev";
+ clock-names = "devfreq_clk";
+ clocks = <&clock_cpucc L3_MISC_VOTE_CLK>;
+ governor = "powersave";
+ };
+
cpu_pmu: cpu-pmu {
compatible = "arm,armv8-pmuv3";
qcom,irq-is-percpu;
@@ -1123,6 +1131,15 @@
};
};
+ devfreq_compute: qcom,devfreq-compute {
+ compatible = "qcom,arm-cpu-mon";
+ qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+ qcom,target-dev = <&mincpubw>;
+ qcom,core-dev-table =
+ < 1881600 MHZ_TO_MBPS(200, 4) >,
+ < 2208000 MHZ_TO_MBPS(681, 4) >;
+ };
+
clock_rpmh: qcom,rpmhclk {
compatible = "qcom,rpmh-clk-sdm845";
#clock-cells = <1>;
@@ -1212,10 +1229,15 @@
compatible = "qcom,clk-cpu-osm";
reg = <0x17d41000 0x1400>,
<0x17d43000 0x1400>,
- <0x17d45800 0x1400>;
- reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base";
+ <0x17d45800 0x1400>,
+ <0x784248 0x4>;
+ reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+ "cpr_rc";
+ vdd_l3_mx_ao-supply = <&pm8998_s6_level_ao>;
+ vdd_pwrcl_mx_ao-supply = <&pm8998_s6_level_ao>;
- l3-devs = <&l3_cpu0 &l3_cpu4>;
+ qcom,mx-turbo-freq = <1478400000 1689600000 3300000001>;
+ l3-devs = <&l3_cpu0 &l3_cpu4 &l3_cdsp>;
clock-names = "xo_ao";
clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
@@ -1360,7 +1382,13 @@
<123 512 2097152 0>, <1 757 102400 0>, /* HS G3 RB */
<123 512 298189 0>, <1 757 1000 0>, /* HS G1 RB L2 */
<123 512 596378 0>, <1 757 1000 0>, /* HS G2 RB L2 */
- <123 512 4194304 0>, <1 757 204800 0>, /* HS G3 RB L2 */
+ /* As UFS working in HS G3 RB L2 mode, aggregated
+ * bandwidth (AB) should take care of providing
+ * optimum throughput requested. However, as tested,
+ * in order to scale up CNOC clock, instantaneous
+				 * bandwidth (IB) needs to be given a proper value too.
+ */
+ <123 512 4194304 0>, <1 757 204800 409600>, /* HS G3 RB L2 */
<123 512 7643136 0>, <1 757 307200 0>; /* Max. bandwidth */
qcom,bus-vector-names = "MIN",
@@ -1487,7 +1515,7 @@
vdd_mx-supply = <&pm8998_s6_level>;
vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
vdd_mss-supply = <&pm8005_s2_level>;
- vdd_mss-uV = <RPMH_REGULATOR_LEVEL_NOM>;
+ vdd_mss-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
qcom,firmware-name = "modem";
qcom,pil-self-auth;
qcom,sysmon-id = <0>;
@@ -1647,6 +1675,7 @@
reg-names = "eud_base";
clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "cfg_ahb_clk";
+ vdda33-supply = <&pm8998_l24>;
status = "ok";
};
@@ -2169,6 +2198,7 @@
qcom,irq-mask = <0x100>;
interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
label = "lpass";
+ cpu-affinity = <1 2>;
qcom,qos-config = <&glink_qos_adsp>;
qcom,ramp-time = <0xaf>;
};
@@ -2467,7 +2497,7 @@
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<1 618 0 0>, /* No vote */
- <1 618 0 800>; /* 100 KHz */
+ <1 618 0 300000>; /* 75 MHz */
clocks = <&clock_gcc GCC_PRNG_AHB_CLK>;
clock-names = "iface_clk";
};
@@ -2785,6 +2815,214 @@
reg-names = "dcc-base", "dcc-ram-base";
dcc-ram-offset = <0x6000>;
+
+ qcom,curr-link-list = <2>;
+ qcom,link-list = <DCC_READ 0x1740300 6 0>,
+ <DCC_READ 0x1620500 4 0>,
+ <DCC_READ 0x7840000 1 0>,
+ <DCC_READ 0x7841010 12 0>,
+ <DCC_READ 0x7842000 16 0>,
+ <DCC_READ 0x7842500 2 0>,
+ <DCC_LOOP 7 0 0>,
+ <DCC_READ 0x7841000 1 0>,
+ <DCC_LOOP 1 0 0>,
+ <DCC_LOOP 165 0 0>,
+ <DCC_READ 0x7841008 2 0>,
+ <DCC_LOOP 1 0 0>,
+ <DCC_READ 0x17dc3a84 2 0>,
+ <DCC_READ 0x17db3a84 1 0>,
+ <DCC_READ 0x1301000 2 0>,
+ <DCC_READ 0x17990044 1 0>,
+ <DCC_READ 0x17d45f00 1 0>,
+ <DCC_READ 0x17d45f08 6 0>,
+ <DCC_READ 0x17d45f80 1 0>,
+ <DCC_READ 0x17d47418 1 0>,
+ <DCC_READ 0x17d47570 1 0>,
+ <DCC_READ 0x17d47588 1 0>,
+ <DCC_READ 0x17d43700 1 0>,
+ <DCC_READ 0x17d43708 6 0>,
+ <DCC_READ 0x17d43780 1 0>,
+ <DCC_READ 0x17d44c18 1 0>,
+ <DCC_READ 0x17d44d70 1 0>,
+ <DCC_READ 0x17d44d88 1 0>,
+ <DCC_READ 0x17d41700 1 0>,
+ <DCC_READ 0x17d41708 6 0>,
+ <DCC_READ 0x17d41780 1 0>,
+ <DCC_READ 0x17d42c18 1 0>,
+ <DCC_READ 0x17d42d70 1 0>,
+ <DCC_READ 0x17d42d88 1 0>,
+ <DCC_WRITE 0x69ea00c 0x600007 1>,
+ <DCC_WRITE 0x69ea01c 0x136800 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136810 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136820 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136830 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136840 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136850 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136860 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x69ea01c 0x136870 1>,
+ <DCC_READ 0x69ea014 1 1>,
+ <DCC_WRITE 0x069ea01C 0x0003e9a0 1>,
+ <DCC_WRITE 0x069ea01C 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003c0a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003d1a0 1>,
+ <DCC_WRITE 0x069ea01C 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003d2a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01C 0x0003d5a0 1>,
+ <DCC_WRITE 0x069ea01C 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01C 0x0003d6a0 1>,
+ <DCC_WRITE 0x069ea01C 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003b1a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003b2a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003b5a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003b6a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003c2a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003c5a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x0003c6a0 1>,
+ <DCC_WRITE 0x069ea01c 0x001368a0 1>,
+ <DCC_READ 0x069ea014 1 1>,
+ <DCC_WRITE 0x069ea01c 0x00f1e000 1>,
+ <DCC_WRITE 0x069ea008 0x00000007 1>,
+ <DCC_READ 0x013e7e00 31 0>,
+ <DCC_READ 0x01132100 1 0>,
+ <DCC_READ 0x01136044 4 0>,
+ <DCC_READ 0x011360b0 1 0>,
+ <DCC_READ 0x0113e030 2 0>,
+ <DCC_READ 0x01141000 1 0>,
+ <DCC_READ 0x01142028 1 0>,
+ <DCC_READ 0x01148058 4 0>,
+ <DCC_READ 0x01160410 3 0>,
+ <DCC_READ 0x011604a0 1 0>,
+ <DCC_READ 0x011604b8 1 0>,
+ <DCC_READ 0x01165804 1 0>,
+ <DCC_READ 0x01166418 1 0>,
+ <DCC_READ 0x011b2100 1 0>,
+ <DCC_READ 0x011b6044 4 0>,
+ <DCC_READ 0x011be030 2 0>,
+ <DCC_READ 0x011c1000 1 0>,
+ <DCC_READ 0x011c2028 1 0>,
+ <DCC_READ 0x011c8058 4 0>,
+ <DCC_READ 0x011e0410 3 0>,
+ <DCC_READ 0x011e04a0 1 0>,
+ <DCC_READ 0x011e04b8 1 0>,
+ <DCC_READ 0x011e5804 1 0>,
+ <DCC_READ 0x011e6418 1 0>,
+ <DCC_READ 0x01232100 1 0>,
+ <DCC_READ 0x01236044 4 0>,
+ <DCC_READ 0x012360B0 1 0>,
+ <DCC_READ 0x0123E030 2 0>,
+ <DCC_READ 0x01241000 1 0>,
+ <DCC_READ 0x01242028 1 0>,
+ <DCC_READ 0x01248058 4 0>,
+ <DCC_READ 0x01260410 3 0>,
+ <DCC_READ 0x012604a0 1 0>,
+ <DCC_READ 0x012604b8 1 0>,
+ <DCC_READ 0x01265804 1 0>,
+ <DCC_READ 0x01266418 1 0>,
+ <DCC_READ 0x012b2100 1 0>,
+ <DCC_READ 0x012b6044 3 0>,
+ <DCC_READ 0x012b6050 1 0>,
+ <DCC_READ 0x012b60b0 1 0>,
+ <DCC_READ 0x012be030 2 0>,
+ <DCC_READ 0x012c1000 1 0>,
+ <DCC_READ 0x012c2028 1 0>,
+ <DCC_READ 0x012c8058 4 0>,
+ <DCC_READ 0x012e0410 3 0>,
+ <DCC_READ 0x012e04a0 1 0>,
+ <DCC_READ 0x012e04b8 1 0>,
+ <DCC_READ 0x012e5804 1 0>,
+ <DCC_READ 0x012e6418 1 0>,
+ <DCC_READ 0x01380900 8 0>,
+ <DCC_READ 0x01380d00 5 0>,
+ <DCC_READ 0x01350110 4 0>,
+ <DCC_READ 0x01430280 1 0>,
+ <DCC_READ 0x01430288 1 0>,
+ <DCC_READ 0x0143028c 7 0>,
+ <DCC_READ 0x01132100 1 0>,
+ <DCC_READ 0x01136044 4 0>,
+ <DCC_READ 0x011360b0 1 0>,
+ <DCC_READ 0x0113e030 2 0>,
+ <DCC_READ 0x01141000 1 0>,
+ <DCC_READ 0x01142028 1 0>,
+ <DCC_READ 0x01148058 4 0>,
+ <DCC_READ 0x01160410 3 0>,
+ <DCC_READ 0x011604a0 1 0>,
+ <DCC_READ 0x011604b8 1 0>,
+ <DCC_READ 0x01165804 1 0>,
+ <DCC_READ 0x01166418 1 0>,
+ <DCC_READ 0x011b2100 1 0>,
+ <DCC_READ 0x011b6044 4 0>,
+ <DCC_READ 0x011be030 2 0>,
+ <DCC_READ 0x011c1000 1 0>,
+ <DCC_READ 0x011c2028 1 0>,
+ <DCC_READ 0x011c8058 4 0>,
+ <DCC_READ 0x011e0410 3 0>,
+ <DCC_READ 0x011e04a0 1 0>,
+ <DCC_READ 0x011e04b8 1 0>,
+ <DCC_READ 0x011e5804 1 0>,
+ <DCC_READ 0x011e6418 1 0>,
+ <DCC_READ 0x01232100 1 0>,
+ <DCC_READ 0x01236044 4 0>,
+ <DCC_READ 0x012360b0 1 0>,
+ <DCC_READ 0x0123e030 2 0>,
+ <DCC_READ 0x01241000 1 0>,
+ <DCC_READ 0x01242028 1 0>,
+ <DCC_READ 0x01248058 4 0>,
+ <DCC_READ 0x01260410 3 0>,
+ <DCC_READ 0x012604a0 1 0>,
+ <DCC_READ 0x012604b8 1 0>,
+ <DCC_READ 0x01265804 1 0>,
+ <DCC_READ 0x01266418 1 0>,
+ <DCC_READ 0x012b2100 1 0>,
+ <DCC_READ 0x012b6044 3 0>,
+ <DCC_READ 0x012b6050 1 0>,
+ <DCC_READ 0x012b60b0 1 0>,
+ <DCC_READ 0x012be030 2 0>,
+ <DCC_READ 0x012C1000 1 0>,
+ <DCC_READ 0x012C2028 1 0>,
+ <DCC_READ 0x012C8058 4 0>,
+ <DCC_READ 0x012e0410 3 0>,
+ <DCC_READ 0x012e04a0 1 0>,
+ <DCC_READ 0x012e04b8 1 0>,
+ <DCC_READ 0x012e5804 1 0>,
+ <DCC_READ 0x012e6418 1 0>,
+ <DCC_READ 0x01380900 8 0>,
+ <DCC_READ 0x01380d00 5 0>,
+ <DCC_READ 0x01350110 4 0>,
+ <DCC_READ 0x01430280 1 0>,
+ <DCC_READ 0x01430288 1 0>,
+ <DCC_READ 0x0143028c 7 0>,
+ <DCC_READ 0x0c201244 1 0>,
+ <DCC_READ 0x0c202244 1 0>;
};
qcom,msm-core@780000 {
@@ -3265,6 +3503,182 @@
};
};
+ cpu0-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 1>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config0: emerg-config0 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev0 {
+ trip = <&emerg_config0>;
+ cooling-device =
+ <&CPU0 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 2>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config1: emerg-config1 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev1 {
+ trip = <&emerg_config1>;
+ cooling-device =
+ <&CPU1 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 3>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config2: emerg-config2 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev2 {
+ trip = <&emerg_config2>;
+ cooling-device =
+ <&CPU2 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-silver-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 4>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config3: emerg-config3 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev3 {
+ trip = <&emerg_config3>;
+ cooling-device =
+ <&CPU3 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu0-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 7>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config4: emerg-config4 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev4 {
+ trip = <&emerg_config4>;
+ cooling-device =
+ <&CPU4 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 8>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config5: emerg-config5 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev5 {
+ trip = <&emerg_config5>;
+ cooling-device =
+ <&CPU5 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 9>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config6: emerg-config6 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev6 {
+ trip = <&emerg_config6>;
+ cooling-device =
+ <&CPU6 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-gold-step {
+ polling-delay-passive = <100>;
+ polling-delay = <0>;
+ thermal-sensors = <&tsens0 10>;
+ thermal-governor = "step_wise";
+ trips {
+ emerg_config7: emerg-config7 {
+ temperature = <110000>;
+ hysteresis = <10000>;
+ type = "passive";
+ };
+ };
+ cooling-maps {
+ emerg_cdev7 {
+ trip = <&emerg_config7>;
+ cooling-device =
+ <&CPU7 THERMAL_MAX_LIMIT
+ THERMAL_MAX_LIMIT>;
+ };
+ };
+ };
+
lmh-dcvs-01 {
polling-delay-passive = <0>;
polling-delay = <0>;
@@ -3328,6 +3742,11 @@
qcom,dump-id = <0xec>;
};
+ fcm_dump {
+ qcom,dump-size = <0x400>;
+ qcom,dump-id = <0xee>;
+ };
+
rpm_sw_dump {
qcom,dump-size = <0x28000>;
qcom,dump-id = <0xea>;
@@ -3367,6 +3786,11 @@
qcom,dump-size = <0x1000>;
qcom,dump-id = <0xe8>;
};
+
+ tpdm_swao_dump {
+ qcom,dump-size = <0x512>;
+ qcom,dump-id = <0xf2>;
+ };
};
gpi_dma0: qcom,gpi-dma@0x800000 {
diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi
index bde4d1e..3412b25d 100644
--- a/arch/arm64/boot/dts/qcom/smb1355.dtsi
+++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi
@@ -52,8 +52,10 @@
qcom,chgr-misc@1600 {
reg = <0x1600 0x100>;
- interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog-bark";
+ interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog-bark",
+ "temperature-change";
};
};
};
@@ -97,8 +99,10 @@
qcom,chgr-misc@1600 {
reg = <0x1600 0x100>;
- interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "wdog-bark";
+ interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+ <0x16 0x6 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog-bark",
+ "temperature-change";
};
};
};
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 6b83b36..12365b3 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -51,6 +51,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 791d349..8757cc3 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -55,6 +55,7 @@
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index 784a986..dca942b 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -21,6 +21,9 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -52,6 +55,7 @@
CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
@@ -63,12 +67,15 @@
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -279,8 +286,6 @@
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
-CONFIG_WIL6210=m
-# CONFIG_WIL6210_TRACING is not set
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_CNSS_GENL=y
@@ -301,6 +306,7 @@
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
CONFIG_I2C_CHARDEV=y
@@ -345,6 +351,7 @@
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
@@ -368,8 +375,7 @@
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_DVB_MPQ=m
CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -441,6 +447,7 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -494,6 +501,7 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -531,9 +539,11 @@
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 6c29dff..4d7db53 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -22,6 +22,10 @@
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_CORE_CTL=y
@@ -54,6 +58,7 @@
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SDM670=y
CONFIG_PCI=y
@@ -67,12 +72,14 @@
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -264,6 +271,7 @@
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -284,7 +292,6 @@
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
-CONFIG_WIL6210=m
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_CNSS_GENL=y
@@ -305,6 +312,7 @@
CONFIG_DIAG_CHAR=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QCOM_GENI=y
@@ -348,6 +356,7 @@
CONFIG_MFD_I2C_PMIC=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_CPRH_KBSS=y
CONFIG_REGULATOR_QPNP_LABIBB=y
CONFIG_REGULATOR_QPNP_LCDB=y
@@ -358,6 +367,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -368,6 +378,9 @@
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
CONFIG_QCOM_KGSL=y
CONFIG_DRM=y
CONFIG_DRM_SDE_EVTLOG_DEBUG=y
@@ -386,7 +399,11 @@
CONFIG_SND_SOC=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
@@ -435,6 +452,7 @@
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -446,6 +464,7 @@
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
@@ -494,6 +513,7 @@
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
CONFIG_MSM_SERVICE_LOCATOR=y
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_BOOT_STATS=y
@@ -539,6 +559,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -638,6 +659,7 @@
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_CTI=y
CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index cdfa1eb..357a6b2 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -7,6 +7,9 @@
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
@@ -237,6 +240,7 @@
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -530,6 +534,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -554,6 +559,9 @@
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -585,13 +593,13 @@
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index a2a9c12..d0a32e7 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -301,8 +301,6 @@
# CONFIG_SERIO_SERPORT is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
CONFIG_DIAG_CHAR=y
@@ -550,6 +548,7 @@
CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_MSM_REMOTEQDSS=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_BIMC_BWMON=y
@@ -576,6 +575,9 @@
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -620,7 +622,6 @@
CONFIG_PANIC_ON_RT_THROTTLING=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
@@ -657,13 +658,13 @@
CONFIG_CORESIGHT_TGU=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index 110f750..cfd49b2 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -23,6 +23,8 @@ struct dma_iommu_mapping {
void *bitmap;
size_t bits;
dma_addr_t base;
+ u32 min_iova_align;
+ struct page *guard_page;
struct dma_fast_smmu_mapping *fast;
};
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7ee6d74..c186586 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -487,6 +487,7 @@
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.ne 1f
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 900c1ec..f7ce3d2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -176,7 +176,8 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
@@ -186,17 +187,18 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = tsk->curr_ret_stack;
@@ -210,9 +212,15 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
}
EXPORT_SYMBOL(save_stack_trace_tsk);
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
+
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5620500..19f3515 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -114,7 +114,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
for (i = -4; i < 1; i++) {
unsigned int val, bad;
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 14c4e3b..48b0354 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cf..3556715 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,12 +33,26 @@
#define LOWER_EL_AArch64_VECTOR 0x400
#define LOWER_EL_AArch32_VECTOR 0x600
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+ [0] = { 0, 0 }, /* Reset, unused */
+ [1] = { 4, 2 }, /* Undefined */
+ [2] = { 0, 0 }, /* SVC, unused */
+ [3] = { 4, 4 }, /* Prefetch abort */
+ [4] = { 8, 8 }, /* Data abort */
+ [5] = { 0, 0 }, /* HVC, unused */
+ [6] = { 4, 4 }, /* IRQ, unused */
+ [7] = { 4, 4 }, /* FIQ, unused */
+};
+
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
- u32 return_offset = (is_thumb) ? 4 : 0;
+ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
cpsr = mode | COMPAT_PSR_I_BIT;
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 41c2463..31d4684 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -31,13 +31,15 @@
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma-iommu.h>
#include <linux/dma-mapping-fast.h>
#include <linux/msm_dma_iommu_mapping.h>
-
+#include <linux/arm-smmu-errata.h>
+#include <soc/qcom/secure_buffer.h>
static int swiotlb __ro_after_init;
@@ -969,14 +971,21 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
* then the IOMMU core will have already configured a group for this
* device, and allocated the default domain for that group.
*/
- if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
- pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
- return false;
+ if (!domain)
+ goto out_err;
+
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ goto out_err;
+
+ dev->archdata.dma_ops = &iommu_dma_ops;
}
- dev->archdata.dma_ops = &iommu_dma_ops;
return true;
+out_err:
+ pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ return false;
}
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
@@ -1165,15 +1174,24 @@ static void __dma_clear_buffer(struct page *page, size_t size,
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
size_t size)
{
- unsigned int order = get_order(size);
+ unsigned int order;
unsigned int align = 0;
unsigned int count, start;
unsigned long flags;
+ dma_addr_t iova;
+ size_t guard_len;
+ size = PAGE_ALIGN(size);
+ if (mapping->min_iova_align)
+ guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ else
+ guard_len = 0;
+
+ order = get_order(size + guard_len);
if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
- count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ count = PAGE_ALIGN(size + guard_len) >> PAGE_SHIFT;
align = (1 << order) - 1;
spin_lock_irqsave(&mapping->lock, flags);
@@ -1187,16 +1205,41 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
bitmap_set(mapping->bitmap, start, count);
spin_unlock_irqrestore(&mapping->lock, flags);
- return mapping->base + (start << PAGE_SHIFT);
+ iova = mapping->base + (start << PAGE_SHIFT);
+
+ if (guard_len &&
+ iommu_map(mapping->domain, iova + size,
+ page_to_phys(mapping->guard_page),
+ guard_len, ARM_SMMU_GUARD_PROT)) {
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ return iova;
}
static inline void __free_iova(struct dma_iommu_mapping *mapping,
dma_addr_t addr, size_t size)
{
- unsigned int start = (addr - mapping->base) >> PAGE_SHIFT;
- unsigned int count = size >> PAGE_SHIFT;
+ unsigned int start;
+ unsigned int count;
unsigned long flags;
+ size_t guard_len;
+ addr = addr & PAGE_MASK;
+ size = PAGE_ALIGN(size);
+ if (mapping->min_iova_align) {
+ guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ iommu_unmap(mapping->domain, addr + size, guard_len);
+ } else {
+ guard_len = 0;
+ }
+
+ start = (addr - mapping->base) >> PAGE_SHIFT;
+ count = (size + guard_len) >> PAGE_SHIFT;
spin_lock_irqsave(&mapping->lock, flags);
bitmap_clear(mapping->bitmap, start, count);
spin_unlock_irqrestore(&mapping->lock, flags);
@@ -1942,6 +1985,23 @@ static int
bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
{
unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
+ int vmid = VMID_HLOS;
+ int min_iova_align = 0;
+
+ iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+ &min_iova_align);
+ iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID, &vmid);
+ if (vmid >= VMID_LAST || vmid < 0)
+ vmid = VMID_HLOS;
+
+ if (min_iova_align) {
+ mapping->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
+ mapping->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!mapping->guard_page)
+ return -ENOMEM;
+ }
mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 792dac8..b5d88f8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -537,7 +537,7 @@ static const struct fault_info fault_info[] = {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 58fca9a..3446b6f 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -576,6 +576,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@@ -654,6 +655,10 @@ static int __init ar7_register_devices(void)
u32 val;
int res;
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index a23adc4..36aabee 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
-
- ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index cc3a1e3..7e2bb12 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -508,16 +508,19 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np)
ar9330_clk_init(ref_clk, pll_base);
else {
pr_err("%s: could not find any appropriate clk_init()\n", dnfn);
- goto err_clk;
+ goto err_iounmap;
}
if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
pr_err("%s: could not register clk provider\n", dnfn);
- goto err_clk;
+ goto err_iounmap;
}
return;
+err_iounmap:
+ iounmap(pll_base);
+
err_clk:
clk_put(ref_clk);
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 956db6e..c5d3517 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -18,9 +18,24 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void *irq_stack[NR_CPUS];
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
+ *
+ * top ------------
+ * | task sp | <- irq_stack[cpu] + IRQ_STACK_START
+ * ------------
+ * | | <- First frame of IRQ context
+ * ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = (unsigned long)irq_stack[cpu];
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 2e41807..b6845db 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -239,8 +239,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_BASE_CMDEFTGT_SHF 0
#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
-#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 4be2763..bfff6ea 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -103,6 +103,7 @@ void output_thread_info_defines(void)
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 59476a6..a00e87b 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -361,7 +361,7 @@
END(mips_cps_get_bootcfg)
LEAF(mips_cps_boot_vpes)
- PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
+ lw ta2, COREBOOTCFG_VPEMASK(a0)
PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
#if defined(CONFIG_CPU_MIPSR6)
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 2ac6c26..ae810da 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -215,9 +215,11 @@
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jal plat_irq_dispatch
@@ -325,9 +327,11 @@
beq t0, t1, 2f
/* Switch to IRQ stack */
- li t1, _IRQ_STACK_SIZE
+ li t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
2:
jalr v0
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fbbf5fc..c558bce 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -50,9 +50,7 @@
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
- /* What the heck is this check doing ? */
- if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
- play_dead();
+ play_dead();
}
#endif
@@ -487,31 +485,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long pc,
unsigned long *ra)
{
+ unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
+ struct pt_regs *regs;
int leaf;
- extern void ret_from_irq(void);
- extern void ret_from_exception(void);
if (!stack_page)
return 0;
/*
- * If we reached the bottom of interrupt context,
- * return saved pc in pt_regs.
+ * IRQ stacks start at IRQ_STACK_START
+ * task stacks at THREAD_SIZE - 32
*/
- if (pc == (unsigned long)ret_from_irq ||
- pc == (unsigned long)ret_from_exception) {
- struct pt_regs *regs;
- if (*sp >= stack_page &&
- *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
- regs = (struct pt_regs *)*sp;
- pc = regs->cp0_epc;
- if (!user_mode(regs) && __kernel_text_address(pc)) {
- *sp = regs->regs[29];
- *ra = regs->regs[31];
- return pc;
- }
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+ * Follow pointer to tasks kernel stack frame where interrupted
+ * state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
}
return 0;
}
@@ -532,8 +551,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
if (leaf < 0)
return 0;
- if (*sp < stack_page ||
- *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
+ if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 6d0f132..47c9646 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -587,11 +587,11 @@ void __init bmips_cpu_setup(void)
/* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 7ebb191..95ba427 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -68,6 +68,9 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
/*
* A logcal cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
@@ -369,9 +372,12 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- cpumask_set_cpu(cpu, &cpu_callin_map);
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
synchronise_count_slave(cpu);
+ /* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -380,6 +386,12 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
/*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
+ complete(&cpu_running);
+
+ /*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
*/
@@ -430,22 +442,23 @@ void smp_prepare_boot_cpu(void)
{
set_cpu_possible(0, true);
set_cpu_online(0, true);
- cpumask_set_cpu(0, &cpu_callin_map);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
mp_ops->boot_secondary(cpu, tidle);
- /*
- * Trust is futile. We should really have timeouts ...
- */
- while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
- udelay(100);
- schedule();
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
}
synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
return 0;
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index d5de675..f0a0e6d 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -182,7 +182,7 @@
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
* gets that alignment. .sbss should be empty, so there will be
* no holes after __init_end. */
- BSS_SECTION(0, 0x10000, 0)
+ BSS_SECTION(0, 0x10000, 8)
_end = . ;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 9056547..95bec46 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -469,8 +469,8 @@ void __init ltq_soc_init(void)
panic("Failed to load xbar nodes from devicetree");
if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
- if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
- res_xbar.name) < 0)
+ if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
+ res_xbar.name))
panic("Failed to get xbar resources");
ltq_xbar_membase = ioremap_nocache(res_xbar.start,
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e9385bc..9ade60c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2386,7 +2386,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
return SIGILL;
}
}
@@ -2460,7 +2459,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
default:
/* Reserved R6 ops */
- pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
return SIGILL;
}
}
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
index fd71b8d..5bec64f 100644
--- a/arch/mips/math-emu/dp_fmax.c
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
index c1072b0..a287b23 100644
--- a/arch/mips/math-emu/dp_fmin.c
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c..e0d9be5 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -14,22 +14,45 @@
#include "ieee754dp.h"
-enum maddf_flags {
- maddf_negate_product = 1 << 0,
-};
+
+/* 128 bits shift right logical with rounding. */
+void srl128(u64 *hptr, u64 *lptr, int count)
+{
+ u64 low;
+
+ if (count >= 128) {
+ *lptr = *hptr != 0 || *lptr != 0;
+ *hptr = 0;
+ } else if (count >= 64) {
+ if (count == 64) {
+ *lptr = *hptr | (*lptr != 0);
+ } else {
+ low = *lptr;
+ *lptr = *hptr >> (count - 64);
+ *lptr |= (*hptr << (128 - count)) != 0 || low != 0;
+ }
+ *hptr = 0;
+ } else {
+ low = *lptr;
+ *lptr = low >> count | *hptr << (64 - count);
+ *lptr |= (low << (64 - count)) != 0;
+ *hptr = *hptr >> count;
+ }
+}
static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
union ieee754dp y, enum maddf_flags flags)
{
int re;
int rs;
- u64 rm;
unsigned lxm;
unsigned hxm;
unsigned lym;
unsigned hym;
u64 lrm;
u64 hrm;
+ u64 lzm;
+ u64 hzm;
u64 t;
u64 at;
int s;
@@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
ieee754_clearcx();
- switch (zc) {
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
+ /*
+ * Handle the cases when at least one of x, y or z is a NaN.
+ * Order of precedence is sNaN, qNaN and z, x, y.
+ */
+ if (zc == IEEE754_CLASS_SNAN)
return ieee754dp_nanxcpt(z);
- case IEEE754_CLASS_DNORM:
- DPDNORMZ;
- /* QNAN is handled separately below */
- }
-
- switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
- return ieee754dp_nanxcpt(y);
-
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ if (xc == IEEE754_CLASS_SNAN)
return ieee754dp_nanxcpt(x);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ if (yc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(y);
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (xc == IEEE754_CLASS_QNAN)
+ return x;
+ if (yc == IEEE754_CLASS_QNAN)
return y;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
- return x;
+ if (zc == IEEE754_CLASS_DNORM)
+ DPDNORMZ;
+ /* ZERO z cases are handled separately below */
+ switch (CLPAIR(xc, yc)) {
/*
* Infinity handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
ieee754_setcx(IEEE754_INVALID_OPERATION);
return ieee754dp_indef();
@@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- return ieee754dp_inf(xs ^ ys);
+ if ((zc == IEEE754_CLASS_INF) &&
+ ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
+ ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
+ /*
+ * Cases of addition of infinities with opposite signs
+ * or subtraction of infinities with same signs.
+ */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+ }
+ /*
+ * z is here either not an infinity, or an infinity having the
+ * same sign as product (x*y) (in case of MADDF.D instruction)
+ * or product -(x*y) (in MSUBF.D case). The result must be an
+ * infinity, and its sign is determined only by the value of
+ * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
+ */
+ if (flags & MADDF_NEGATE_PRODUCT)
+ return ieee754dp_inf(1 ^ (xs ^ ys));
+ else
+ return ieee754dp_inf(xs ^ ys);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
@@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
if (zc == IEEE754_CLASS_INF)
return ieee754dp_inf(zs);
- /* Multiplication is 0 so just return z */
+ if (zc == IEEE754_CLASS_ZERO) {
+ /* Handle cases +0 + (-0) and similar ones. */
+ if ((!(flags & MADDF_NEGATE_PRODUCT)
+ && (zs == (xs ^ ys))) ||
+ ((flags & MADDF_NEGATE_PRODUCT)
+ && (zs != (xs ^ ys))))
+ /*
+ * Cases of addition of zeros of equal signs
+ * or subtraction of zeroes of opposite signs.
+ * The sign of the resulting zero is in any
+ * such case determined only by the sign of z.
+ */
+ return z;
+
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ }
+ /* x*y is here 0, and z is not 0, so just return z */
return z;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754dp_inf(zs);
DPDNORMY;
break;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754dp_inf(zs);
DPDNORMX;
break;
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754dp_inf(zs);
/* fall through to real computations */
}
@@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
re = xe + ye;
rs = xs ^ ys;
- if (flags & maddf_negate_product)
+ if (flags & MADDF_NEGATE_PRODUCT)
rs ^= 1;
/* shunt to top of word */
@@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
ym <<= 64 - (DP_FBITS + 1);
/*
- * Multiply 64 bits xm, ym to give high 64 bits rm with stickness.
+ * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm.
*/
/* 32 * 32 => 64 */
@@ -195,78 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
hrm = hrm + (t >> 32);
- rm = hrm | (lrm != 0);
-
- /*
- * Sticky shift down to normal rounding precision.
- */
- if ((s64) rm < 0) {
- rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
- ((rm << (DP_FBITS + 1 + 3)) != 0);
+ /* Put explicit bit at bit 126 if necessary */
+ if ((int64_t)hrm < 0) {
+ lrm = (hrm << 63) | (lrm >> 1);
+ hrm = hrm >> 1;
re++;
- } else {
- rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
- ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
}
- assert(rm & (DP_HIDDEN_BIT << 3));
- /* And now the addition */
- assert(zm & DP_HIDDEN_BIT);
+ assert(hrm & (1 << 62));
- /*
- * Provide guard,round and stick bit space.
- */
- zm <<= 3;
+ if (zc == IEEE754_CLASS_ZERO) {
+ /*
+ * Move explicit bit from bit 126 to bit 55 since the
+ * ieee754dp_format code expects the mantissa to be
+ * 56 bits wide (53 + 3 rounding bits).
+ */
+ srl128(&hrm, &lrm, (126 - 55));
+ return ieee754dp_format(rs, re, lrm);
+ }
+ /* Move explicit bit from bit 52 to bit 126 */
+ lzm = 0;
+ hzm = zm << 10;
+ assert(hzm & (1 << 62));
+
+ /* Make the exponents the same */
if (ze > re) {
/*
* Have to shift y fraction right to align.
*/
s = ze - re;
- rm = XDPSRS(rm, s);
+ srl128(&hrm, &lrm, s);
re += s;
} else if (re > ze) {
/*
* Have to shift x fraction right to align.
*/
s = re - ze;
- zm = XDPSRS(zm, s);
+ srl128(&hzm, &lzm, s);
ze += s;
}
assert(ze == re);
assert(ze <= DP_EMAX);
+ /* Do the addition */
if (zs == rs) {
/*
- * Generate 28 bit result of adding two 27 bit numbers
- * leaving result in xm, xs and xe.
+ * Generate 128 bit result by adding two 127 bit numbers
+ * leaving result in hzm:lzm, zs and ze.
*/
- zm = zm + rm;
-
- if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
- zm = XDPSRS1(zm);
+ hzm = hzm + hrm + (lzm > (lzm + lrm));
+ lzm = lzm + lrm;
+ if ((int64_t)hzm < 0) { /* carry out */
+ srl128(&hzm, &lzm, 1);
ze++;
}
} else {
- if (zm >= rm) {
- zm = zm - rm;
+ if (hzm > hrm || (hzm == hrm && lzm >= lrm)) {
+ hzm = hzm - hrm - (lzm < lrm);
+ lzm = lzm - lrm;
} else {
- zm = rm - zm;
+ hzm = hrm - hzm - (lrm < lzm);
+ lzm = lrm - lzm;
zs = rs;
}
- if (zm == 0)
+ if (lzm == 0 && hzm == 0)
return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
/*
- * Normalize to rounding precision.
+ * Put explicit bit at bit 126 if necessary.
*/
- while ((zm >> (DP_FBITS + 3)) == 0) {
- zm <<= 1;
- ze--;
+ if (hzm == 0) {
+ /* left shift by 63 or 64 bits */
+ if ((int64_t)lzm < 0) {
+ /* MSB of lzm is the explicit bit */
+ hzm = lzm >> 1;
+ lzm = lzm << 63;
+ ze -= 63;
+ } else {
+ hzm = lzm;
+ lzm = 0;
+ ze -= 64;
+ }
+ }
+
+ t = 0;
+ while ((hzm >> (62 - t)) == 0)
+ t++;
+
+ assert(t <= 62);
+ if (t) {
+ hzm = hzm << t | lzm >> (64 - t);
+ lzm = lzm << t;
+ ze -= t;
}
}
- return ieee754dp_format(zs, ze, zm);
+ /*
+ * Move explicit bit from bit 126 to bit 55 since the
+ * ieee754dp_format code expects the mantissa to be
+ * 56 bits wide (53 + 3 rounding bits).
+ */
+ srl128(&hzm, &lzm, (126 - 55));
+
+ return ieee754dp_format(zs, ze, lzm);
}
union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
@@ -278,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
union ieee754dp y)
{
- return _dp_maddf(z, x, y, maddf_negate_product);
+ return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 8bc2f69..dd2071f 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -26,6 +26,10 @@
#define CLPAIR(x, y) ((x)*6+(y))
+enum maddf_flags {
+ MADDF_NEGATE_PRODUCT = 1 << 0,
+};
+
static inline void ieee754_clearcx(void)
{
ieee754_csr.cx = 0;
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 8476067..0f63e42 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x)
return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
}
+/* 64 bit right shift with rounding */
+#define XSPSRS64(v, rs) \
+ (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
+
/* 3bit extended single precision sticky right shift */
#define XSPSRS(v, rs) \
((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
index 4d00084..74a5a00 100644
--- a/arch/mips/math-emu/sp_fmax.c
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
index 4eb1bb9..c51385f 100644
--- a/arch/mips/math-emu/sp_fmin.c
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4..7195fe7 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -14,9 +14,6 @@
#include "ieee754sp.h"
-enum maddf_flags {
- maddf_negate_product = 1 << 0,
-};
static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
union ieee754sp y, enum maddf_flags flags)
@@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
int re;
int rs;
unsigned rm;
- unsigned short lxm;
- unsigned short hxm;
- unsigned short lym;
- unsigned short hym;
- unsigned lrm;
- unsigned hrm;
- unsigned t;
- unsigned at;
+ uint64_t rm64;
+ uint64_t zm64;
int s;
COMPXSP;
@@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
ieee754_clearcx();
- switch (zc) {
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
+ /*
+ * Handle the cases when at least one of x, y or z is a NaN.
+ * Order of precedence is sNaN, qNaN and z, x, y.
+ */
+ if (zc == IEEE754_CLASS_SNAN)
return ieee754sp_nanxcpt(z);
- case IEEE754_CLASS_DNORM:
- SPDNORMZ;
- /* QNAN is handled separately below */
- }
-
- switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
- return ieee754sp_nanxcpt(y);
-
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ if (xc == IEEE754_CLASS_SNAN)
return ieee754sp_nanxcpt(x);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ if (yc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(y);
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (xc == IEEE754_CLASS_QNAN)
+ return x;
+ if (yc == IEEE754_CLASS_QNAN)
return y;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
- return x;
+ if (zc == IEEE754_CLASS_DNORM)
+ SPDNORMZ;
+ /* ZERO z cases are handled separately below */
+
+ switch (CLPAIR(xc, yc)) {
+
/*
* Infinity handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
ieee754_setcx(IEEE754_INVALID_OPERATION);
return ieee754sp_indef();
@@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- return ieee754sp_inf(xs ^ ys);
+ if ((zc == IEEE754_CLASS_INF) &&
+ ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
+ ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
+ /*
+ * Cases of addition of infinities with opposite signs
+ * or subtraction of infinities with same signs.
+ */
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+ }
+ /*
+ * z is here either not an infinity, or an infinity having the
+	 * same sign as product (x*y) (in case of MADDF.S instruction)
+	 * or product -(x*y) (in MSUBF.S case). The result must be an
+ * infinity, and its sign is determined only by the value of
+ * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
+ */
+ if (flags & MADDF_NEGATE_PRODUCT)
+ return ieee754sp_inf(1 ^ (xs ^ ys));
+ else
+ return ieee754sp_inf(xs ^ ys);
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
@@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
if (zc == IEEE754_CLASS_INF)
return ieee754sp_inf(zs);
- /* Multiplication is 0 so just return z */
+ if (zc == IEEE754_CLASS_ZERO) {
+ /* Handle cases +0 + (-0) and similar ones. */
+ if ((!(flags & MADDF_NEGATE_PRODUCT)
+ && (zs == (xs ^ ys))) ||
+ ((flags & MADDF_NEGATE_PRODUCT)
+ && (zs != (xs ^ ys))))
+ /*
+ * Cases of addition of zeros of equal signs
+ * or subtraction of zeroes of opposite signs.
+ * The sign of the resulting zero is in any
+ * such case determined only by the sign of z.
+ */
+ return z;
+
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ }
+ /* x*y is here 0, and z is not 0, so just return z */
return z;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754sp_inf(zs);
SPDNORMY;
break;
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754sp_inf(zs);
SPDNORMX;
break;
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
+ if (zc == IEEE754_CLASS_INF)
return ieee754sp_inf(zs);
/* fall through to real computations */
}
@@ -158,108 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
re = xe + ye;
rs = xs ^ ys;
- if (flags & maddf_negate_product)
+ if (flags & MADDF_NEGATE_PRODUCT)
rs ^= 1;
- /* shunt to top of word */
- xm <<= 32 - (SP_FBITS + 1);
- ym <<= 32 - (SP_FBITS + 1);
+ /* Multiply 24 bit xm and ym to give 48 bit results */
+ rm64 = (uint64_t)xm * ym;
- /*
- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
- */
- lxm = xm & 0xffff;
- hxm = xm >> 16;
- lym = ym & 0xffff;
- hym = ym >> 16;
+ /* Shunt to top of word */
+ rm64 = rm64 << 16;
- lrm = lxm * lym; /* 16 * 16 => 32 */
- hrm = hxm * hym; /* 16 * 16 => 32 */
-
- t = lxm * hym; /* 16 * 16 => 32 */
- at = lrm + (t << 16);
- hrm += at < lrm;
- lrm = at;
- hrm = hrm + (t >> 16);
-
- t = hxm * lym; /* 16 * 16 => 32 */
- at = lrm + (t << 16);
- hrm += at < lrm;
- lrm = at;
- hrm = hrm + (t >> 16);
-
- rm = hrm | (lrm != 0);
-
- /*
- * Sticky shift down to normal rounding precision.
- */
- if ((int) rm < 0) {
- rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
- ((rm << (SP_FBITS + 1 + 3)) != 0);
+ /* Put explicit bit at bit 62 if necessary */
+ if ((int64_t) rm64 < 0) {
+ rm64 = rm64 >> 1;
re++;
- } else {
- rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
- ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
}
- assert(rm & (SP_HIDDEN_BIT << 3));
- /* And now the addition */
+ assert(rm64 & (1 << 62));
- assert(zm & SP_HIDDEN_BIT);
+ if (zc == IEEE754_CLASS_ZERO) {
+ /*
+ * Move explicit bit from bit 62 to bit 26 since the
+ * ieee754sp_format code expects the mantissa to be
+ * 27 bits wide (24 + 3 rounding bits).
+ */
+ rm = XSPSRS64(rm64, (62 - 26));
+ return ieee754sp_format(rs, re, rm);
+ }
- /*
- * Provide guard,round and stick bit space.
- */
- zm <<= 3;
+ /* Move explicit bit from bit 23 to bit 62 */
+ zm64 = (uint64_t)zm << (62 - 23);
+ assert(zm64 & (1 << 62));
+ /* Make the exponents the same */
if (ze > re) {
/*
* Have to shift r fraction right to align.
*/
s = ze - re;
- rm = XSPSRS(rm, s);
+ rm64 = XSPSRS64(rm64, s);
re += s;
} else if (re > ze) {
/*
* Have to shift z fraction right to align.
*/
s = re - ze;
- zm = XSPSRS(zm, s);
+ zm64 = XSPSRS64(zm64, s);
ze += s;
}
assert(ze == re);
assert(ze <= SP_EMAX);
+ /* Do the addition */
if (zs == rs) {
/*
- * Generate 28 bit result of adding two 27 bit numbers
- * leaving result in zm, zs and ze.
+ * Generate 64 bit result by adding two 63 bit numbers
+ * leaving result in zm64, zs and ze.
*/
- zm = zm + rm;
-
- if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
- zm = XSPSRS1(zm);
+ zm64 = zm64 + rm64;
+ if ((int64_t)zm64 < 0) { /* carry out */
+ zm64 = XSPSRS1(zm64);
ze++;
}
} else {
- if (zm >= rm) {
- zm = zm - rm;
+ if (zm64 >= rm64) {
+ zm64 = zm64 - rm64;
} else {
- zm = rm - zm;
+ zm64 = rm64 - zm64;
zs = rs;
}
- if (zm == 0)
+ if (zm64 == 0)
return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
/*
- * Normalize in extended single precision
+ * Put explicit bit at bit 62 if necessary.
*/
- while ((zm >> (SP_MBITS + 3)) == 0) {
- zm <<= 1;
+ while ((zm64 >> 62) == 0) {
+ zm64 <<= 1;
ze--;
}
-
}
+
+ /*
+ * Move explicit bit from bit 62 to bit 26 since the
+ * ieee754sp_format code expects the mantissa to be
+ * 27 bits wide (24 + 3 rounding bits).
+ */
+ zm = XSPSRS64(zm64, (62 - 26));
+
return ieee754sp_format(zs, ze, zm);
}
@@ -272,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
union ieee754sp y)
{
- return _sp_maddf(z, x, y, maddf_negate_product);
+ return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
}
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 277cf52..6c17cba 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -80,7 +80,7 @@ static struct insn insn_table_MM[] = {
{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ld, 0, 0 },
- { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
+ { insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
{ insn_lld, 0, 0 },
{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 3c7c9bf..6f892c1 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -176,7 +176,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
static struct rt2880_pmx_func spis_grp_mt7628[] = {
FUNC("pwm_uart2", 3, 14, 4),
- FUNC("util", 2, 14, 4),
+ FUNC("utif", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
};
@@ -190,28 +190,28 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
FUNC("jtag", 3, 30, 1),
- FUNC("util", 2, 30, 1),
+ FUNC("utif", 2, 30, 1),
FUNC("gpio", 1, 30, 1),
FUNC("p4led_kn", 0, 30, 1),
};
static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
FUNC("jtag", 3, 31, 1),
- FUNC("util", 2, 31, 1),
+ FUNC("utif", 2, 31, 1),
FUNC("gpio", 1, 31, 1),
FUNC("p3led_kn", 0, 31, 1),
};
static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
FUNC("jtag", 3, 32, 1),
- FUNC("util", 2, 32, 1),
+ FUNC("utif", 2, 32, 1),
FUNC("gpio", 1, 32, 1),
FUNC("p2led_kn", 0, 32, 1),
};
static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
FUNC("jtag", 3, 33, 1),
- FUNC("util", 2, 33, 1),
+ FUNC("utif", 2, 33, 1),
FUNC("gpio", 1, 33, 1),
FUNC("p1led_kn", 0, 33, 1),
};
@@ -232,28 +232,28 @@ static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
FUNC("jtag", 3, 39, 1),
- FUNC("util", 2, 39, 1),
+ FUNC("utif", 2, 39, 1),
FUNC("gpio", 1, 39, 1),
FUNC("p4led_an", 0, 39, 1),
};
static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
FUNC("jtag", 3, 40, 1),
- FUNC("util", 2, 40, 1),
+ FUNC("utif", 2, 40, 1),
FUNC("gpio", 1, 40, 1),
FUNC("p3led_an", 0, 40, 1),
};
static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
FUNC("jtag", 3, 41, 1),
- FUNC("util", 2, 41, 1),
+ FUNC("utif", 2, 41, 1),
FUNC("gpio", 1, 41, 1),
FUNC("p2led_an", 0, 41, 1),
};
static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
FUNC("jtag", 3, 42, 1),
- FUNC("util", 2, 42, 1),
+ FUNC("utif", 2, 42, 1),
FUNC("gpio", 1, 42, 1),
FUNC("p1led_an", 0, 42, 1),
};
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 9e4631a..3e68e35 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -145,5 +145,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
rt2880_pinmux_data = rt3883_pinmux_data;
- ralink_soc == RT3883_SOC;
+ ralink_soc = RT3883_SOC;
}
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 518f4f5..d63d425 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -39,7 +39,7 @@
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
- * on every box.
+ * on every box.
*/
#include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
* configure:
*
- * Configure the cpu with a given data image. First turn off the counters,
+ * Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}
/*
- * Open the device and initialize all of its memory. The device is only
+ * Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
@@ -298,8 +298,8 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int err;
size_t image_size;
@@ -307,11 +307,11 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
uint32_t interface_type;
uint32_t test;
- if (perf_processor_interface == ONYX_INTF)
+ if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
- else if (perf_processor_interface == CUDA_INTF)
+ else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
- else
+ else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
@@ -331,22 +331,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
/* First check the machine type is correct for
the requested image */
- if (((perf_processor_interface == CUDA_INTF) &&
- (interface_type != CUDA_INTF)) ||
- ((perf_processor_interface == ONYX_INTF) &&
- (interface_type != ONYX_INTF)))
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
- if (((interface_type == CUDA_INTF) &&
+ if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
- ((interface_type == ONYX_INTF) &&
- (test >= MAX_ONYX_IMAGES)))
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
- if (interface_type == CUDA_INTF)
+ if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
@@ -360,7 +360,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/*
+/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
@@ -368,9 +368,9 @@ static void perf_patch_images(void)
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
- /*
+ /*
* We can only use the lower 32-bits, the upper 32-bits should be 0
- * anyway given this is in the kernel
+ * anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -378,21 +378,21 @@ static void perf_patch_images(void)
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
- onyx_images[TLBMISS][15] &= 0xffffff00;
+ onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[BIG_CPI][15] &= 0xffffff00;
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -405,24 +405,24 @@ static void perf_patch_images(void)
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
- cuda_images[TLBMISS][16] =
+ cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBMISS][17] =
+ cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[TLBHANDMISS][16] =
+ cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBHANDMISS][17] =
+ cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[BIG_CPI][16] =
+ cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[BIG_CPI][17] =
+ cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
@@ -434,7 +434,7 @@ static void perf_patch_images(void)
/*
* ioctl routine
- * All routines effect the processor that they are executed on. Thus you
+ * All routines effect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
@@ -460,7 +460,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* copy out the Counters */
- if (copy_to_user((void __user *)arg, raddr,
+ if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
@@ -488,7 +488,7 @@ static const struct file_operations perf_fops = {
.open = perf_open,
.release = perf_release
};
-
+
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
@@ -596,7 +596,7 @@ static int perf_stop_counters(uint32_t *raddr)
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
-
+
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -618,7 +618,7 @@ static int perf_stop_counters(uint32_t *raddr)
userbuf[22] = 0;
userbuf[23] = 0;
- /*
+ /*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
@@ -626,13 +626,13 @@ static int perf_stop_counters(uint32_t *raddr)
} else {
/*
- * Read RDR-15 which contains the counters and sticky bits
+ * Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
- /*
+ /*
* Clear out the counters
*/
perf_rdr_clear(15);
@@ -645,7 +645,7 @@ static int perf_stop_counters(uint32_t *raddr)
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
-
+
return 0;
}
@@ -683,7 +683,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
- }
+ }
/* Check for bits an even number of 64 */
if ((xbits = width & 0x03f) != 0) {
@@ -809,18 +809,22 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+ if (!runway) {
+ pr_err("perf_write_image: ioremap failed!\n");
+ return -ENOMEM;
+ }
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
- __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
-
+
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
- return 0;
+ return 0;
}
/*
@@ -844,7 +848,7 @@ printk("perf_rdr_write\n");
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
- }
+ }
}
printk("perf_rdr_write done\n");
}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307..41e60a9 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@
10: ldd 0(%r25), %r25
11: ldd 0(%r24), %r24
#else
- /* Load new value into r22/r23 - high/low */
+ /* Load old value into r22/r23 - high/low */
10: ldw 0(%r25), %r22
11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%r26), %r29
+19: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%r26), %r29
+20: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
index ae70a24..e103c0f 100644
--- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
@@ -83,6 +83,10 @@
};
};
+ sdhc@114000 {
+ status = "disabled";
+ };
+
i2c@119000 {
status = "disabled";
};
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index b2da7c8..292458b 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+#define __get_user_or_set_dar(_regs, _dest, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__get_user_inatomic(_dest, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
+#define __put_user_or_set_dar(_regs, _src, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__put_user_inatomic(_src, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
@@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);
- if (__get_user_inatomic(instr,
- (unsigned int __user *)pc))
+ if (__get_user_or_set_dar(regs, instr,
+ (unsigned int __user *)pc))
return -EFAULT;
+
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
@@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = ®s->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = ®s->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
* Only POWER6 has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
- unsigned int flags)
+static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *) ¤t->thread.TS_FPR(reg);
char *ptr1 = (char *) ¤t->thread.TS_FPR(reg+1);
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
@@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
{
char *ptr0 = (char *)®s->gpr[reg];
char *ptr1 = (char *)®s->gpr[reg+1];
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
@@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
+ ret = __put_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
else
- ret |= __get_user(ptr[i^sw], addr + i);
+ ret = __get_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
+
+ if (ret)
+ return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
@@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, i;
+ int i;
union data {
u64 ll;
double dd;
@@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs)
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
- return emulate_fp_pair(addr, reg, flags);
+ return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
@@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs)
}
data.ll = 0;
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __get_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
+ if (__get_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
@@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs)
break;
}
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __put_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
+ if (__put_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
- if (unlikely(ret))
- return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 767ef6d..caa6596 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1235,10 +1235,14 @@
stdu r1,-SWITCH_FRAME_SIZE(r1)
/* Save all gprs to pt_regs */
- SAVE_8GPRS(0,r1)
- SAVE_8GPRS(8,r1)
- SAVE_8GPRS(16,r1)
- SAVE_8GPRS(24,r1)
+ SAVE_GPR(0, r1)
+ SAVE_10GPRS(2, r1)
+ SAVE_10GPRS(12, r1)
+ SAVE_10GPRS(22, r1)
+
+ /* Save previous stack pointer (r1) */
+ addi r8, r1, SWITCH_FRAME_SIZE
+ std r8, GPR1(r1)
/* Load special regs for save below */
mfmsr r8
@@ -1292,10 +1296,10 @@
#endif
/* Restore gprs */
- REST_8GPRS(0,r1)
- REST_8GPRS(8,r1)
- REST_8GPRS(16,r1)
- REST_8GPRS(24,r1)
+ REST_GPR(0,r1)
+ REST_10GPRS(2,r1)
+ REST_10GPRS(12,r1)
+ REST_10GPRS(22,r1)
/* Restore callee's TOC */
ld r2, 24(r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 2e2fc1e..fd68e19 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -764,7 +764,29 @@
EXC_VIRT(program_check, 0x4700, 0x4800, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ /*
+ * It's possible to receive a TM Bad Thing type program check with
+ * userspace register values (in particular r1), but with SRR1 reporting
+ * that we came from the kernel. Normally that would confuse the bad
+ * stack logic, and we would report a bad kernel stack pointer. Instead
+ * we switch to the emergency stack if we're taking a TM Bad Thing from
+ * the kernel.
+ */
+ li r10,MSR_PR /* Build a mask of MSR_PR .. */
+ oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
+ and r10,r10,r12 /* Mask SRR1 with that. */
+ srdi r10,r10,8 /* Shift it so we can compare */
+ cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
+ bne 1f /* If != go to normal path. */
+
+ /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
+ andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
+ /* 3 in EXCEPTION_PROLOG_COMMON */
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ b 3f /* Jump into the macro !! */
+1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index dcbb914..d973708 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
* in the appropriate thread structures from live.
*/
- if (tsk != current)
+ if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
return;
if (MSR_TM_SUSPENDED(mfmsr())) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 96698fd..04e9225 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
if (MSR_TM_RESV(msr))
return -EINVAL;
- /* pull in MSR TM from user context */
+ /* pull in MSR TS bits from user context */
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ /*
+ * Ensure that TM is enabled in regs->msr before we leave the signal
+ * handler. It could be the case that (a) user disabled the TM bit
+ * through the manipulation of the MSR bits in uc_mcontext or (b) the
+ * TM bit was disabled because a sufficient number of context switches
+ * happened whilst in the signal handler and load_tm overflowed,
+ * disabling the TM bit. In either case we can end up with an illegal
+ * TM state leading to a TM Bad Thing when we return to userspace.
+ */
+ regs->msr |= MSR_TM;
+
/* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc3f7d0..f1d7e99 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -407,6 +407,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
struct cpu_accounting_data *acct = get_accounting(current);
acct->starttime = get_accounting(prev)->starttime;
+ acct->startspurr = get_accounting(prev)->startspurr;
acct->system_time = 0;
acct->user_time = 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index c379ff5..da2a7ec 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -129,8 +129,11 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
+ struct kvm *kvm = stt->kvm;
+ mutex_lock(&kvm->lock);
list_del_rcu(&stt->list);
+ mutex_unlock(&kvm->lock);
kvm_put_kvm(stt->kvm);
@@ -150,6 +153,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
+ struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size;
int ret = -ENOMEM;
int i;
@@ -157,24 +161,16 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
if (!args->size)
return -EINVAL;
- /* Check this LIOBN hasn't been previously allocated */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == args->liobn)
- return -EBUSY;
- }
-
size = args->size;
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
- if (ret) {
- stt = NULL;
- goto fail;
- }
+ if (ret)
+ return ret;
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
- goto fail;
+ goto fail_acct;
stt->liobn = args->liobn;
stt->page_shift = args->page_shift;
@@ -188,24 +184,39 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
goto fail;
}
- kvm_get_kvm(kvm);
-
mutex_lock(&kvm->lock);
- list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ /* Check this LIOBN hasn't been previously allocated */
+ ret = 0;
+ list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+ if (siter->liobn == args->liobn) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!ret)
+ ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR | O_CLOEXEC);
+
+ if (ret >= 0) {
+ list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+ kvm_get_kvm(kvm);
+ }
mutex_unlock(&kvm->lock);
- return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR | O_CLOEXEC);
+ if (ret >= 0)
+ return ret;
-fail:
- if (stt) {
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
+ fail:
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
- kfree(stt);
- }
+ kfree(stt);
+ fail_acct:
+ kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index a0ea63a..a8e3498 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -376,6 +376,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
*/
if (reject && reject != XICS_IPI) {
arch_spin_unlock(&ics->lock);
+ icp->n_reject++;
new_irq = reject;
goto again;
}
@@ -707,10 +708,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
state = &ics->irq_state[src];
/* Still asserted, resend it */
- if (state->asserted) {
- icp->n_reject++;
+ if (state->asserted)
icp_rm_deliver_irq(xics, icp, irq);
- }
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
icp->rm_action |= XICS_RM_NOTIFY_EOI;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c8..fc0df0f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -601,8 +601,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
- r = cpu_has_feature(CPU_FTR_TM_COMP) &&
- is_kvmppc_hv_enabled(kvm);
+ r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
break;
default:
r = 0;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d5ce34d..1e28747 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -42,6 +42,8 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -421,6 +423,28 @@ static int __init parse_disable_radix(char *p)
}
early_param("disable_radix", parse_disable_radix);
+/*
+ * If we're running under a hypervisor, we currently can't do radix
+ * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
+ * We can tell that we're running under a hypervisor by looking for the
+ * /chosen/ibm,architecture-vec-5 property.
+ */
+static void early_check_vec5(void)
+{
+ unsigned long root, chosen;
+ int size;
+ const u8 *vec5;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return;
+ vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
+ if (!vec5)
+ return;
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+}
+
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
@@ -428,6 +452,15 @@ void __init mmu_early_init_devtree(void)
if (disable_radix || !(mfmsr() & MSR_HV))
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ /*
+ * Check /chosen/ibm,architecture-vec-5 if running as a guest.
+ * When running bare-metal, we can use radix if we like
+ * even though the ibm,architecture-vec-5 property created by
+ * skiboot doesn't have the necessary bits set.
+ */
+ if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+ early_check_vec5();
+
if (early_radix_enabled())
radix__early_init_devtree();
else
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 4d0a4e5..8e6dd17 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -201,6 +201,10 @@
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
+/*
+ * Let's restrict the use of PMC5 for instruction counting.
+ */
+#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
/* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 8e9a819..9abcd8f 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -295,7 +295,7 @@ static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = ISA207_TEST_ADDER,
+ .test_adder = P9_DD1_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index a560a98..6a5e746 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
return -ENOENT;
dn = dlpar_configure_connector(drc_index, parent_dn);
- if (!dn)
+ if (!dn) {
+ of_node_put(parent_dn);
return -ENOENT;
+ }
rc = dlpar_attach_node(dn);
if (rc)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 303d28e..591cbdf6 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -28,6 +28,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@@ -501,6 +502,12 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (err)
return err;
+ /* In fips mode only 128 bit or 256 bit keys are valid */
+ if (fips_enabled && key_len != 32 && key_len != 64) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
(key_len == 64) ? CPACF_KM_XTS_256 : 0;
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 1113389..fe7368a 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -110,22 +110,30 @@ static const u8 initial_parm_block[32] __initconst = {
/*** helper functions ***/
+/*
+ * generate_entropy:
+ * This algorithm produces 64 bytes of entropy data based on 1024
+ * individual stckf() invocations assuming that each stckf() value
+ * contributes 0.25 bits of entropy. So the caller gets 256 bits of
+ * entropy per 64 bytes, or 4 bits of entropy per byte.
+ */
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
- u8 *pg, *h, hash[32];
+ u8 *pg, *h, hash[64];
- pg = (u8 *) __get_free_page(GFP_KERNEL);
+ /* allocate 2 pages */
+ pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
while (nbytes) {
- /* fill page with urandom bytes */
- get_random_bytes(pg, PAGE_SIZE);
- /* exor page with stckf values */
- for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
+ /* fill pages with urandom bytes */
+ get_random_bytes(pg, 2*PAGE_SIZE);
+ /* exor pages with 1024 stckf values */
+ for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}
@@ -134,8 +142,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
h = hash;
else
h = ebuf;
- /* generate sha256 from this page */
- cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
+ /* hash over the filled pages */
+ cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@@ -143,7 +151,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
nbytes -= n;
}
- free_page((unsigned long)pg);
+ free_pages((unsigned long)pg, 1);
return ret;
}
@@ -334,7 +342,7 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
int ret, datalen;
- u8 seed[64];
+ u8 seed[64 + 32 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -357,12 +365,12 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
- /* generate initial seed bytestring, first 48 bytes of entropy */
- ret = generate_entropy(seed, 48);
- if (ret != 48)
+ /* generate initial seed bytestring, with 256 + 128 bits entropy */
+ ret = generate_entropy(seed, 64 + 32);
+ if (ret != 64 + 32)
goto outfree;
/* followed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + 48);
+ get_tod_clock_ext(seed + 64 + 32);
/* initial seed of the ppno drng */
cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
@@ -395,9 +403,9 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
int ret;
- u8 seed[32];
+ u8 seed[64];
- /* generate 32 bytes of fresh entropy */
+ /* fetch 256 bits of fresh entropy */
ret = generate_entropy(seed, sizeof(seed));
if (ret != sizeof(seed))
return ret;
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bea785d..af85d6b 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
#include <linux/errno.h>
typedef struct {
+ spinlock_t lock;
cpumask_t cpu_attach_mask;
atomic_t flush_count;
unsigned int flush_mm;
@@ -25,6 +26,7 @@ typedef struct {
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
+ .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
.context.pgtable_lock = \
__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 515fea5..f65a708 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.lock);
spin_lock_init(&mm->context.pgtable_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
spin_lock_init(&mm->context.gmap_lock);
@@ -93,7 +94,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (prev == next)
return;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- cpumask_set_cpu(cpu, mm_cpumask(next));
/* Clear old ASCE by loading the kernel ASCE. */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -111,9 +111,8 @@ static inline void finish_arch_post_lock_switch(void)
preempt_disable();
while (atomic_read(&mm->context.flush_count))
cpu_relax();
-
- if (mm->context.flush_mm)
- __tlb_flush_mm(mm);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ __tlb_flush_mm_lazy(mm);
preempt_enable();
}
set_fs(current->thread.mm_segment);
@@ -126,6 +125,7 @@ static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
set_user_asce(next);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index d33f245..db74d39 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1359,7 +1359,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
+ pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 3984610..eed927a 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void)
* Flush TLB entries for a specific mm on all CPUs (in case gmap is used
* this implicates multiple ASCEs!).
*/
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
- preempt_disable();
- atomic_inc(&mm->context.flush_count);
- if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- /* Local TLB flush */
- __tlb_flush_local();
- } else {
- /* Global TLB flush */
- __tlb_flush_global();
- /* Reset TLB flush mask */
- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
- }
- atomic_dec(&mm->context.flush_count);
- preempt_enable();
-}
-
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
unsigned long gmap_asce;
@@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
*/
preempt_disable();
atomic_inc(&mm->context.flush_count);
+ /* Reset TLB flush mask */
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+ barrier();
gmap_asce = READ_ONCE(mm->context.gmap_asce);
if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
if (gmap_asce)
__tlb_flush_idte(gmap_asce);
__tlb_flush_idte(mm->context.asce);
} else {
- __tlb_flush_full(mm);
+ /* Global TLB flush */
+ __tlb_flush_global();
}
- /* Reset TLB flush mask */
- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
@@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void)
}
#else
#define __tlb_flush_global() __tlb_flush_local()
-#define __tlb_flush_full(mm) __tlb_flush_local()
/*
* Flush TLB entries for a specific ASCE on all CPUs.
@@ -112,10 +96,12 @@ static inline void __tlb_flush_kernel(void)
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
+ spin_lock(&mm->context.lock);
if (mm->context.flush_mm) {
- __tlb_flush_mm(mm);
mm->context.flush_mm = 0;
+ __tlb_flush_mm(mm);
}
+ spin_unlock(&mm->context.lock);
}
/*
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b..0c19686 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -363,6 +363,18 @@ static inline void save_vector_registers(void)
#endif
}
+static int __init topology_setup(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (!rc && !enabled)
+ S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
+ return rc;
+}
+early_param("topology", topology_setup);
+
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8705ee6..239f295 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,6 @@ static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
-static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
@@ -56,7 +55,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
@@ -71,7 +70,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
int i;
cpumask_copy(&mask, cpumask_of(cpu));
- if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ if (!MACHINE_HAS_TOPOLOGY)
return mask;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
@@ -413,12 +412,6 @@ static const struct cpumask *cpu_drawer_mask(int cpu)
return &per_cpu(cpu_topology, cpu).drawer_mask;
}
-static int __init early_parse_topology(char *p)
-{
- return kstrtobool(p, &topology_enabled);
-}
-early_param("topology", early_parse_topology);
-
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 18d4107..97fc449 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
struct page *head, *page;
+ unsigned long mask;
int refs;
- result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
- mask = result | _SEGMENT_ENTRY_INVALID;
- if ((pmd_val(pmd) & mask) != result)
+ mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+ if ((pmd_val(pmd) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 538c10d..8dc315b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_TE | SCSCR_RE,
.type = PORT_IRDA,
.ops = &sh770x_sci_port_ops,
- .regshift = 1,
};
static struct resource scif2_resources[] = {
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index be0cc1b..3fae200 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
extern int sysctl_tsb_ratio;
-#endif
+#ifdef CONFIG_SERIAL_SUNHV
+void sunhv_migrate_hvcons_irq(int cpu);
+#endif
+#endif
void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 2deb89e..ca7cb8e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1465,8 +1465,12 @@ void smp_send_stop(void)
int cpu;
if (tlb_type == hypervisor) {
+ int this_cpu = smp_processor_id();
+#ifdef CONFIG_SERIAL_SUNHV
+ sunhv_migrate_hvcons_irq(this_cpu);
+#endif
for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
continue;
#ifdef CONFIG_SUN_LDOMS
if (ldom_domaining_enabled) {
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 96df6a3..a2ae689 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index a78a069..ec9bee6 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636ba..6c98821 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -62,8 +62,10 @@
#define new_len2 145f-144f
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 1b02038..d4aea31 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
alt_end_marker ":\n"
/*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
*/
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 19d14ac..fc3c7e4 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
+ bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
struct x86_exception exception;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a300aa1..dead0f3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -68,6 +68,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
+#else
+# define WARN_ON_IN_IRQ()
+#endif
+
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@@ -88,8 +94,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- likely(!__range_not_ok(addr, size, user_addr_max()))
+#define access_ok(type, addr, size) \
+({ \
+ WARN_ON_IN_IRQ(); \
+ likely(!__range_not_ok(addr, size, user_addr_max())); \
+})
/*
* These are the main single-value transfer routines. They automatically
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d3e0d04..b89bef9 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -176,10 +176,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}
+ if (!enabled) {
+ ++disabled_cpus;
+ return -EINVAL;
+ }
+
if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;
- cpu = __generic_processor_info(id, ver, enabled);
+ cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f223491..e2ead34 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2070,7 +2070,7 @@ static int allocate_logical_cpuid(int apicid)
return nr_logical_cpuids++;
}
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2128,11 +2128,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- if (enabled) {
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
- "reached. Processor %d/0x%x ignored.\n",
- max, thiscpu, apicid);
- }
+ pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+ "reached. Processor %d/0x%x ignored.\n",
+ max, thiscpu, apicid);
disabled_cpus++;
return -EINVAL;
@@ -2184,23 +2182,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
-
- if (enabled) {
- num_processors++;
- physid_set(apicid, phys_cpu_present_map);
- set_cpu_present(cpu, true);
- } else {
- disabled_cpus++;
- }
+ physid_set(apicid, phys_cpu_present_map);
+ set_cpu_present(cpu, true);
+ num_processors++;
return cpu;
}
-int generic_processor_info(int apicid, int version)
-{
- return __generic_processor_info(apicid, version, true);
-}
-
int hard_smp_processor_id(void)
{
return read_apic_id();
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index cdc0dea..13dbcc0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
@@ -1046,6 +1047,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
return 0;
}
+static bool is_blacklisted(unsigned int cpu)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+ pr_err_once("late loading on model 79 is disabled.\n");
+ return true;
+ }
+
+ return false;
+}
+
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
@@ -1054,6 +1067,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
const struct firmware *firmware;
enum ucode_state ret;
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
@@ -1078,6 +1094,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ if (is_blacklisted(cpu))
+ return UCODE_NFOUND;
+
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index c114b13..7052d9a 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -130,11 +130,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
fpu__activate_fpstate_write(fpu);
- if (boot_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
ret = copyin_to_xsaves(kbuf, ubuf, xsave);
- else
+ } else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!ret && xsave->header.xcomp_bv)
+ ret = -EINVAL;
+ }
+
/*
* In case of failure, mark all states as init:
*/
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a184c21..3ec0d2d 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -329,6 +329,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
err = __copy_from_user(&fpu->state.xsave,
buf_fx, state_size);
+
+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
+ err = -EINVAL;
}
if (err || __copy_from_user(&env, buf, sizeof(env))) {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 55ffd9d..77f17cb 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -141,7 +141,8 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token;
n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1;
+ n.halted = is_idle_task(current) || preempt_count() > 1 ||
+ rcu_preempt_depth();
init_swait_queue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
raw_spin_unlock(&b->lock);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 36171bc..9fe7b9e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -181,6 +181,12 @@ static void smp_callin(void)
smp_store_cpu_info(cpuid);
/*
+ * The topology information must be up to date before
+ * calibrate_delay() and notify_cpu_starting().
+ */
+ set_cpu_sibling_map(raw_smp_processor_id());
+
+ /*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
@@ -190,11 +196,6 @@ static void smp_callin(void)
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
- /*
- * This must be done before setting cpu_online_mask
- * or calling notify_cpu_starting.
- */
- set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6e57edf..44bf5cf 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1382,12 +1382,10 @@ void __init tsc_init(void)
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
- struct cpumask *mask = topology_core_cpumask(cpu);
+ int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+ const struct cpumask *mask = topology_core_cpumask(cpu);
- if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
- return 0;
-
- if (!mask)
+ if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index de36660..72b737b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2738,6 +2738,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5f24127..d29c745 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3649,19 +3649,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
unsigned level, unsigned gpte)
{
/*
- * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
- * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
- * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
- /*
* The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
* If it is clear, there are no large pages at this level, so clear
* PT_PAGE_SIZE_MASK in gpte if that is the case.
*/
gpte &= level - mmu->last_nonleaf_level;
+ /*
+ * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+ * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+ * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
return gpte & PT_PAGE_SIZE_MASK;
}
@@ -4169,6 +4169,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
+ update_last_nonleaf_level(vcpu, context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a011054..3736390 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -324,10 +324,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
--walker->level;
index = PT_INDEX(addr, walker->level);
-
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+ BUG_ON(walker->level < 1);
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3dc6d80..a8ae57a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2167,46 +2167,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
struct pi_desc old, new;
unsigned int dest;
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ /*
+ * In case of hot-plug or hot-unplug, we may have to undo
+ * vmx_vcpu_pi_put even if there is no assigned device. And we
+ * always keep PI.NDST up to date for simplicity: it makes the
+ * code easier, and CPU migration is not a fast path.
+ */
+ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
return;
+ /*
+ * First handle the simple case where no cmpxchg is necessary; just
+ * allow posting non-urgent interrupts.
+ *
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block will do it for us and the wakeup_handler
+ * expects the VCPU to be on the blocked_vcpu_list that matches
+ * PI.NDST.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
+ vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
+ return;
+ }
+
+ /* The full case. */
do {
old.control = new.control = pi_desc->control;
- /*
- * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
- * are two possible cases:
- * 1. After running 'pre_block', context switch
- * happened. For this case, 'sn' was set in
- * vmx_vcpu_put(), so we need to clear it here.
- * 2. After running 'pre_block', we were blocked,
- * and woken up by some other guy. For this case,
- * we don't need to do anything, 'pi_post_block'
- * will do everything for us. However, we cannot
- * check whether it is case #1 or case #2 here
- * (maybe, not needed), so we also clear sn here,
- * I think it is not a big deal.
- */
- if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
- if (vcpu->cpu != cpu) {
- dest = cpu_physical_id(cpu);
+ dest = cpu_physical_id(cpu);
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
- }
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- }
-
- /* Allow posting non-urgent interrupts */
new.sn = 0;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
}
static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
@@ -4761,21 +4759,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SMP
if (vcpu->mode == IN_GUEST_MODE) {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
/*
- * Currently, we don't support urgent interrupt,
- * all interrupts are recognized as non-urgent
- * interrupt, so we cannot post interrupts when
- * 'SN' is set.
+ * The vector of interrupt to be delivered to vcpu had
+ * been set in PIR before this function.
*
- * If the vcpu is in guest mode, it means it is
- * running instead of being scheduled out and
- * waiting in the run queue, and that's the only
- * case when 'SN' is set currently, warning if
- * 'SN' is set.
+ * Following cases will be reached in this block, and
+ * we always send a notification event in all cases as
+ * explained below.
+ *
+ * Case 1: vcpu keeps in non-root mode. Sending a
+ * notification event posts the interrupt to vcpu.
+ *
+ * Case 2: vcpu exits to root mode and is still
+ * runnable. PIR will be synced to vIRR before the
+ * next vcpu entry. Sending a notification event in
+ * this case has no effect, as vcpu is not in
+ * non-root mode.
+ *
+ * Case 3: vcpu exits to root mode and is blocked.
+ * vcpu_block() has already synced PIR to vIRR and
+ * never blocks vcpu if vIRR is not cleared. Therefore,
+ * a blocked vcpu here does not wait for any requested
+ * interrupts in PIR, and sending a notification event
+ * which has no effect is safe here.
*/
- WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
POSTED_INTR_VECTOR);
@@ -9187,6 +9194,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+ /*
+ * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
+ * or POSTED_INTR_WAKEUP_VECTOR.
+ */
+ vmx->pi_desc.nv = POSTED_INTR_VECTOR;
+ vmx->pi_desc.sn = 1;
+
return &vmx->vcpu;
free_vmcs:
@@ -9996,6 +10010,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
page_to_phys(vmx->nested.virtual_apic_page));
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
}
if (cpu_has_vmx_msr_bitmap() &&
@@ -10671,7 +10690,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
*/
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- kvm_set_cr4(vcpu, vmcs12->host_cr4);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);
@@ -11000,6 +11019,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ do {
+ old.control = new.control = pi_desc->control;
+ WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+ "Wakeup handler not enabled while the VCPU is blocked\n");
+
+ dest = cpu_physical_id(vcpu->cpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ vcpu->pre_pcpu = -1;
+ }
+}
+
/*
* This routine does the following things for vCPU which is going
* to be blocked if VT-d PI is enabled.
@@ -11015,7 +11065,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
- unsigned long flags;
unsigned int dest;
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -11025,34 +11074,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
!kvm_vcpu_apicv_active(vcpu))
return 0;
- vcpu->pre_pcpu = vcpu->cpu;
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_add_tail(&vcpu->blocked_vcpu_list,
- &per_cpu(blocked_vcpu_on_cpu,
- vcpu->pre_pcpu));
- spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+ vcpu->pre_pcpu = vcpu->cpu;
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_add_tail(&vcpu->blocked_vcpu_list,
+ &per_cpu(blocked_vcpu_on_cpu,
+ vcpu->pre_pcpu));
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ }
do {
old.control = new.control = pi_desc->control;
- /*
- * We should not block the vCPU if
- * an interrupt is posted for it.
- */
- if (pi_test_on(pi_desc) == 1) {
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
-
- return 1;
- }
-
WARN((pi_desc->sn == 1),
"Warning: SN field of posted-interrupts "
"is set before blocking\n");
@@ -11074,10 +11109,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
- return 0;
+ /* We should not block the vCPU if an interrupt is posted for it. */
+ if (pi_test_on(pi_desc) == 1)
+ __pi_post_block(vcpu);
+
+ local_irq_enable();
+ return (vcpu->pre_pcpu == -1);
}
static int vmx_pre_block(struct kvm_vcpu *vcpu)
@@ -11093,44 +11133,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
static void pi_post_block(struct kvm_vcpu *vcpu)
{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
- unsigned long flags;
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (vcpu->pre_pcpu == -1)
return;
- do {
- old.control = new.control = pi_desc->control;
-
- dest = cpu_physical_id(vcpu->cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* Allow posting non-urgent interrupts */
- new.sn = 0;
-
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
-
- if(vcpu->pre_pcpu != -1) {
- spin_lock_irqsave(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
- }
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ __pi_post_block(vcpu);
+ local_irq_enable();
}
static void vmx_post_block(struct kvm_vcpu *vcpu)
@@ -11158,7 +11167,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
- int idx, ret = -EINVAL;
+ int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
@@ -11167,7 +11176,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
+ if (guest_irq >= irq_rt->nr_rt_entries ||
+ hlist_empty(&irq_rt->map[guest_irq])) {
+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+ guest_irq, irq_rt->nr_rt_entries);
+ goto out;
+ }
hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
if (e->type != KVM_IRQ_ROUTING_MSI)
@@ -11210,12 +11224,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- else {
- /* suppress notification event before unposting */
- pi_set_sn(vcpu_to_pi_desc(vcpu));
+ else
ret = irq_set_vcpu_affinity(host_irq, NULL);
- pi_clear_sn(vcpu_to_pi_desc(vcpu));
- }
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3dbcb09..595f814 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5250,6 +5250,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu);
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5465,37 +5467,26 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
return dr6;
}
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{
struct kvm_run *kvm_run = vcpu->run;
- /*
- * rflags is the old, "raw" value of the flags. The new value has
- * not been saved yet.
- *
- * This is correct even for TF set by the guest, because "the
- * processor will not generate this exception after the instruction
- * that sets the TF flag".
- */
- if (unlikely(rflags & X86_EFLAGS_TF)) {
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
- DR6_RTM;
- kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
- kvm_run->debug.arch.exception = DB_VECTOR;
- kvm_run->exit_reason = KVM_EXIT_DEBUG;
- *r = EMULATE_USER_EXIT;
- } else {
- vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
- /*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
- }
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+ kvm_run->debug.arch.exception = DB_VECTOR;
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ *r = EMULATE_USER_EXIT;
+ } else {
+ vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
+ /*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+ kvm_queue_exception(vcpu, DB_VECTOR);
}
}
@@ -5650,8 +5641,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE)
- kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+ if (r == EMULATE_DONE &&
+ (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9f72ca3..1dd7960 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -191,8 +191,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
- struct vm_area_struct *vma)
+static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
@@ -208,7 +207,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
* valid VMA, so we should never reach this without a
* valid VMA.
*/
- if (!vma) {
+ if (!pkey) {
WARN_ONCE(1, "PKU fault with no VMA passed in");
info->si_pkey = 0;
return;
@@ -218,13 +217,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
* absolutely guranteed to be 100% accurate because of
* the race explained above.
*/
- info->si_pkey = vma_pkey(vma);
+ info->si_pkey = *pkey;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk, struct vm_area_struct *vma,
- int fault)
+ struct task_struct *tsk, u32 *pkey, int fault)
{
unsigned lsb = 0;
siginfo_t info;
@@ -239,7 +237,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
- fill_sig_info_pkey(si_code, &info, vma);
+ fill_sig_info_pkey(si_code, &info, pkey);
force_sig_info(si_signo, &info, tsk);
}
@@ -718,8 +716,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
unsigned long flags;
int sig;
- /* No context means no VMA to pass down */
- struct vm_area_struct *vma = NULL;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF)) {
@@ -744,7 +740,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address,
- tsk, vma, 0);
+ tsk, NULL, 0);
}
/*
@@ -853,8 +849,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- int si_code)
+ unsigned long address, u32 *pkey, int si_code)
{
struct task_struct *tsk = current;
@@ -902,7 +897,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;
- force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
return;
}
@@ -915,9 +910,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma)
+ unsigned long address, u32 *pkey)
{
- __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
+ __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}
static void
@@ -925,6 +920,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma, int si_code)
{
struct mm_struct *mm = current->mm;
+ u32 pkey;
+
+ if (vma)
+ pkey = vma_pkey(vma);
/*
* Something tried to access memory that isn't in our memory map..
@@ -932,7 +931,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
*/
up_read(&mm->mmap_sem);
- __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
+ __bad_area_nosemaphore(regs, error_code, address,
+ (vma) ? &pkey : NULL, si_code);
}
static noinline void
@@ -975,7 +975,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- struct vm_area_struct *vma, unsigned int fault)
+ u32 *pkey, unsigned int fault)
{
struct task_struct *tsk = current;
int code = BUS_ADRERR;
@@ -1002,13 +1002,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
code = BUS_MCEERR_AR;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
+ force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- unsigned int fault)
+ unsigned long address, u32 *pkey, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
no_context(regs, error_code, address, 0, 0);
@@ -1032,9 +1031,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, vma, fault);
+ do_sigbus(regs, error_code, address, pkey, fault);
else if (fault & VM_FAULT_SIGSEGV)
- bad_area_nosemaphore(regs, error_code, address, vma);
+ bad_area_nosemaphore(regs, error_code, address, pkey);
else
BUG();
}
@@ -1220,6 +1219,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct mm_struct *mm;
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ u32 pkey;
tsk = current;
mm = tsk->mm;
@@ -1420,9 +1420,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
return;
}
+ pkey = vma_pkey(vma);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, vma, fault);
+ mm_fault_error(regs, error_code, address, &pkey, fault);
return;
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9a324fc..3e27ded 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -689,7 +689,7 @@ static void __meminit free_pagetable(struct page *page, int order)
if (PageReserved(page)) {
__ClearPageReserved(page);
- magic = (unsigned long)page->lru.next;
+ magic = (unsigned long)page->freelist;
if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
while (nr_pages--)
put_page_bootmem(page++);
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 350f709..7913b69 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 555b9fa..7dbdb78 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,7 @@
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
+KASAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/block/bio.c b/block/bio.c
index 655c901..e14a897 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -589,7 +589,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
-
+ bio->bi_dio_inode = bio_src->bi_dio_inode;
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -1171,8 +1171,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
bmd->is_our_pages = map_data ? 0 : 1;
memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- iov_iter_init(&bmd->iter, iter->type, bmd->iov,
- iter->nr_segs, iter->count);
+ bmd->iter = *iter;
+ bmd->iter.iov = bmd->iov;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1266,6 +1266,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
int ret, offset;
struct iov_iter i;
struct iovec iov;
+ struct bio_vec *bvec;
iov_for_each(iov, i, *iter) {
unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1310,7 +1311,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
ret = get_user_pages_fast(uaddr, local_nr_pages,
(iter->type & WRITE) != WRITE,
&pages[cur_page]);
- if (ret < local_nr_pages) {
+ if (unlikely(ret < local_nr_pages)) {
+ for (j = cur_page; j < page_limit; j++) {
+ if (!pages[j])
+ break;
+ put_page(pages[j]);
+ }
ret = -EFAULT;
goto out_unmap;
}
@@ -1318,6 +1324,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
offset = offset_in_page(uaddr);
for (j = cur_page; j < page_limit; j++) {
unsigned int bytes = PAGE_SIZE - offset;
+ unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (len <= 0)
break;
@@ -1332,6 +1339,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bytes)
break;
+ /*
+ * check if vector was merged with previous
+ * drop page reference if needed
+ */
+ if (bio->bi_vcnt == prev_bi_vcnt)
+ put_page(pages[j]);
+
len -= bytes;
offset = 0;
}
@@ -1364,10 +1378,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- for (j = 0; j < nr_pages; j++) {
- if (!pages[j])
- break;
- put_page(pages[j]);
+ bio_for_each_segment_all(bvec, bio, j) {
+ put_page(bvec->bv_page);
}
out:
kfree(pages);
diff --git a/block/blk-core.c b/block/blk-core.c
index d8fba67..9fc567c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index abde370..0272fac 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,7 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
-
+#include <linux/pfk.h>
#include <trace/events/block.h>
#include "blk.h"
@@ -725,6 +725,11 @@ static void blk_account_io_merge(struct request *req)
}
}
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+ return (!pfk_allow_merge_bio(bio, nxt));
+}
+
/*
* Has to be called with the request spinlock acquired
*/
@@ -752,6 +757,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
!blk_write_same_mergeable(req->bio, next->bio))
return 0;
+ if (crypto_not_mergeable(req->bio, next->bio))
+ return 0;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
!blk_write_same_mergeable(rq->bio, bio))
return false;
+ if (crypto_not_mergeable(rq->bio, bio))
+ return false;
return true;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c7c3d4e..4ac4910 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2948,10 +2948,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* SSD device without seek penalty, disable idling. But only do so
- * for devices that support queuing, otherwise we still have a problem
- * with sync vs async workloads.
+ * for devices that support queuing (and when group idle is 0),
+ * otherwise we still have a problem with sync vs async workloads.
*/
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+ !cfqd->cfq_group_idle)
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index bcd86e5..39f70d9 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -293,7 +293,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
if (!gpt)
return NULL;
- count = le32_to_cpu(gpt->num_partition_entries) *
+ count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
le32_to_cpu(gpt->sizeof_partition_entry);
if (!count)
return NULL;
@@ -352,7 +352,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
gpt_header **gpt, gpt_entry **ptes)
{
u32 crc, origcrc;
- u64 lastlba;
+ u64 lastlba, pt_size;
if (!ptes)
return 0;
@@ -434,13 +434,20 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
goto fail;
}
+ /* Sanity check partition table size */
+ pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
+ le32_to_cpu((*gpt)->sizeof_partition_entry);
+ if (pt_size > KMALLOC_MAX_SIZE) {
+ pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
+ (unsigned long long)pt_size, KMALLOC_MAX_SIZE);
+ goto fail;
+ }
+
if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
goto fail;
/* Check the GUID Partition Entry Array CRC */
- crc = efi_crc32((const unsigned char *) (*ptes),
- le32_to_cpu((*gpt)->num_partition_entries) *
- le32_to_cpu((*gpt)->sizeof_partition_entry));
+ crc = efi_crc32((const unsigned char *) (*ptes), pt_size);
if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
pr_debug("GUID Partition Entry Array CRC check failed.\n");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 84d7148..fa98ad7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -360,6 +360,7 @@
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
+ select CRYPTO_ECB
help
XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
key size 256, 384 or 512 bits. This implementation currently
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 45af0fe..aaf2f81 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -143,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
- if (sg)
+ if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
+ }
list_add_tail(&sgl->list, &ctx->tsgl);
}
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd86..d140d8bb 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
bool want = false;
sinfo = msg->signed_infos;
+ if (!sinfo)
+ goto inconsistent;
+
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d857..b3ace63 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -413,7 +413,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
- u8 *iv = req->iv;
+ u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
@@ -429,6 +429,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
+ memcpy(iv, req->iv, 16);
+
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 8cac3d3..942ddff 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
- kzfree(drbg->V);
- drbg->Vbuf = NULL;
- kzfree(drbg->C);
- drbg->Cbuf = NULL;
+ kzfree(drbg->Vbuf);
+ drbg->V = NULL;
+ kzfree(drbg->Cbuf);
+ drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
diff --git a/crypto/shash.c b/crypto/shash.c
index a051541..4d8a671 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -274,12 +274,14 @@ static int shash_async_finup(struct ahash_request *req)
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
- struct scatterlist *sg = req->src;
- unsigned int offset = sg->offset;
unsigned int nbytes = req->nbytes;
+ struct scatterlist *sg;
+ unsigned int offset;
int err;
- if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+ if (nbytes &&
+ (sg = req->src, offset = sg->offset,
+ nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fcf85be..4e7e9a7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -465,9 +465,8 @@ struct binder_ref {
};
enum binder_deferred_state {
- BINDER_DEFERRED_PUT_FILES = 0x01,
- BINDER_DEFERRED_FLUSH = 0x02,
- BINDER_DEFERRED_RELEASE = 0x04,
+ BINDER_DEFERRED_FLUSH = 0x01,
+ BINDER_DEFERRED_RELEASE = 0x02,
};
/**
@@ -504,8 +503,6 @@ struct binder_priority {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
- * @files files_struct for process
- * (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -552,7 +549,6 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
- struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -600,6 +596,8 @@ enum {
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -626,6 +624,7 @@ struct binder_thread {
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
+ bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -813,6 +812,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
return ret;
}
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -823,22 +832,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_work() - Add an item to the work list
- * @proc: binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread: thread to queue work to
* @work: struct binder_work to add to list
- * @target_list: list to add work to
*
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_work(struct binder_proc *proc,
- struct binder_work *work,
- struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
{
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(work, target_list);
- binder_inner_proc_unlock(proc);
+ binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_enqueue_work_ilocked(work, &thread->todo);
+ thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_inner_proc_lock(thread->proc);
+ binder_enqueue_thread_work_ilocked(thread, work);
+ binder_inner_proc_unlock(thread->proc);
}
static void
@@ -901,22 +944,34 @@ static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
+struct files_struct *binder_get_files_struct(struct binder_proc *proc)
+{
+ return get_files_struct(proc->tsk);
+}
+
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
+ struct files_struct *files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
+ files = binder_get_files_struct(proc);
if (files == NULL)
return -ESRCH;
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(files, 0, rlim_cur, flags);
+err:
+ put_files_struct(files);
+ return ret;
}
/*
@@ -925,8 +980,12 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files)
- __fd_install(proc->files, fd, file);
+ struct files_struct *files = binder_get_files_struct(proc);
+
+ if (files) {
+ __fd_install(files, fd, file);
+ put_files_struct(files);
+ }
}
/*
@@ -934,18 +993,20 @@ static void task_fd_install(
*/
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
+ struct files_struct *files = binder_get_files_struct(proc);
int retval;
- if (proc->files == NULL)
+ if (files == NULL)
return -ESRCH;
- retval = __close_fd(proc->files, fd);
+ retval = __close_fd(files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
+ put_files_struct(files);
return retval;
}
@@ -953,7 +1014,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return !binder_worklist_empty_ilocked(&thread->todo) ||
+ return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1153,6 +1214,10 @@ static void binder_do_set_priority(struct task_struct *task,
task->pid, desired.prio,
to_kernel_prio(policy, priority));
+ trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
+ to_kernel_prio(policy, priority),
+ desired.prio);
+
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
struct sched_param params;
@@ -1184,7 +1249,7 @@ static void binder_transaction_priority(struct task_struct *task,
struct binder_priority node_prio,
bool inherit_rt)
{
- struct binder_priority desired_prio;
+ struct binder_priority desired_prio = t->priority;
if (t->set_priority_called)
return;
@@ -1196,9 +1261,6 @@ static void binder_transaction_priority(struct task_struct *task,
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
- } else {
- desired_prio.prio = t->priority.prio;
- desired_prio.sched_policy = t->priority.sched_policy;
}
if (node_prio.prio < t->priority.prio ||
@@ -1301,7 +1363,7 @@ static struct binder_node *binder_init_node_ilocked(
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
@@ -1369,6 +1431,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
+ /*
+ * Note: this function is the only place where we queue
+ * directly to a thread->todo without using the
+ * corresponding binder_enqueue_thread_work() helper
+ * functions; in this case it's ok to not set the
+ * process_todo flag, since we know this node work will
+ * always be followed by other work that starts queue
+ * processing: in case of synchronous transactions, a
+ * BR_REPLY or BR_ERROR; in case of oneway
+ * transactions, a BR_TRANSACTION_COMPLETE.
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1380,6 +1453,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->debug_id);
return -EINVAL;
}
+ /*
+ * See comment above
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -2069,9 +2145,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_work_ilocked(
- &target_thread->reply_error.work,
- &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ target_thread,
+ &target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
WARN(1, "Unexpected reply error: %u\n",
@@ -2102,6 +2178,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t: transaction that needs to be cleaned up
+ * @reason: reason the transaction wasn't delivered
+ * @error_code: error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+ const char *reason,
+ uint32_t error_code)
+{
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+ binder_send_failed_reply(t, error_code);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered transaction %d, %s\n",
+ t->debug_id, reason);
+ binder_free_transaction(t);
+ }
+}
+
+/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the buffer at which to validate an object.
@@ -2690,11 +2786,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
- struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool wakeup = true;
+ bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
@@ -2704,8 +2799,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- target_list = &node->async_todo;
- wakeup = false;
+ pending_async = true;
} else {
node->has_async_transaction = 1;
}
@@ -2719,22 +2813,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return false;
}
- if (!thread && !target_list)
+ if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread) {
- target_list = &thread->todo;
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
- } else if (!target_list) {
- target_list = &proc->todo;
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
+ } else if (!pending_async) {
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
- BUG_ON(target_list != &node->async_todo);
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
- binder_enqueue_work_ilocked(&t->work, target_list);
-
- if (wakeup)
+ if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
@@ -3236,10 +3328,10 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3247,7 +3339,7 @@ static void binder_transaction(struct binder_proc *proc,
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3255,6 +3347,14 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
+ /*
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
+ * userspace immediately; this allows the target process to
+ * immediately start processing this transaction, reducing
+ * latency. We will then return the TRANSACTION_COMPLETE when
+ * the target replies (or there is an error).
+ */
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3268,6 +3368,7 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
@@ -3347,15 +3448,11 @@ static void binder_transaction(struct binder_proc *proc,
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
@@ -3659,10 +3756,9 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_work(
- thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(
+ thread,
+ &thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3742,9 +3838,9 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work,
- &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread,
+ &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3799,8 +3895,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work, &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3974,6 +4070,8 @@ static int binder_thread_read(struct binder_proc *proc,
break;
}
w = binder_dequeue_work_head_ilocked(list);
+ if (binder_worklist_empty_ilocked(&thread->todo))
+ thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
@@ -4183,12 +4281,20 @@ static int binder_thread_read(struct binder_proc *proc,
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "put_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "copy_to_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(tr);
@@ -4258,15 +4364,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node &&
- !(t->flags & TF_ONE_WAY)) {
- binder_send_failed_reply(t, BR_DEAD_REPLY);
- } else {
- binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered transaction %d\n",
- t->debug_id);
- binder_free_transaction(t);
- }
+
+ binder_cleanup_transaction(t, "process died.",
+ BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
@@ -4731,7 +4831,6 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4773,10 +4872,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = proc;
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
- if (ret)
- return ret;
- proc->files = get_files_struct(current);
- return 0;
+
+ return ret;
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
@@ -4955,8 +5052,6 @@ static void binder_deferred_release(struct binder_proc *proc)
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->files);
-
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -5038,8 +5133,6 @@ static void binder_deferred_release(struct binder_proc *proc)
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
- struct files_struct *files;
-
int defer;
do {
@@ -5056,21 +5149,11 @@ static void binder_deferred_func(struct work_struct *work)
}
mutex_unlock(&binder_deferred_lock);
- files = NULL;
- if (defer & BINDER_DEFERRED_PUT_FILES) {
- files = proc->files;
- if (files)
- proc->files = NULL;
- }
-
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
-
- if (files)
- put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e026894..3ad1bcf 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm)
- mm = get_task_mm(alloc->tsk);
+ if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
if (!vma && need_mm) {
@@ -442,7 +437,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -483,7 +478,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- end_page_addr, NULL);
+ end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -565,10 +560,9 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
- prev->data, next->data);
+ prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE,
- NULL);
+ buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
@@ -605,8 +599,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -720,6 +713,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ /* Same as mmgrab() in later kernel versions */
+ atomic_inc(&alloc->vma_vm_mm->mm_count);
return 0;
@@ -795,6 +790,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -889,7 +886,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
@@ -926,9 +922,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
vma = alloc->vma;
if (vma) {
- mm = get_task_mm(alloc->tsk);
- if (!mm)
- goto err_get_task_mm_failed;
+ if (!mmget_not_zero(alloc->vma_vm_mm))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@@ -964,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_down_write_mmap_sem_failed:
mmput_async(mm);
-err_get_task_mm_failed:
+err_mmget:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
@@ -988,7 +984,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
@@ -1003,7 +999,6 @@ struct shrinker binder_shrinker = {
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index a3a3602..2dd33b6 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,6 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 76e3b9c..b11dffc 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+TRACE_EVENT(binder_set_priority,
+ TP_PROTO(int proc, int thread, unsigned int old_prio,
+ unsigned int desired_prio, unsigned int new_prio),
+ TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(int, thread)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(unsigned int, desired_prio)
+ ),
+ TP_fast_assign(
+ __entry->proc = proc;
+ __entry->thread = thread;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = new_prio;
+ __entry->desired_prio = desired_prio;
+ ),
+ TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+ __entry->proc, __entry->thread, __entry->old_prio,
+ __entry->new_prio, __entry->desired_prio)
+);
+
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 7ef16c0..20e2b7a 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
static void ata_tport_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
device_initialize(dev);
dev->type = &ata_port_type;
- dev->parent = get_device(parent);
+ dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
static void ata_tlink_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
int error;
device_initialize(dev);
- dev->parent = get_device(&ap->tdev);
+ dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
static void ata_tdev_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
int error;
device_initialize(dev);
- dev->parent = get_device(&link->tdev);
+ dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 0636d84..f3f538e 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
- /* enable IRQ on hotplug */
- pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
- if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
- dev_dbg(&pdev->dev,
- "enabling SATA hotplug (0x%x)\n",
- (int) tmp8);
- tmp8 |= SATA_HOTPLUG;
- pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ if (board_id == vt6421) {
+ /* enable IRQ on hotplug */
+ pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+ if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+ dev_dbg(&pdev->dev,
+ "enabling SATA hotplug (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= SATA_HOTPLUG;
+ pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ }
}
/*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 5eba478..14ff403 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -858,7 +858,8 @@ static ssize_t driver_override_store(struct device *dev,
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4f99101..dc259d2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1762,10 +1762,13 @@ void device_pm_check_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_pm_callbacks =
- (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
- (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+ !dev->class->suspend && !dev->class->resume)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
- (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211..b52c617 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -386,7 +386,7 @@ static int _of_add_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
- int nr;
+ int nr, ret;
prop = of_find_property(dev->of_node, "operating-points", NULL);
if (!prop)
@@ -409,9 +409,13 @@ static int _of_add_opp_table_v1(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (_opp_add_v1(dev, freq, volt, false))
- dev_warn(dev, "%s: Failed to add OPP %ld\n",
- __func__, freq);
+ ret = _opp_add_v1(dev, freq, volt, false);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
+ __func__, freq, ret);
+ dev_pm_opp_of_remove_table(dev);
+ return ret;
+ }
nr -= 2;
}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index ce68c1e..882f1c9 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -131,7 +131,11 @@ extern int pm_async_enabled;
/* drivers/base/power/main.c */
extern struct list_head dpm_list; /* The active device list */
+#ifdef CONFIG_QCOM_SHOW_RESUME_IRQ
extern int msm_show_resume_irq_mask;
+#else
+#define msm_show_resume_irq_mask 0
+#endif
static inline struct device *to_device(struct list_head *entry)
{
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 404d94c..feba1b2 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
struct wake_irq *wirq = _wirq;
int res;
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 06f6668..7b313b5 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -20,6 +20,7 @@
#include <linux/phy.h>
struct property_set {
+ struct device *dev;
struct fwnode_handle fwnode;
struct property_entry *properties;
};
@@ -817,6 +818,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
+ struct property_set *pset;
fwnode = dev_fwnode(dev);
if (!fwnode)
@@ -826,16 +828,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode);
+ if (pset) {
set_primary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
} else {
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ pset = to_pset_node(fwnode->secondary);
+ if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL);
- pset_free_set(to_pset_node(fwnode));
- }
}
+ if (pset && dev == pset->dev)
+ pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@@ -863,6 +865,7 @@ int device_add_properties(struct device *dev, struct property_entry *properties)
p->fwnode.type = FWNODE_PDATA;
set_secondary_fwnode(dev, &p->fwnode);
+ p->dev = dev;
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7b274ff..24f4b54 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2788,7 +2788,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2922,7 +2922,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3822eae..6f78cea7 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2163,6 +2163,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2209,6 +2212,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -4622,15 +4628,16 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ disk->queue = NULL;
}
+
+ put_disk(disk);
skdev->disk = NULL;
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b793853..3880c90 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -212,15 +212,28 @@ static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
{
u8 *send_buf;
- int len = 0;
- int err, pipe, size, sent = 0;
- int count = firmware->size;
+ int err, pipe, len, size, sent = 0;
+ int count;
BT_DBG("udev %p", udev);
+ if (!firmware || !firmware->data || firmware->size <= 0) {
+ err = -EINVAL;
+ BT_ERR("Not a valid FW file");
+ return err;
+ }
+
+ count = firmware->size;
+
+ if (count < FW_HDR_SIZE) {
+ err = -EINVAL;
+ BT_ERR("ath3k loading invalid size of file");
+ return err;
+ }
+
pipe = usb_sndctrlpipe(udev, 0);
- send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
+ send_buf = kzalloc(BULK_SIZE, GFP_KERNEL);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d02f2c1..c738bae 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev)
/* Disable platform specific wakeup interrupt */
if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
disable_irq_wake(card->plt_wake_cfg->irq_bt);
- if (!card->plt_wake_cfg->wake_by_bt)
- disable_irq(card->plt_wake_cfg->irq_bt);
+ disable_irq(card->plt_wake_cfg->irq_bt);
+ if (card->plt_wake_cfg->wake_by_bt)
+ /* Undo our disable, since interrupt handler already
+ * did this.
+ */
+ enable_irq(card->plt_wake_cfg->irq_bt);
}
return 0;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f3969..70db4d5 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
- w->size = (size | ~DDR_SIZE_MASK) + 1;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 7de9b79c..c92819c 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -75,6 +75,8 @@
#define FASTRPC_LINK_CONNECTING (0x1)
#define FASTRPC_LINK_CONNECTED (0x3)
#define FASTRPC_LINK_DISCONNECTING (0x7)
+#define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
+#define FASTRPC_GLINK_INTENT_LEN (64)
#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -235,13 +237,13 @@ struct fastrpc_channel_ctx {
int ramdumpenabled;
void *remoteheap_ramdump_dev;
struct fastrpc_glink_info link;
+ struct mutex mut;
};
struct fastrpc_apps {
struct fastrpc_channel_ctx *channel;
struct cdev cdev;
struct class *class;
- struct mutex smd_mutex;
struct smq_phy_page range;
struct hlist_head maps;
uint32_t staticpd_flags;
@@ -520,7 +522,7 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
return -ENOTTY;
}
-static void fastrpc_mmap_free(struct fastrpc_mmap *map)
+static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
{
struct fastrpc_apps *me = &gfa;
struct fastrpc_file *fl;
@@ -537,15 +539,17 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&me->hlock);
+ if (map->refs > 0)
+ return;
} else {
spin_lock(&fl->hlock);
map->refs--;
if (!map->refs)
hlist_del_init(&map->hn);
spin_unlock(&fl->hlock);
+ if (map->refs > 0 && !flags)
+ return;
}
- if (map->refs > 0)
- return;
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
@@ -635,6 +639,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
map->size = len;
map->va = (uintptr_t __user)map->phys;
} else {
+ if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
+ pr_info("adsprpc: buffer mapped with persist attr %x\n",
+ (unsigned int)map->attr);
+ map->refs = 2;
+ }
VERIFY(err, !IS_ERR_OR_NULL(map->handle =
ion_import_dma_buf_fd(fl->apps->client, fd)));
if (err)
@@ -724,7 +733,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
bail:
if (err && map)
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
return err;
}
@@ -995,7 +1004,7 @@ static void context_free(struct smq_invoke_ctx *ctx)
hlist_del_init(&ctx->hn);
spin_unlock(&ctx->fl->hlock);
for (i = 0; i < nbufs; ++i)
- fastrpc_mmap_free(ctx->maps[i]);
+ fastrpc_mmap_free(ctx->maps[i], 0);
fastrpc_buf_free(ctx->buf, 1);
ctx->magic = 0;
kfree(ctx);
@@ -1345,7 +1354,7 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
if (err)
goto bail;
} else {
- fastrpc_mmap_free(ctx->maps[i]);
+ fastrpc_mmap_free(ctx->maps[i], 0);
ctx->maps[i] = NULL;
}
}
@@ -1355,7 +1364,7 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
break;
if (!fastrpc_mmap_find(ctx->fl, (int)fdlist[i], 0, 0,
0, 0, &mmap))
- fastrpc_mmap_free(mmap);
+ fastrpc_mmap_free(mmap, 0);
}
}
if (ctx->crc && crclist && rpra)
@@ -1486,12 +1495,12 @@ static void fastrpc_init(struct fastrpc_apps *me)
INIT_HLIST_HEAD(&me->drivers);
spin_lock_init(&me->hlock);
- mutex_init(&me->smd_mutex);
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
+ mutex_init(&me->channel[i].mut);
}
}
@@ -1506,7 +1515,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
int cid = fl->cid;
int interrupted = 0;
int err = 0;
- struct timespec invoket;
+ struct timespec invoket = {0};
if (fl->profile)
getnstimeofday(&invoket);
@@ -1723,7 +1732,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
goto bail;
inbuf.pgid = current->tgid;
- inbuf.namelen = strlen(proc_name)+1;
+ inbuf.namelen = init->filelen;
inbuf.pageslen = 0;
if (!me->staticpd_flags) {
inbuf.pageslen = 1;
@@ -1775,16 +1784,17 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
err = -ENOTTY;
}
bail:
+ kfree(proc_name);
if (err && (init->flags == FASTRPC_INIT_CREATE_STATIC))
me->staticpd_flags = 0;
if (mem && err) {
if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
hyp_assign_phys(mem->phys, (uint64_t)mem->size,
destVM, 1, srcVM, hlosVMperm, 1);
- fastrpc_mmap_free(mem);
+ fastrpc_mmap_free(mem, 0);
}
if (file)
- fastrpc_mmap_free(file);
+ fastrpc_mmap_free(file, 0);
return err;
}
@@ -2015,7 +2025,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
kfree(ramdump_segments_rh);
}
}
- fastrpc_mmap_free(match);
+ fastrpc_mmap_free(match, 0);
}
} while (match);
bail:
@@ -2041,13 +2051,36 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
if (err)
goto bail;
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
bail:
if (err && map)
fastrpc_mmap_add(map);
return err;
}
+static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
+ struct fastrpc_ioctl_munmap_fd *ud) {
+ int err = 0;
+ struct fastrpc_mmap *map = NULL;
+
+ VERIFY(err, (fl && ud));
+ if (err)
+ goto bail;
+
+ if (!fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
+ pr_err("mapping not found to unmap %x va %llx %x\n",
+ ud->fd, (unsigned long long)ud->va,
+ (unsigned int)ud->len);
+ err = -1;
+ goto bail;
+ }
+ if (map)
+ fastrpc_mmap_free(map, 0);
+bail:
+ return err;
+}
+
+
static int fastrpc_internal_mmap(struct fastrpc_file *fl,
struct fastrpc_ioctl_mmap *ud)
{
@@ -2070,7 +2103,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
ud->vaddrout = map->raddr;
bail:
if (err && map)
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 0);
return err;
}
@@ -2086,7 +2119,7 @@ static void fastrpc_channel_close(struct kref *kref)
ctx->chan = NULL;
glink_unregister_link_state_cb(ctx->link.link_notify_handle);
ctx->link.link_notify_handle = NULL;
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&ctx->mut);
pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
}
@@ -2179,10 +2212,15 @@ static void fastrpc_glink_notify_state(void *handle, const void *priv,
link->port_state = FASTRPC_LINK_DISCONNECTED;
break;
case GLINK_REMOTE_DISCONNECTED:
+ mutex_lock(&me->channel[cid].mut);
if (me->channel[cid].chan) {
+ link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
fastrpc_glink_close(me->channel[cid].chan, cid);
me->channel[cid].chan = NULL;
+ } else {
+ link->port_state = FASTRPC_LINK_DISCONNECTED;
}
+ mutex_unlock(&me->channel[cid].mut);
break;
default:
break;
@@ -2193,23 +2231,20 @@ static int fastrpc_session_alloc(struct fastrpc_channel_ctx *chan, int secure,
struct fastrpc_session_ctx **session)
{
int err = 0;
- struct fastrpc_apps *me = &gfa;
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&chan->mut);
if (!*session)
err = fastrpc_session_alloc_locked(chan, secure, session);
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&chan->mut);
return err;
}
static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
struct fastrpc_session_ctx *session)
{
- struct fastrpc_apps *me = &gfa;
-
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&chan->mut);
session->used = 0;
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&chan->mut);
}
static int fastrpc_file_free(struct fastrpc_file *fl)
@@ -2238,11 +2273,11 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
fastrpc_context_list_dtor(fl);
fastrpc_buf_list_free(fl);
hlist_for_each_entry_safe(map, n, &fl->maps, hn) {
- fastrpc_mmap_free(map);
+ fastrpc_mmap_free(map, 1);
}
if (fl->ssrcount == fl->apps->channel[cid].ssrcount)
kref_put_mutex(&fl->apps->channel[cid].kref,
- fastrpc_channel_close, &fl->apps->smd_mutex);
+ fastrpc_channel_close, &fl->apps->channel[cid].mut);
if (fl->sctx)
fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
if (fl->secsctx)
@@ -2319,6 +2354,20 @@ static int fastrpc_glink_register(int cid, struct fastrpc_apps *me)
return err;
}
+static void fastrpc_glink_stop(int cid)
+{
+ int err = 0;
+ struct fastrpc_glink_info *link;
+
+ VERIFY(err, (cid >= 0 && cid < NUM_CHANNELS));
+ if (err)
+ return;
+ link = &gfa.channel[cid].link;
+
+ if (link->port_state == FASTRPC_LINK_CONNECTED)
+ link->port_state = FASTRPC_LINK_REMOTE_DISCONNECTING;
+}
+
static void fastrpc_glink_close(void *chan, int cid)
{
int err = 0;
@@ -2329,7 +2378,8 @@ static void fastrpc_glink_close(void *chan, int cid)
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED ||
+ link->port_state == FASTRPC_LINK_REMOTE_DISCONNECTING) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2495,12 +2545,14 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
struct fastrpc_apps *me = &gfa;
int cid, err = 0;
- mutex_lock(&me->smd_mutex);
-
VERIFY(err, fl && fl->sctx);
if (err)
- goto bail;
+ return err;
cid = fl->cid;
+ VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ mutex_lock(&me->channel[cid].mut);
if (me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
if (!me->channel[cid].issubsystemup) {
@@ -2509,9 +2561,6 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
goto bail;
}
}
- VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
- if (err)
- goto bail;
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == NULL)) {
@@ -2522,9 +2571,11 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
if (err)
goto bail;
+ mutex_unlock(&me->channel[cid].mut);
VERIFY(err,
wait_for_completion_timeout(&me->channel[cid].workport,
RPC_TIMEOUT));
+ mutex_lock(&me->channel[cid].mut);
if (err) {
me->channel[cid].chan = NULL;
goto bail;
@@ -2532,8 +2583,10 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
- err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 16);
- err |= glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+ err = glink_queue_rx_intent(me->channel[cid].chan, NULL,
+ FASTRPC_GLINK_INTENT_LEN);
+ err |= glink_queue_rx_intent(me->channel[cid].chan, NULL,
+ FASTRPC_GLINK_INTENT_LEN);
if (err)
pr_warn("adsprpc: initial intent fail for %d err %d\n",
cid, err);
@@ -2547,7 +2600,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
}
bail:
- mutex_unlock(&me->smd_mutex);
+ mutex_unlock(&me->channel[cid].mut);
return err;
}
@@ -2654,6 +2707,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
struct fastrpc_ioctl_invoke_crc inv;
struct fastrpc_ioctl_mmap mmap;
struct fastrpc_ioctl_munmap munmap;
+ struct fastrpc_ioctl_munmap_fd munmap_fd;
struct fastrpc_ioctl_init_attrs init;
struct fastrpc_ioctl_perf perf;
struct fastrpc_ioctl_control cp;
@@ -2720,6 +2774,16 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
if (err)
goto bail;
break;
+ case FASTRPC_IOCTL_MUNMAP_FD:
+ K_COPY_FROM_USER(err, 0, &p.munmap_fd, param,
+ sizeof(p.munmap_fd));
+ if (err)
+ goto bail;
+ VERIFY(err, 0 == (err = fastrpc_internal_munmap_fd(fl,
+ &p.munmap_fd)));
+ if (err)
+ goto bail;
+ break;
case FASTRPC_IOCTL_SETMODE:
switch ((uint32_t)ioctl_param) {
case FASTRPC_MODE_PARALLEL:
@@ -2825,16 +2889,14 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb,
ctx = container_of(nb, struct fastrpc_channel_ctx, nb);
cid = ctx - &me->channel[0];
if (code == SUBSYS_BEFORE_SHUTDOWN) {
- mutex_lock(&me->smd_mutex);
+ mutex_lock(&ctx->mut);
ctx->ssrcount++;
ctx->issubsystemup = 0;
- if (ctx->chan) {
- fastrpc_glink_close(ctx->chan, cid);
- ctx->chan = NULL;
- pr_info("'restart notifier: closed /dev/%s c %d %d'\n",
- gcinfo[cid].name, MAJOR(me->dev_no), cid);
- }
- mutex_unlock(&me->smd_mutex);
+ pr_info("'restart notifier: /dev/%s c %d %d'\n",
+ gcinfo[cid].name, MAJOR(me->dev_no), cid);
+ if (ctx->chan)
+ fastrpc_glink_stop(cid);
+ mutex_unlock(&ctx->mut);
if (cid == 0)
me->staticpd_flags = 0;
fastrpc_notify_drivers(me, cid);
@@ -2999,15 +3061,15 @@ static int fastrpc_probe(struct platform_device *pdev)
static void fastrpc_deinit(void)
{
- struct fastrpc_apps *me = &gfa;
struct fastrpc_channel_ctx *chan = gcinfo;
int i, j;
for (i = 0; i < NUM_CHANNELS; i++, chan++) {
if (chan->chan) {
kref_put_mutex(&chan->kref,
- fastrpc_channel_close, &me->smd_mutex);
+ fastrpc_channel_close, &chan->mut);
chan->chan = NULL;
+ mutex_destroy(&chan->mut);
}
for (j = 0; j < NUM_SESSIONS; j++) {
struct fastrpc_session_ctx *sess = &chan->session[j];
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 43edf71..e2f8983 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -29,6 +29,7 @@
#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc)
#define FASTRPC_IOCTL_CONTROL _IOWR('R', 12, struct fastrpc_ioctl_control)
+#define FASTRPC_IOCTL_MUNMAP_FD _IOWR('R', 13, struct fastrpc_ioctl_munmap_fd)
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -43,6 +44,9 @@
/* Set for buffers that are dma coherent */
#define FASTRPC_ATTR_COHERENT 0x4
+/* Fastrpc attribute for keeping the map persistent */
+#define FASTRPC_ATTR_KEEP_MAP 0x8
+
/* Driver should operate in parallel with the co-processor */
#define FASTRPC_MODE_PARALLEL 0
@@ -204,6 +208,13 @@ struct fastrpc_ioctl_mmap {
uintptr_t vaddrout; /* dsps virtual address */
};
+struct fastrpc_ioctl_munmap_fd {
+ int fd; /* fd */
+ uint32_t flags; /* control flags */
+ uintptr_t va; /* va */
+ ssize_t len; /* length */
+};
+
struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t __user data;
uint32_t numkeys;
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 40bfd74..0a3faba 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -77,7 +77,8 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
"Time Sync Enabled: %d\n"
"MD session mode: %d\n"
"MD session mask: %d\n"
- "Uses Time API: %d\n",
+ "Uses Time API: %d\n"
+ "Supports PD buffering: %d\n",
chk_config_get_id(),
chk_polling_response(),
driver->polling_reg_flag,
@@ -92,11 +93,12 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
driver->time_sync_enabled,
driver->md_session_mode,
driver->md_session_mask,
- driver->uses_time_api);
+ driver->uses_time_api,
+ driver->supports_pd_buffering);
for (i = 0; i < NUM_PERIPHERALS; i++) {
ret += scnprintf(buf+ret, buf_size-ret,
- "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c|\n",
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c%c|\n",
PERIPHERAL_STRING(i),
driver->feature[i].feature_mask[0],
driver->feature[i].feature_mask[1],
@@ -105,6 +107,7 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
driver->feature[i].encode_hdlc ? 'H':'h',
driver->feature[i].peripheral_buffering ? 'B':'b',
driver->feature[i].mask_centralization ? 'M':'m',
+ driver->feature[i].pd_buffering ? 'P':'p',
driver->feature[i].stm_support ? 'Q':'q',
driver->feature[i].sockets_enabled ? 'S':'s',
driver->feature[i].sent_feature_mask ? 'T':'t',
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 2df62e4..8e5d836 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -28,8 +28,7 @@
#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
#define diag_check_update(x) \
- (!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x))) \
- || (info && (info->peripheral_mask & MD_PERIPHERAL_PD_MASK(x)))) \
+ (!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))) \
struct diag_mask_info msg_mask;
struct diag_mask_info msg_bt_mask;
@@ -87,16 +86,15 @@ static int diag_apps_responds(void)
static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
{
- int i;
- int err = 0;
- int send_once = 0;
+ int err = 0, send_once = 0, i;
int header_len = sizeof(struct diag_ctrl_log_mask);
uint8_t *buf = NULL, *temp = NULL;
uint8_t upd = 0;
- uint32_t mask_size = 0;
+ uint32_t mask_size = 0, pd_mask = 0;
struct diag_ctrl_log_mask ctrl_pkt;
struct diag_mask_info *mask_info = NULL;
struct diag_log_mask_t *mask = NULL;
+ struct diagfwd_info *fwd_info = NULL;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -108,13 +106,14 @@ static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
return;
}
+ MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
if (driver->md_session_mask != 0) {
if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
if (driver->md_session_map[peripheral])
mask_info =
driver->md_session_map[peripheral]->log_mask;
- } else if (driver->md_session_mask &
- MD_PERIPHERAL_PD_MASK(peripheral)) {
+ } else if (driver->md_session_mask & pd_mask) {
upd = diag_mask_to_pd_value(driver->md_session_mask);
if (upd && driver->md_session_map[upd])
mask_info =
@@ -213,12 +212,12 @@ static void diag_send_event_mask_update(uint8_t peripheral)
{
uint8_t *buf = NULL, *temp = NULL;
uint8_t upd = 0;
+ uint32_t pd_mask = 0;
+ int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+ int write_len = 0, err = 0, i = 0, temp_len = 0;
struct diag_ctrl_event_mask header;
struct diag_mask_info *mask_info = NULL;
- int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
- int write_len = 0;
- int err = 0;
- int temp_len = 0;
+ struct diagfwd_info *fwd_info = NULL;
if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
pr_debug("diag: In %s, invalid event mask length %d\n",
@@ -236,13 +235,14 @@ static void diag_send_event_mask_update(uint8_t peripheral)
return;
}
+ MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
if (driver->md_session_mask != 0) {
if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
if (driver->md_session_map[peripheral])
mask_info =
driver->md_session_map[peripheral]->event_mask;
- } else if (driver->md_session_mask &
- MD_PERIPHERAL_PD_MASK(peripheral)) {
+ } else if (driver->md_session_mask & pd_mask) {
upd = diag_mask_to_pd_value(driver->md_session_mask);
if (upd && driver->md_session_map[upd])
mask_info =
@@ -310,17 +310,16 @@ static void diag_send_event_mask_update(uint8_t peripheral)
static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
{
- int i;
- int err = 0;
+ int i, err = 0, temp_len = 0;
int header_len = sizeof(struct diag_ctrl_msg_mask);
- int temp_len = 0;
uint8_t *buf = NULL, *temp = NULL;
uint8_t upd = 0;
- uint32_t mask_size = 0;
+ uint8_t msg_mask_tbl_count_local;
+ uint32_t mask_size = 0, pd_mask = 0;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
- uint8_t msg_mask_tbl_count_local;
+ struct diagfwd_info *fwd_info = NULL;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -332,13 +331,14 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
return;
}
+ MD_PERIPHERAL_PD_MASK(TYPE_CNTL, peripheral, pd_mask);
+
if (driver->md_session_mask != 0) {
if (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)) {
if (driver->md_session_map[peripheral])
mask_info =
driver->md_session_map[peripheral]->msg_mask;
- } else if (driver->md_session_mask &
- MD_PERIPHERAL_PD_MASK(peripheral)) {
+ } else if (driver->md_session_mask & pd_mask) {
upd = diag_mask_to_pd_value(driver->md_session_mask);
if (upd && driver->md_session_map[upd])
mask_info =
@@ -510,7 +510,7 @@ static void diag_send_feature_mask_update(uint8_t peripheral)
if (driver->supports_apps_hdlc_encoding)
DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
if (driver->supports_apps_header_untagging) {
- if (peripheral == PERIPHERAL_MODEM) {
+ if (driver->feature[peripheral].untag_header) {
DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
driver->peripheral_untag[peripheral] =
ENABLE_PKT_HEADER_UNTAGGING;
@@ -554,6 +554,11 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -655,7 +660,11 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -668,6 +677,12 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
(req->ssid_first > mask->ssid_last_tools)) {
@@ -692,18 +707,15 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info)
{
- int i;
- int write_len = 0;
+ uint32_t mask_size = 0, offset = 0;
+ uint32_t *temp = NULL;
+ int write_len = 0, i = 0, found = 0, peripheral;
int header_len = sizeof(struct diag_msg_build_mask_t);
- int found = 0;
- uint32_t mask_size = 0;
- uint32_t offset = 0;
struct diag_msg_mask_t *mask = NULL;
struct diag_msg_build_mask_t *req = NULL;
struct diag_msg_build_mask_t rsp;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask_next = NULL;
- uint32_t *temp = NULL;
mask_info = (!info) ? &msg_mask : info->msg_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -713,11 +725,23 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_build_mask_t *)src_buf;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
mask_next = mask;
@@ -799,11 +823,18 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mask_size = dest_len - write_len;
memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
write_len += mask_size;
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ diag_send_msg_mask_update(peripheral, req->ssid_first,
+ req->ssid_last);
mutex_unlock(&driver->md_session_lock);
}
end:
@@ -814,8 +845,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info)
{
- int i;
- int write_len = 0;
+ int i, write_len = 0, peripheral;
int header_len = sizeof(struct diag_msg_config_rsp_t);
struct diag_msg_config_rsp_t rsp;
struct diag_msg_config_rsp_t *req = NULL;
@@ -830,6 +860,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_config_rsp_t *)src_buf;
@@ -837,6 +872,13 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -863,11 +905,17 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
memcpy(dest_buf, &rsp, header_len);
write_len += header_len;
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
mutex_unlock(&driver->md_session_lock);
}
@@ -914,9 +962,7 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info)
{
- int i;
- int write_len = 0;
- int mask_len = 0;
+ int i, write_len = 0, mask_len = 0, peripheral;
int header_len = sizeof(struct diag_event_mask_config_t);
struct diag_event_mask_config_t rsp;
struct diag_event_mask_config_t *req;
@@ -930,7 +976,11 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_event_mask_config_t *)src_buf;
mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
if (mask_len <= 0 || mask_len > event_mask.mask_len) {
@@ -959,11 +1009,17 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
write_len += mask_len;
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_event_mask_update(i);
+ diag_send_event_mask_update(peripheral);
mutex_unlock(&driver->md_session_lock);
}
@@ -974,8 +1030,7 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info)
{
- int i;
- int write_len = 0;
+ int write_len = 0, i, peripheral;
uint8_t toggle = 0;
struct diag_event_report_t header;
struct diag_mask_info *mask_info = NULL;
@@ -988,6 +1043,11 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
toggle = *(src_buf + 1);
mutex_lock(&mask_info->lock);
@@ -1008,11 +1068,17 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
*/
header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
header.padding = 0;
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_event_mask_update(i);
+ diag_send_event_mask_update(peripheral);
mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
@@ -1045,6 +1111,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -1064,6 +1135,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
write_len += rsp_header_len;
log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!log_item->ptr) {
+ pr_err("diag: Invalid input in %s, mask: %pK\n",
+ __func__, log_item);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
if (log_item->equip_id != req->equip_id)
continue;
@@ -1149,19 +1225,17 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info)
{
- int i;
- int write_len = 0;
+ int i, peripheral, write_len = 0;
int status = LOG_STATUS_SUCCESS;
- int read_len = 0;
- int payload_len = 0;
+ int read_len = 0, payload_len = 0;
int req_header_len = sizeof(struct diag_log_config_req_t);
int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
uint32_t mask_size = 0;
struct diag_log_config_req_t *req;
struct diag_log_config_set_rsp_t rsp;
struct diag_log_mask_t *mask = NULL;
- unsigned char *temp_buf = NULL;
struct diag_mask_info *mask_info = NULL;
+ unsigned char *temp_buf = NULL;
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1171,11 +1245,20 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_log_config_req_t *)src_buf;
read_len += req_header_len;
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
if (req->equip_id >= MAX_EQUIP_ID) {
pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
__func__, req->equip_id);
@@ -1264,11 +1347,17 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
write_len += payload_len;
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_log_mask_update(i, req->equip_id);
+ diag_send_log_mask_update(peripheral, req->equip_id);
mutex_unlock(&driver->md_session_lock);
}
end:
@@ -1282,8 +1371,7 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
struct diag_mask_info *mask_info = NULL;
struct diag_log_mask_t *mask = NULL;
struct diag_log_config_rsp_t header;
- int write_len = 0;
- int i;
+ int write_len = 0, i, peripheral;
mask_info = (!info) ? &log_mask : info->log_mask;
if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
@@ -1293,9 +1381,17 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
mutex_lock(&mask->lock);
memset(mask->ptr, 0, mask->range);
@@ -1317,11 +1413,17 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
header.status = LOG_STATUS_SUCCESS;
memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
write_len += sizeof(struct diag_log_config_rsp_t);
- for (i = 0; i < NUM_PERIPHERALS; i++) {
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (i == APPS_DATA)
+ continue;
if (!diag_check_update(i))
continue;
+ if (i > NUM_PERIPHERALS)
+ peripheral = diag_search_peripheral_by_pd(i);
+ else
+ peripheral = i;
mutex_lock(&driver->md_session_lock);
- diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
mutex_unlock(&driver->md_session_lock);
}
@@ -1355,8 +1457,7 @@ int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
static int diag_create_msg_mask_table(void)
{
- int i;
- int err = 0;
+ int i, err = 0;
struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
struct diag_ssid_range_t range;
@@ -1377,8 +1478,7 @@ static int diag_create_msg_mask_table(void)
static int diag_create_build_time_mask(void)
{
- int i;
- int err = 0;
+ int i, err = 0;
const uint32_t *tbl = NULL;
uint32_t tbl_size = 0;
struct diag_msg_mask_t *build_mask = NULL;
@@ -1561,7 +1661,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
static void __diag_mask_exit(struct diag_mask_info *mask_info)
{
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
@@ -1574,8 +1674,7 @@ static void __diag_mask_exit(struct diag_mask_info *mask_info)
int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
{
- int i;
- int err = 0;
+ int i, err = 0;
struct diag_log_mask_t *src_mask = NULL;
struct diag_log_mask_t *dest_mask = NULL;
@@ -1618,11 +1717,17 @@ void diag_log_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_log_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1635,8 +1740,7 @@ void diag_log_mask_free(struct diag_mask_info *mask_info)
static int diag_msg_mask_init(void)
{
- int err = 0;
- int i;
+ int err = 0, i;
err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
@@ -1657,12 +1761,10 @@ static int diag_msg_mask_init(void)
int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
{
- int i;
- int err = 0;
+ int i, err = 0, mask_size = 0;
struct diag_msg_mask_t *src_mask = NULL;
struct diag_msg_mask_t *dest_mask = NULL;
struct diag_ssid_range_t range;
- int mask_size = 0;
if (!src || !dest)
return -EINVAL;
@@ -1701,11 +1803,18 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_msg_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1767,8 +1876,7 @@ static void diag_build_time_mask_exit(void)
static int diag_log_mask_init(void)
{
- int err = 0;
- int i;
+ int err = 0, i;
err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
@@ -1801,8 +1909,7 @@ static void diag_log_mask_exit(void)
static int diag_event_mask_init(void)
{
- int err = 0;
- int i;
+ int err = 0, i;
err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
if (err)
@@ -1855,11 +1962,8 @@ static void diag_event_mask_exit(void)
int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
struct diag_md_session_t *info)
{
- int i;
- int err = 0;
- int len = 0;
- int copy_len = 0;
- int total_len = 0;
+ int i, err = 0, len = 0;
+ int copy_len = 0, total_len = 0;
struct diag_msg_mask_userspace_t header;
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
@@ -1872,6 +1976,11 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
mutex_lock(&driver->diag_maskclear_mutex);
if (driver->mask_clear) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -1884,6 +1993,13 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
@@ -1927,11 +2043,8 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
int diag_copy_to_user_log_mask(char __user *buf, size_t count,
struct diag_md_session_t *info)
{
- int i;
- int err = 0;
- int len = 0;
- int copy_len = 0;
- int total_len = 0;
+ int i, err = 0, len = 0;
+ int copy_len = 0, total_len = 0;
struct diag_log_mask_userspace_t header;
struct diag_log_mask_t *mask = NULL;
struct diag_mask_info *mask_info = NULL;
@@ -1944,8 +2057,20 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
@@ -2014,8 +2139,7 @@ void diag_send_updates_peripheral(uint8_t peripheral)
int diag_process_apps_masks(unsigned char *buf, int len,
struct diag_md_session_t *info)
{
- int size = 0;
- int sub_cmd = 0;
+ int size = 0, sub_cmd = 0;
int (*hdlr)(unsigned char *src_buf, int src_len,
unsigned char *dest_buf, int dest_len,
struct diag_md_session_t *info) = NULL;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index dabb1f4..6377677 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -152,15 +152,20 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
return -EIO;
ch = &diag_md[id];
+ if (!ch)
+ return -EINVAL;
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
if (ch->tbl[i].buf != buf)
continue;
found = 1;
- pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
- buf, ctx, ch->tbl[i].len,
- i, id, driver->logging_mode);
+ pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+ buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
}
spin_unlock_irqrestore(&ch->lock, flags);
@@ -194,6 +199,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
found = 1;
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
@@ -224,7 +230,7 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
ch = &diag_md[i];
for (j = 0; j < ch->num_tbl_entries && !err; j++) {
entry = &ch->tbl[j];
- if (entry->len <= 0)
+ if (entry->len <= 0 || entry->buf == NULL)
continue;
peripheral = diag_md_get_peripheral(entry->ctx);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index c9ae689..9de40b0 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include "diagfwd_bridge.h"
+#define THRESHOLD_CLIENT_LIMIT 50
+
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
#define APPS_BUF_SIZE 4096
@@ -33,7 +35,7 @@
#define DIAG_MAX_REQ_SIZE (16 * 1024)
#define DIAG_MAX_RSP_SIZE (16 * 1024)
-#define APF_DIAG_PADDING 256
+#define APF_DIAG_PADDING 0
/*
* In the worst case, the HDLC buffer can be atmost twice the size of the
* original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
@@ -69,12 +71,17 @@
#define DIAG_CON_CDSP (0x0040) /* Bit mask for CDSP */
#define DIAG_CON_UPD_WLAN (0x1000) /*Bit mask for WLAN PD*/
+#define DIAG_CON_UPD_AUDIO (0x2000) /*Bit mask for AUDIO PD*/
+#define DIAG_CON_UPD_SENSORS (0x4000) /*Bit mask for SENSORS PD*/
+
#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
| DIAG_CON_SENSORS | DIAG_CON_WDSP \
| DIAG_CON_CDSP)
-#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN)
+#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN \
+ | DIAG_CON_UPD_AUDIO \
+ | DIAG_CON_UPD_SENSORS)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
@@ -214,16 +221,23 @@
#define APPS_DATA (NUM_PERIPHERALS)
#define UPD_WLAN 7
-#define NUM_UPD 1
-#define MAX_PERIPHERAL_UPD 1
+#define UPD_AUDIO 8
+#define UPD_SENSORS 9
+#define NUM_UPD 3
+
+#define MAX_PERIPHERAL_UPD 2
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
#define NUM_MD_SESSIONS (NUM_PERIPHERALS \
+ NUM_UPD + 1)
#define MD_PERIPHERAL_MASK(x) (1 << x)
-#define MD_PERIPHERAL_PD_MASK(x) \
- ((x == PERIPHERAL_MODEM) ? (1 << UPD_WLAN) : 0)\
+#define MD_PERIPHERAL_PD_MASK(x, peripheral, pd_mask) \
+do { \
+fwd_info = &peripheral_info[x][peripheral]; \
+for (i = 0; i <= fwd_info->num_pd - 2; i++) \
+ pd_mask |= (1 << fwd_info->upd_diag_id[i].pd);\
+} while (0)
/*
* Number of stm processors includes all the peripherals and
@@ -306,6 +320,8 @@ struct diag_cmd_diag_id_query_req_t {
struct diag_id_tbl_t {
struct list_head link;
uint8_t diag_id;
+ uint8_t pd_val;
+ uint8_t peripheral;
char *process_name;
} __packed;
struct diag_id_t {
@@ -452,6 +468,10 @@ struct diag_logging_mode_param_t {
uint32_t peripheral_mask;
uint32_t pd_mask;
uint8_t mode_param;
+ uint8_t diag_id;
+ uint8_t pd_val;
+ uint8_t reserved;
+ int peripheral;
} __packed;
struct diag_md_session_t {
@@ -499,6 +519,7 @@ struct diag_feature_t {
uint8_t encode_hdlc;
uint8_t untag_header;
uint8_t peripheral_buffering;
+ uint8_t pd_buffering;
uint8_t mask_centralization;
uint8_t stm_support;
uint8_t sockets_enabled;
@@ -525,12 +546,14 @@ struct diagchar_dev {
wait_queue_head_t wait_q;
struct diag_client_map *client_map;
int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
int num_clients;
int polling_reg_flag;
int use_device_tree;
int supports_separate_cmdrsp;
int supports_apps_hdlc_encoding;
int supports_apps_header_untagging;
+ int supports_pd_buffering;
int peripheral_untag[NUM_PERIPHERALS];
int supports_sockets;
/* The state requested in the STM command */
@@ -584,8 +607,8 @@ struct diagchar_dev {
struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
struct diag_feature_t feature[NUM_PERIPHERALS];
- struct diag_buffering_mode_t buffering_mode[NUM_PERIPHERALS];
- uint8_t buffering_flag[NUM_PERIPHERALS];
+ struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
+ uint8_t buffering_flag[NUM_MD_SESSIONS];
struct mutex mode_lock;
unsigned char *user_space_data_buf;
uint8_t user_space_data_busy;
@@ -693,7 +716,10 @@ int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
int diag_mask_param(void);
void diag_clear_masks(struct diag_md_session_t *info);
uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask);
-
+int diag_query_pd(char *process_name);
+int diag_search_peripheral_by_pd(uint8_t pd_val);
+uint8_t diag_search_diagid_by_pd(uint8_t pd_val,
+ uint8_t *diag_id, int *peripheral);
void diag_record_stats(int type, int flag);
struct diag_md_session_t *diag_md_session_get_pid(int pid);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54e6486..919ea0f 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -136,7 +136,6 @@ module_param(poolsize_qsc_usb, uint, 0000);
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0000);
/* Timer variables */
@@ -324,7 +323,7 @@ static int diagchar_open(struct inode *inode, struct file *file)
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
- if (i < threshold_client_limit) {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
@@ -354,11 +353,17 @@ static int diagchar_open(struct inode *inode, struct file *file)
}
}
driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
if (driver->ref_count == 0)
diag_mempool_init();
@@ -395,24 +400,22 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_CDSP;
if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
ret |= DIAG_CON_UPD_WLAN;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
+ ret |= DIAG_CON_UPD_AUDIO;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS))
+ ret |= DIAG_CON_UPD_SENSORS;
return ret;
}
uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
{
uint8_t upd = 0;
- uint32_t pd_mask = 0;
- pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
- switch (pd_mask) {
- case DIAG_CON_UPD_WLAN:
- upd = UPD_WLAN;
- break;
- default:
- DIAG_LOG(DIAG_DEBUG_MASKS,
- "asking for mask update with no pd mask set\n");
+ for (upd = UPD_WLAN; upd < NUM_MD_SESSIONS; upd++) {
+ if (peripheral_mask & (1 << upd))
+ return upd;
}
- return upd;
+ return 0;
}
int diag_mask_param(void)
@@ -1619,18 +1622,19 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_CDSP);
if (peripheral_mask & DIAG_CON_UPD_WLAN)
ret |= (1 << UPD_WLAN);
-
+ if (peripheral_mask & DIAG_CON_UPD_AUDIO)
+ ret |= (1 << UPD_AUDIO);
+ if (peripheral_mask & DIAG_CON_UPD_SENSORS)
+ ret |= (1 << UPD_SENSORS);
return ret;
}
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
int new_mode, i = 0;
- int curr_mode;
- int err = 0;
- uint8_t do_switch = 1;
- uint32_t peripheral_mask = 0;
- uint8_t peripheral, upd;
+ int curr_mode, err = 0;
+ uint8_t do_switch = 1, peripheral = 0;
+ uint32_t peripheral_mask = 0, pd_mask = 0;
if (!param)
return -EINVAL;
@@ -1642,30 +1646,42 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
}
if (param->pd_mask) {
- switch (param->pd_mask) {
- case DIAG_CON_UPD_WLAN:
- peripheral = PERIPHERAL_MODEM;
- upd = UPD_WLAN;
- break;
- default:
+ pd_mask = diag_translate_mask(param->pd_mask);
+ for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+ if (pd_mask & (1 << i)) {
+ if (diag_search_diagid_by_pd(i, ¶m->diag_id,
+ ¶m->peripheral)) {
+ param->pd_val = i;
+ break;
+ }
+ }
+ }
+ if (!param->diag_id) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "asking for mode switch with no pd mask set\n");
+ "diag_id support is not present for the pd mask = %d\n",
+ param->pd_mask);
return -EINVAL;
}
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: pd_mask = %d, diag_id = %d, peripheral = %d, pd_val = %d\n",
+ param->pd_mask, param->diag_id,
+ param->peripheral, param->pd_val);
+
+ peripheral = param->peripheral;
if (driver->md_session_map[peripheral] &&
(MD_PERIPHERAL_MASK(peripheral) &
diag_mux->mux_mask)) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag_fr: User PD is already logging onto active peripheral logging\n");
- i = upd - UPD_WLAN;
+ i = param->pd_val - UPD_WLAN;
driver->pd_session_clear[i] = 0;
return -EINVAL;
}
peripheral_mask =
diag_translate_mask(param->pd_mask);
param->peripheral_mask = peripheral_mask;
- i = upd - UPD_WLAN;
+ i = param->pd_val - UPD_WLAN;
if (!driver->pd_session_clear[i]) {
driver->pd_logging_mode[i] = 1;
driver->num_pd_session += 1;
@@ -1834,6 +1850,7 @@ static int diag_ioctl_lsm_deinit(void)
}
driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
@@ -1943,12 +1960,33 @@ static int diag_ioctl_get_real_time(unsigned long ioarg)
static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
{
struct diag_buffering_mode_t params;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
if (copy_from_user(¶ms, (void __user *)ioarg, sizeof(params)))
return -EFAULT;
- if (params.peripheral >= NUM_PERIPHERALS)
- return -EINVAL;
+ diag_map_pd_to_diagid(params.peripheral, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral = %d\n", __func__,
+ peripheral);
+ return -EIO;
+ }
+
+ if (params.peripheral > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
mutex_lock(&driver->mode_lock);
driver->buffering_flag[params.peripheral] = 1;
@@ -1959,24 +1997,29 @@ static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
{
- uint8_t peripheral;
+ uint8_t pd, diag_id = 0;
+ int peripheral = 0;
- if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
+ if (copy_from_user(&pd, (void __user *)ioarg, sizeof(uint8_t)))
return -EFAULT;
- if (peripheral >= NUM_PERIPHERALS) {
+ diag_map_pd_to_diagid(pd, &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
pr_err("diag: In %s, invalid peripheral %d\n", __func__,
peripheral);
return -EINVAL;
}
- if (!driver->feature[peripheral].peripheral_buffering) {
- pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
- __func__, peripheral);
+ if (pd > NUM_PERIPHERALS &&
+ !driver->feature[peripheral].pd_buffering) {
+ pr_err("diag: In %s, pd buffering not supported for peripheral:%d\n",
+ __func__, peripheral);
return -EIO;
}
- return diag_send_peripheral_drain_immediate(peripheral);
+ return diag_send_peripheral_drain_immediate(pd, diag_id, peripheral);
}
static int diag_ioctl_dci_support(unsigned long ioarg)
@@ -2020,11 +2063,126 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
return 0;
}
+/*
+ * diag_search_peripheral_by_pd(uint8_t pd_val)
+ *
+ * Function will return peripheral by searching pd in the
+ * diag_id table.
+ *
+ */
+
+int diag_search_peripheral_by_pd(uint8_t pd_val)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_id_tbl_t *item = NULL;
+
+ mutex_lock(&driver->diag_id_mutex);
+ list_for_each_safe(start, temp, &driver->diag_id_list) {
+ item = list_entry(start, struct diag_id_tbl_t, link);
+ if (pd_val == item->pd_val) {
+ mutex_unlock(&driver->diag_id_mutex);
+ return item->peripheral;
+ }
+ }
+ mutex_unlock(&driver->diag_id_mutex);
+ return -EINVAL;
+}
+
+/*
+ * diag_search_diagid_by_pd(uint8_t pd_val,
+ * uint8_t *diag_id, int *peripheral)
+ *
+ * Function will update the peripheral and diag_id
+ * from the pd passed as an argument.
+ *
+ */
+
+uint8_t diag_search_diagid_by_pd(uint8_t pd_val,
+ uint8_t *diag_id, int *peripheral)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_id_tbl_t *item = NULL;
+
+ mutex_lock(&driver->diag_id_mutex);
+ list_for_each_safe(start, temp, &driver->diag_id_list) {
+ item = list_entry(start, struct diag_id_tbl_t, link);
+ if (pd_val == item->pd_val) {
+ *peripheral = item->peripheral;
+ *diag_id = item->diag_id;
+ mutex_unlock(&driver->diag_id_mutex);
+ return 1;
+ }
+ }
+ mutex_unlock(&driver->diag_id_mutex);
+ return 0;
+}
+
+/*
+ * diag_query_pd_name(char *process_name, char *search_str)
+ *
+ * The function searches for the PD-specific substring (search_str) in the
+ * process-name string received in the peripheral's control packet.
+ *
+ */
+
+static int diag_query_pd_name(char *process_name, char *search_str)
+{
+ if (!process_name)
+ return -EINVAL;
+
+ if (strnstr(process_name, search_str, strlen(process_name)))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * diag_query_pd(char *process_name)
+ *
+ * The function returns the PD information based on the presence of
+ * the pd specific string in the control packet's string from peripheral.
+ *
+ */
+
+int diag_query_pd(char *process_name)
+{
+ if (!process_name)
+ return -EINVAL;
+
+ if (diag_query_pd_name(process_name, "modem/root_pd"))
+ return PERIPHERAL_MODEM;
+ if (diag_query_pd_name(process_name, "adsp/root_pd"))
+ return PERIPHERAL_LPASS;
+ if (diag_query_pd_name(process_name, "slpi/root_pd"))
+ return PERIPHERAL_SENSORS;
+ if (diag_query_pd_name(process_name, "cdsp/root_pd"))
+ return PERIPHERAL_CDSP;
+ if (diag_query_pd_name(process_name, "wlan_pd"))
+ return UPD_WLAN;
+ if (diag_query_pd_name(process_name, "audio_pd"))
+ return UPD_AUDIO;
+ if (diag_query_pd_name(process_name, "sensor_pd"))
+ return UPD_SENSORS;
+
+ return -EINVAL;
+}
+
+/*
+ * diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
+ *
+ * IOCTL handler that, based on the received parameter, determines which
+ * peripheral hosts the requested PD and reports whether that peripheral
+ * supports the diag_id and packet-tagging features.
+ *
+ */
+
static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
{
- int ret = -EINVAL;
- int peripheral;
- char *p_str = NULL;
+ int ret = -EINVAL, i = 0;
+ int peripheral = -EINVAL;
+ uint32_t pd_mask = 0;
if (!param)
return -EINVAL;
@@ -2035,17 +2193,21 @@ static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
return -EINVAL;
}
- switch (param->pd_mask) {
- case DIAG_CON_UPD_WLAN:
- peripheral = PERIPHERAL_MODEM;
- p_str = "MODEM";
- break;
- default:
- DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "Invalid pd mask, returning EINVAL\n");
- return -EINVAL;
+ if (param->pd_mask) {
+ pd_mask = diag_translate_mask(param->pd_mask);
+ for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+ if (pd_mask & (1 << i)) {
+ peripheral = diag_search_peripheral_by_pd(i);
+ break;
+ }
+ }
+ if (peripheral < 0) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag_id support is not present for the pd mask = %d\n",
+ param->pd_mask);
+ return -EINVAL;
+ }
}
-
mutex_lock(&driver->diag_cntl_mutex);
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag: %s: Untagging support on APPS is %s\n", __func__,
@@ -2053,8 +2215,8 @@ static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
"present" : "absent"));
DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "diag: %s: Tagging support on %s is %s\n",
- __func__, p_str,
+ "diag: %s: Tagging support on peripheral = %d is %s\n",
+ __func__, peripheral,
(driver->feature[peripheral].untag_header ?
"present" : "absent"));
@@ -2966,16 +3128,6 @@ static int diag_user_process_apps_data(const char __user *buf, int len,
return 0;
}
-static int check_data_ready(int index)
-{
- int data_type = 0;
-
- mutex_lock(&driver->diagchar_mutex);
- data_type = driver->data_ready[index];
- mutex_unlock(&driver->diagchar_mutex);
- return data_type;
-}
-
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -3002,7 +3154,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
pr_err("diag: bad address from user side\n");
return -EFAULT;
}
- wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
mutex_lock(&driver->diagchar_mutex);
@@ -3013,6 +3166,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
if (ret == -EFAULT)
goto exit;
@@ -3029,11 +3183,13 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
* memory device any more, the condition needs to be cleared.
*/
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
}
if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
if (ret == -EFAULT)
goto exit;
@@ -3058,6 +3214,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (ret == -EFAULT)
goto exit;
driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
diag_remove_client_entry(file);
return ret;
@@ -3075,6 +3232,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3101,6 +3259,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
goto exit;
}
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3117,6 +3276,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3133,6 +3293,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (ret == -EFAULT)
goto exit;
driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_pktdata = 0;
goto exit;
}
@@ -3150,6 +3311,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
goto exit;
driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_dcipktdata = 0;
goto exit;
}
@@ -3171,6 +3333,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
goto exit;
driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3190,6 +3353,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (ret == -EFAULT)
goto exit;
driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3221,6 +3385,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
@@ -3742,7 +3907,7 @@ static int __init diagchar_init(void)
goto fail;
mutex_init(&driver->diag_id_mutex);
INIT_LIST_HEAD(&driver->diag_id_list);
- diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
+ diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS", APPS_DATA, APPS_DATA);
pr_debug("diagchar initialized now");
ret = diagfwd_bridge_init();
if (ret)
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index fc67c1a..4195b40 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -226,6 +226,7 @@ void chk_logging_wakeup(void)
* situation.
*/
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: Force wakeup of logging process\n");
wake_up_interruptible(&driver->wait_q);
break;
@@ -491,8 +492,10 @@ void diag_update_userspace_clients(unsigned int type)
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid != 0)
+ if (driver->client_map[i].pid != 0) {
driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -509,6 +512,8 @@ void diag_update_md_clients(unsigned int type)
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
break;
}
}
@@ -524,6 +529,7 @@ void diag_update_sleeping_process(int process_id, int data_type)
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == process_id) {
driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
break;
}
wake_up_interruptible(&driver->wait_q);
@@ -993,6 +999,8 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
struct diag_cmd_reg_entry_t entry;
struct diag_cmd_reg_entry_t *temp_entry = NULL;
struct diag_cmd_reg_t *reg_item = NULL;
+ struct diagfwd_info *fwd_info = NULL;
+ uint32_t pd_mask = 0;
if (!buf)
return -EIO;
@@ -1030,12 +1038,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
temp_entry = diag_cmd_search(&entry, ALL_PROC);
if (temp_entry) {
reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
- entry);
+ entry);
if (info) {
+ MD_PERIPHERAL_PD_MASK(TYPE_CMD, reg_item->proc,
+ pd_mask);
if ((MD_PERIPHERAL_MASK(reg_item->proc) &
info->peripheral_mask) ||
- (MD_PERIPHERAL_PD_MASK(reg_item->proc) &
- info->peripheral_mask))
+ (pd_mask & info->peripheral_mask))
write_len = diag_send_data(reg_item, buf, len);
} else {
if (MD_PERIPHERAL_MASK(reg_item->proc) &
@@ -1657,6 +1666,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
switch (type) {
case TYPE_DATA:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
diagfwd_write_done(peripheral, type, num);
diag_ws_on_copy(DIAG_WS_MUX);
} else if (peripheral == APPS_DATA) {
@@ -1671,6 +1683,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
case TYPE_CMD:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
diagfwd_write_done(peripheral, type, num);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
}
if (peripheral == APPS_DATA ||
ctxt == DIAG_MEMORY_DEVICE_MODE) {
@@ -1710,6 +1725,7 @@ int diagfwd_init(void)
driver->supports_separate_cmdrsp = 1;
driver->supports_apps_hdlc_encoding = 1;
driver->supports_apps_header_untagging = 1;
+ driver->supports_pd_buffering = 1;
for (i = 0; i < NUM_PERIPHERALS; i++)
driver->peripheral_untag[i] = 0;
mutex_init(&driver->diag_hdlc_mutex);
@@ -1740,6 +1756,7 @@ int diagfwd_init(void)
driver->feature[i].stm_support = DISABLE_STM;
driver->feature[i].rcvd_feature_mask = 0;
driver->feature[i].peripheral_buffering = 0;
+ driver->feature[i].pd_buffering = 0;
driver->feature[i].encode_hdlc = 0;
driver->feature[i].untag_header =
DISABLE_PKT_HEADER_UNTAGGING;
@@ -1747,6 +1764,9 @@ int diagfwd_init(void)
driver->feature[i].log_on_demand = 0;
driver->feature[i].sent_feature_mask = 0;
driver->feature[i].diag_id_support = 0;
+ }
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
driver->buffering_mode[i].peripheral = i;
driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
@@ -1787,6 +1807,9 @@ int diagfwd_init(void)
}
kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
+
if (driver->apps_req_buf == NULL) {
driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
if (!driver->apps_req_buf)
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index d8c107e..eaca17a 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -423,6 +423,8 @@ static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
enable_socket_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_DIAGID_SUPPORT))
driver->feature[peripheral].diag_id_support = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_PD_BUFFERING))
+ driver->feature[peripheral].pd_buffering = 1;
}
process_socket_feature(peripheral);
@@ -694,7 +696,8 @@ static void process_build_mask_report(uint8_t *buf, uint32_t len,
}
}
-int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name)
+int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name,
+ uint8_t pd_val, uint8_t peripheral)
{
struct diag_id_tbl_t *new_item = NULL;
@@ -713,6 +716,8 @@ int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name)
}
kmemleak_not_leak(new_item->process_name);
new_item->diag_id = diag_id;
+ new_item->pd_val = pd_val;
+ new_item->peripheral = peripheral;
strlcpy(new_item->process_name, process_name, strlen(process_name) + 1);
INIT_LIST_HEAD(&new_item->link);
mutex_lock(&driver->diag_id_mutex);
@@ -747,54 +752,58 @@ static void process_diagid(uint8_t *buf, uint32_t len,
{
struct diag_ctrl_diagid *header = NULL;
struct diag_ctrl_diagid ctrl_pkt;
- struct diagfwd_info *fwd_info_data = NULL;
- struct diagfwd_info *fwd_info_cmd = NULL;
+ struct diagfwd_info *fwd_info = NULL;
char *process_name = NULL;
int err = 0;
+ int pd_val;
char *root_str = NULL;
uint8_t local_diag_id = 0;
- uint8_t new_request = 0, i = 0;
+ uint8_t new_request = 0, i = 0, ch_type = 0;
if (!buf || len == 0 || peripheral >= NUM_PERIPHERALS)
return;
- fwd_info_data = &peripheral_info[TYPE_DATA][peripheral];
- if (!fwd_info_data)
- return;
-
- fwd_info_cmd = &peripheral_info[TYPE_CMD][peripheral];
- if (!fwd_info_cmd)
- return;
-
header = (struct diag_ctrl_diagid *)buf;
process_name = (char *)&header->process_name;
if (diag_query_diag_id(process_name, &local_diag_id))
ctrl_pkt.diag_id = local_diag_id;
else {
diag_id++;
- diag_add_diag_id_to_list(diag_id, process_name);
- ctrl_pkt.diag_id = diag_id;
new_request = 1;
+ pd_val = diag_query_pd(process_name);
+ if (pd_val < 0)
+ return;
+ diag_add_diag_id_to_list(diag_id, process_name,
+ pd_val, peripheral);
+ ctrl_pkt.diag_id = diag_id;
}
root_str = strnstr(process_name, DIAG_ID_ROOT_STRING,
strlen(process_name));
if (new_request) {
- fwd_info_data->num_pd++;
- fwd_info_cmd->num_pd++;
- if (root_str) {
- fwd_info_cmd->diagid_root = ctrl_pkt.diag_id;
- fwd_info_data->diagid_root = ctrl_pkt.diag_id;
- } else {
- i = fwd_info_cmd->num_pd - 2;
- if (i >= 0 && i < MAX_PERIPHERAL_UPD)
- fwd_info_cmd->diagid_user[i] =
- ctrl_pkt.diag_id;
+ for (ch_type = 0; ch_type < NUM_TYPES; ch_type++) {
+ if (ch_type == TYPE_DCI ||
+ ch_type == TYPE_DCI_CMD)
+ continue;
+ fwd_info = &peripheral_info[ch_type][peripheral];
+ fwd_info->num_pd++;
- i = fwd_info_data->num_pd - 2;
- if (i >= 0 && i < MAX_PERIPHERAL_UPD)
- fwd_info_data->diagid_user[i] =
- ctrl_pkt.diag_id;
+ if (root_str) {
+ fwd_info->root_diag_id.diagid_val =
+ ctrl_pkt.diag_id;
+ fwd_info->root_diag_id.reg_str =
+ process_name;
+ fwd_info->root_diag_id.pd = pd_val;
+ } else {
+ i = fwd_info->num_pd - 2;
+ if (i >= 0 && i < MAX_PERIPHERAL_UPD) {
+ fwd_info->upd_diag_id[i].diagid_val =
+ ctrl_pkt.diag_id;
+ fwd_info->upd_diag_id[i].reg_str =
+ process_name;
+ fwd_info->upd_diag_id[i].pd = pd_val;
+ }
+ }
}
}
@@ -827,7 +836,8 @@ static void process_diagid(uint8_t *buf, uint32_t len,
driver->diag_id_sent[peripheral] = 1;
diag_send_updates_peripheral(peripheral);
}
- diagfwd_buffers_init(fwd_info_data);
+ fwd_info = &peripheral_info[TYPE_DATA][peripheral];
+ diagfwd_buffers_init(fwd_info);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
driver->diag_id_sent[peripheral], peripheral,
@@ -939,32 +949,54 @@ static int diag_compute_real_time(int idx)
}
static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
- int real_time)
+ uint8_t diag_id, int real_time)
{
struct diag_ctrl_msg_diagmode diagmode;
+ struct diag_ctrl_msg_diagmode_v2 diagmode_v2;
int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ int msg_size_2 = sizeof(struct diag_ctrl_msg_diagmode_v2);
if (!dest_buf)
return;
- diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
- diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
- diagmode.version = 1;
- diagmode.sleep_vote = real_time ? 1 : 0;
- /*
- * 0 - Disables real-time logging (to prevent
- * frequent APPS wake-ups, etc.).
- * 1 - Enable real-time logging
- */
- diagmode.real_time = real_time;
- diagmode.use_nrt_values = 0;
- diagmode.commit_threshold = 0;
- diagmode.sleep_threshold = 0;
- diagmode.sleep_time = 0;
- diagmode.drain_timer_val = 0;
- diagmode.event_stale_timer_val = 0;
-
- memcpy(dest_buf, &diagmode, msg_size);
+ if (diag_id) {
+ diagmode_v2.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode_v2.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN_V2;
+ diagmode_v2.version = 2;
+ diagmode_v2.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode_v2.real_time = real_time;
+ diagmode_v2.use_nrt_values = 0;
+ diagmode_v2.commit_threshold = 0;
+ diagmode_v2.sleep_threshold = 0;
+ diagmode_v2.sleep_time = 0;
+ diagmode_v2.drain_timer_val = 0;
+ diagmode_v2.event_stale_timer_val = 0;
+ diagmode_v2.diag_id = diag_id;
+ memcpy(dest_buf, &diagmode_v2, msg_size_2);
+ } else {
+ diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
+ diagmode.version = 1;
+ diagmode.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode.real_time = real_time;
+ diagmode.use_nrt_values = 0;
+ diagmode.commit_threshold = 0;
+ diagmode.sleep_threshold = 0;
+ diagmode.sleep_time = 0;
+ diagmode.drain_timer_val = 0;
+ diagmode.event_stale_timer_val = 0;
+ memcpy(dest_buf, &diagmode, msg_size);
+ }
}
void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
@@ -1049,7 +1081,7 @@ static void diag_send_diag_mode_update_remote(int token, int real_time)
memcpy(buf + write_len, &dci_header, dci_header_size);
write_len += dci_header_size;
- diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
+ diag_create_diag_mode_ctrl_pkt(buf + write_len, 0, real_time);
write_len += msg_size;
*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
write_len += sizeof(uint8_t);
@@ -1155,14 +1187,18 @@ void diag_real_time_work_fn(struct work_struct *work)
}
#endif
-static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time,
+ uint8_t diag_id)
{
- char buf[sizeof(struct diag_ctrl_msg_diagmode)];
- int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ char buf[sizeof(struct diag_ctrl_msg_diagmode_v2)];
+ int msg_size = 0;
int err = 0;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->diagfwd_cntl[peripheral] ||
!driver->diagfwd_cntl[peripheral]->ch_open) {
@@ -1177,12 +1213,17 @@ static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
return -EINVAL;
}
- diag_create_diag_mode_ctrl_pkt(buf, real_time);
+ msg_size = (diag_id ? sizeof(struct diag_ctrl_msg_diagmode_v2) :
+ sizeof(struct diag_ctrl_msg_diagmode));
+
+ diag_create_diag_mode_ctrl_pkt(buf, diag_id, real_time);
mutex_lock(&driver->diag_cntl_mutex);
+
err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+
if (err && err != -ENODEV) {
- pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+ pr_err("diag: In %s, unable to write, peripheral: %d, type: %d, len: %d, err: %d\n",
__func__, peripheral, TYPE_CNTL,
msg_size, err);
} else {
@@ -1208,27 +1249,56 @@ int diag_send_real_time_update(uint8_t peripheral, int real_time)
return -EINVAL;
}
- return __diag_send_real_time_update(peripheral, real_time);
+ return __diag_send_real_time_update(peripheral, real_time, 0);
+}
+
+void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral)
+{
+ if (!diag_search_diagid_by_pd(pd, (void *)diag_id,
+ (void *)peripheral)) {
+ *diag_id = 0;
+ if ((pd >= 0) && pd < NUM_PERIPHERALS)
+ *peripheral = pd;
+ else
+ *peripheral = -EINVAL;
+ }
+
+ if (*peripheral >= 0)
+ if (!driver->feature[*peripheral].pd_buffering)
+ *diag_id = 0;
}
int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
{
int err = 0;
int mode = MODE_REALTIME;
- uint8_t peripheral = 0;
+ int peripheral = 0;
+ uint8_t diag_id = 0;
if (!params)
return -EIO;
- peripheral = params->peripheral;
- if (peripheral >= NUM_PERIPHERALS) {
+ diag_map_pd_to_diagid(params->peripheral,
+ &diag_id, &peripheral);
+
+ if ((peripheral < 0) ||
+ peripheral >= NUM_PERIPHERALS) {
pr_err("diag: In %s, invalid peripheral %d\n", __func__,
peripheral);
return -EINVAL;
}
- if (!driver->buffering_flag[peripheral])
+ if (!driver->buffering_flag[params->peripheral]) {
+ pr_err("diag: In %s, buffering flag not set for %d\n", __func__,
+ params->peripheral);
return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
@@ -1247,7 +1317,7 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
__func__, peripheral);
- driver->buffering_flag[peripheral] = 0;
+ driver->buffering_flag[params->peripheral] = 0;
return -EIO;
}
@@ -1262,35 +1332,39 @@ int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
(params->low_wm_val != DIAG_MIN_WM_VAL))) {
pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
__func__, params->high_wm_val, params->low_wm_val,
- peripheral);
+ params->peripheral);
return -EINVAL;
}
mutex_lock(&driver->mode_lock);
- err = diag_send_buffering_tx_mode_pkt(peripheral, params);
+ err = diag_send_buffering_tx_mode_pkt(peripheral, diag_id, params);
if (err) {
pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
__func__, peripheral, err);
goto fail;
}
- err = diag_send_buffering_wm_values(peripheral, params);
+ err = diag_send_buffering_wm_values(peripheral, diag_id, params);
if (err) {
pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
__func__, peripheral, err);
goto fail;
}
- err = __diag_send_real_time_update(peripheral, mode);
+ err = __diag_send_real_time_update(peripheral, mode, diag_id);
if (err) {
pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
__func__, peripheral, mode, err);
goto fail;
}
- driver->buffering_mode[peripheral].peripheral = peripheral;
- driver->buffering_mode[peripheral].mode = params->mode;
- driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
- driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
+ driver->buffering_mode[params->peripheral].peripheral =
+ params->peripheral;
+ driver->buffering_mode[params->peripheral].mode =
+ params->mode;
+ driver->buffering_mode[params->peripheral].low_wm_val =
+ params->low_wm_val;
+ driver->buffering_mode[params->peripheral].high_wm_val =
+ params->high_wm_val;
if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
- driver->buffering_flag[peripheral] = 0;
+ driver->buffering_flag[params->peripheral] = 0;
fail:
mutex_unlock(&driver->mode_lock);
return err;
@@ -1329,10 +1403,12 @@ int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
return err;
}
-int diag_send_peripheral_drain_immediate(uint8_t peripheral)
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral)
{
int err = 0;
struct diag_ctrl_drain_immediate ctrl_pkt;
+ struct diag_ctrl_drain_immediate_v2 ctrl_pkt_v2;
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1347,32 +1423,57 @@ int diag_send_peripheral_drain_immediate(uint8_t peripheral)
return -ENODEV;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
- /* The length of the ctrl pkt is size of version and stream id */
- ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
-
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
- peripheral, err);
+ if (diag_id && driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is size of version,
+ * diag_id and stream id
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /*
+ * The length of the ctrl pkt is
+ * size of version and stream id
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
}
return err;
}
int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
- struct diag_buffering_mode_t *params)
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+ struct diag_ctrl_peripheral_tx_mode_v2 ctrl_pkt_v2;
if (!params)
return -EIO;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1380,9 +1481,6 @@ int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
return -EINVAL;
}
- if (params->peripheral != peripheral)
- return -EINVAL;
-
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
@@ -1394,36 +1492,67 @@ int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
return -EINVAL;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
- /* Control packet length is size of version, stream_id and tx_mode */
- ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
- ctrl_pkt.tx_mode = params->mode;
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
- peripheral, err);
- goto fail;
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /*
+ * Control packet length is size of version,
+ * stream_id and tx_mode
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
}
- driver->buffering_mode[peripheral].mode = params->mode;
+ driver->buffering_mode[params->peripheral].mode = params->mode;
fail:
return err;
}
int diag_send_buffering_wm_values(uint8_t peripheral,
- struct diag_buffering_mode_t *params)
+ uint8_t diag_id, struct diag_buffering_mode_t *params)
{
int err = 0;
struct diag_ctrl_set_wq_val ctrl_pkt;
+ struct diag_ctrl_set_wq_val_v2 ctrl_pkt_v2;
if (!params)
return -EIO;
- if (peripheral >= NUM_PERIPHERALS)
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
return -EINVAL;
+ }
if (!driver->feature[peripheral].peripheral_buffering) {
pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
@@ -1438,9 +1567,6 @@ int diag_send_buffering_wm_values(uint8_t peripheral,
return -ENODEV;
}
- if (params->peripheral != peripheral)
- return -EINVAL;
-
switch (params->mode) {
case DIAG_BUFFERING_MODE_STREAMING:
case DIAG_BUFFERING_MODE_THRESHOLD:
@@ -1452,21 +1578,45 @@ int diag_send_buffering_wm_values(uint8_t peripheral,
return -EINVAL;
}
- ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
- /* Control packet length is size of version, stream_id and wmq values */
- ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
- ctrl_pkt.version = 1;
- ctrl_pkt.stream_id = 1;
- ctrl_pkt.high_wm_val = params->high_wm_val;
- ctrl_pkt.low_wm_val = params->low_wm_val;
+ if (diag_id &&
+ driver->feature[peripheral].pd_buffering) {
+ ctrl_pkt_v2.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version, diag_id,
+ * stream_id and wmq values
+ */
+ ctrl_pkt_v2.len = sizeof(uint32_t) + (4 * sizeof(uint8_t));
+ ctrl_pkt_v2.version = 2;
+ ctrl_pkt_v2.diag_id = diag_id;
+ ctrl_pkt_v2.stream_id = 1;
+ ctrl_pkt_v2.high_wm_val = params->high_wm_val;
+ ctrl_pkt_v2.low_wm_val = params->low_wm_val;
- err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
- sizeof(ctrl_pkt));
- if (err && err != -ENODEV) {
- pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
- peripheral, err);
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt_v2,
+ sizeof(ctrl_pkt_v2));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+ } else {
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /*
+ * Control packet length is size of version,
+ * stream_id and wmq values
+ */
+ ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.high_wm_val = params->high_wm_val;
+ ctrl_pkt.low_wm_val = params->low_wm_val;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
}
-
return err;
}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 8b22d7e..848ad87 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -69,6 +69,7 @@
#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
#define F_DIAG_DIAGID_SUPPORT 15
#define F_DIAG_PKT_HEADER_UNTAG 16
+#define F_DIAG_PD_BUFFERING 17
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -86,7 +87,8 @@
#define ENABLE_PKT_HEADER_UNTAGGING 1
#define DISABLE_PKT_HEADER_UNTAGGING 0
-#define DIAG_MODE_PKT_LEN 36
+#define DIAG_MODE_PKT_LEN 36
+#define DIAG_MODE_PKT_LEN_V2 37
struct diag_ctrl_pkt_header_t {
uint32_t pkt_id;
@@ -172,6 +174,21 @@ struct diag_ctrl_msg_diagmode {
uint32_t event_stale_timer_val;
} __packed;
+struct diag_ctrl_msg_diagmode_v2 {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t sleep_vote;
+ uint32_t real_time;
+ uint32_t use_nrt_values;
+ uint32_t commit_threshold;
+ uint32_t sleep_threshold;
+ uint32_t sleep_time;
+ uint32_t drain_timer_val;
+ uint32_t event_stale_timer_val;
+ uint8_t diag_id;
+} __packed;
+
struct diag_ctrl_msg_stm {
uint32_t ctrl_pkt_id;
uint32_t ctrl_pkt_data_len;
@@ -250,6 +267,15 @@ struct diag_ctrl_peripheral_tx_mode {
uint8_t tx_mode;
} __packed;
+struct diag_ctrl_peripheral_tx_mode_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t tx_mode;
+} __packed;
+
struct diag_ctrl_drain_immediate {
uint32_t pkt_id;
uint32_t len;
@@ -257,6 +283,14 @@ struct diag_ctrl_drain_immediate {
uint8_t stream_id;
} __packed;
+struct diag_ctrl_drain_immediate_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+} __packed;
+
struct diag_ctrl_set_wq_val {
uint32_t pkt_id;
uint32_t len;
@@ -266,6 +300,16 @@ struct diag_ctrl_set_wq_val {
uint8_t low_wm_val;
} __packed;
+struct diag_ctrl_set_wq_val_v2 {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t diag_id;
+ uint8_t stream_id;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
struct diag_ctrl_diagid {
uint32_t pkt_id;
uint32_t len;
@@ -275,7 +319,8 @@ struct diag_ctrl_diagid {
} __packed;
int diagfwd_cntl_init(void);
-int diag_add_diag_id_to_list(uint8_t diag_id, char *process_name);
+int diag_add_diag_id_to_list(uint8_t diag_id,
+ char *process_name, uint8_t pd_val, uint8_t peripheral);
void diagfwd_cntl_channel_init(void);
void diagfwd_cntl_exit(void);
void diag_cntl_channel_open(struct diagfwd_info *p_info);
@@ -289,9 +334,10 @@ void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
void diag_real_time_work_fn(struct work_struct *work);
int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
-int diag_send_peripheral_drain_immediate(uint8_t peripheral);
+int diag_send_peripheral_drain_immediate(uint8_t pd,
+ uint8_t diag_id, int peripheral);
int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
- struct diag_buffering_mode_t *params);
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
int diag_send_buffering_wm_values(uint8_t peripheral,
- struct diag_buffering_mode_t *params);
+ uint8_t diag_id, struct diag_buffering_mode_t *params);
#endif
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 4d4b660..4d6ae23 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -30,6 +30,7 @@
#include "diag_mux.h"
#include "diag_ipc_logging.h"
#include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
struct data_header {
uint8_t control_char;
@@ -187,8 +188,10 @@ static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
{
+ int i, ctx = 0;
uint32_t max_size = 0;
unsigned char *temp_buf = NULL;
+ struct diag_md_info *ch = NULL;
if (!buf || len == 0)
return -EINVAL;
@@ -202,11 +205,31 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
}
if (buf->len < max_size) {
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+ ch = &diag_md[DIAG_LOCAL_PROC];
+ for (i = 0; ch != NULL &&
+ i < ch->num_tbl_entries; i++) {
+ if (ch->tbl[i].buf == buf->data) {
+ ctx = ch->tbl[i].ctx;
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+ GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx));
+ break;
+ }
+ }
+ }
temp_buf = krealloc(buf->data, max_size +
APF_DIAG_PADDING,
GFP_KERNEL);
if (!temp_buf)
return -ENOMEM;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Reallocated data buffer: %pK with size: %d\n",
+ temp_buf, max_size);
buf->data = temp_buf;
buf->len = max_size;
}
@@ -215,10 +238,19 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
return buf->len;
}
+/*
+ * diag_md_get_peripheral(int ctxt)
+ *
+ * Context(ctxt) contains peripheral, channel type, buffer num and diag_id
+ * The function decodes the ctxt, checks for the active user pd session
+ * using diag_id and returns peripheral if not active or the PD if active.
+ *
+ */
int diag_md_get_peripheral(int ctxt)
{
- int pd = 0, i = 0;
+ uint8_t diag_id = 0, i = 0, pd = 0;
int type = 0, peripheral = -EINVAL;
+ int index = 0;
struct diagfwd_info *fwd_info = NULL;
peripheral = GET_BUF_PERIPHERAL(ctxt);
@@ -246,22 +278,22 @@ int diag_md_get_peripheral(int ctxt)
if (!fwd_info)
return -EINVAL;
- pd = GET_PD_CTXT(ctxt);
+ diag_id = GET_PD_CTXT(ctxt);
- if (driver->num_pd_session) {
- if (pd == fwd_info->diagid_root) {
- if (peripheral > NUM_PERIPHERALS)
+ if (driver->num_pd_session &&
+ driver->feature[peripheral].untag_header) {
+ if (diag_id == fwd_info->root_diag_id.diagid_val) {
+ if (peripheral != fwd_info->root_diag_id.pd)
peripheral = -EINVAL;
} else {
for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
- if (pd == fwd_info->diagid_user[i]) {
- switch (peripheral) {
- case PERIPHERAL_MODEM:
- if (driver->pd_logging_mode[0])
- peripheral = UPD_WLAN;
- break;
- default:
- peripheral = -EINVAL;
+ if (diag_id ==
+ fwd_info->upd_diag_id[i].diagid_val) {
+ pd = fwd_info->upd_diag_id[i].pd;
+ index = pd - UPD_WLAN;
+ if ((index >= 0 && index < NUM_UPD) &&
+ driver->pd_logging_mode[index]) {
+ peripheral = pd;
break;
}
}
@@ -377,12 +409,28 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(buf->ctxt));
}
diagfwd_queue_read(fwd_info);
}
+/*
+ * diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ * unsigned char *buf, int len)
+ *
+ * Data received from the peripheral can contain data from core and user PD
+ * The function segregates the data depending on the diag_id in the header
+ * of the packet chunk and copies to PD specific buffers.
+ * Sets the context for the buffers using diag_id and process it later for
+ * splitting the stream based on active PD logging.
+ *
+ */
+
static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len)
{
@@ -458,8 +506,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
*(uint16_t *) (temp_buf_main + 2);
if (packet_len > PERIPHERAL_BUF_SZ)
goto end;
- if ((*temp_buf_main) == fwd_info->diagid_root) {
- ctxt_cpd = fwd_info->diagid_root;
+ if ((*temp_buf_main) ==
+ fwd_info->root_diag_id.diagid_val) {
+ ctxt_cpd =
+ fwd_info->root_diag_id.diagid_val;
len_cpd += packet_len;
if (temp_buf_cpd) {
memcpy(temp_buf_cpd,
@@ -467,46 +517,25 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
temp_buf_cpd += packet_len;
}
} else {
- for (i = 0; i <= (fwd_info->num_pd - 2); i++)
- if ((*temp_buf_main) ==
- fwd_info->diagid_user[i])
- break;
- ctxt_upd[i] = fwd_info->diagid_user[i];
- if (temp_buf_upd[i]) {
- memcpy(temp_buf_upd[i],
- (temp_buf_main + 4), packet_len);
- temp_buf_upd[i] += packet_len;
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if ((*temp_buf_main) ==
+ fwd_info->upd_diag_id[i].diagid_val) {
+ ctxt_upd[i] =
+ fwd_info->upd_diag_id[i].diagid_val;
+ if (temp_buf_upd[i]) {
+ memcpy(temp_buf_upd[i],
+ (temp_buf_main + 4),
+ packet_len);
+ temp_buf_upd[i] += packet_len;
+ }
+ len_upd[i] += packet_len;
+ }
}
- len_upd[i] += packet_len;
}
len = len - 4;
temp_buf_main += (packet_len + 4);
processed += packet_len;
}
- for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
- if (fwd_info->type == TYPE_DATA && len_upd[i]) {
- if (flag_buf_1) {
- fwd_info->upd_len[i][0] = len_upd[i];
- temp_fwdinfo_upd =
- fwd_info->buf_upd[i][0];
- } else {
- fwd_info->upd_len[i][1] = len_upd[i];
- temp_fwdinfo_upd =
- fwd_info->buf_upd[i][1];
- }
- temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
- temp_fwdinfo_upd->ctxt |=
- (SET_PD_CTXT(ctxt_upd[i]));
- atomic_set(&temp_fwdinfo_upd->in_busy, 1);
- diagfwd_data_process_done(fwd_info,
- temp_fwdinfo_upd, len_upd[i]);
- } else {
- if (flag_buf_1)
- fwd_info->upd_len[i][0] = 0;
- if (flag_buf_2)
- fwd_info->upd_len[i][1] = 0;
- }
- }
if (flag_buf_1) {
fwd_info->cpd_len_1 = len_cpd;
@@ -520,14 +549,31 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
fwd_info->upd_len[i][1] = len_upd[i];
}
+ for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+ if (fwd_info->type == TYPE_DATA && len_upd[i]) {
+ if (flag_buf_1)
+ temp_fwdinfo_upd =
+ fwd_info->buf_upd[i][0];
+ else
+ temp_fwdinfo_upd =
+ fwd_info->buf_upd[i][1];
+ temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
+ temp_fwdinfo_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd[i]));
+ atomic_set(&temp_fwdinfo_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_fwdinfo_upd, len_upd[i]);
+ } else {
+ if (flag_buf_1)
+ fwd_info->upd_len[i][0] = 0;
+ if (flag_buf_2)
+ fwd_info->upd_len[i][1] = 0;
+ }
+ }
+
if (len_cpd) {
- if (flag_buf_1)
- fwd_info->cpd_len_1 = len_cpd;
- else
- fwd_info->cpd_len_2 = len_cpd;
temp_fwdinfo_cpd->ctxt &= 0x00FFFFFF;
- temp_fwdinfo_cpd->ctxt |=
- (SET_PD_CTXT(ctxt_cpd));
+ temp_fwdinfo_cpd->ctxt |= (SET_PD_CTXT(ctxt_cpd));
diagfwd_data_process_done(fwd_info,
temp_fwdinfo_cpd, len_cpd);
} else {
@@ -543,6 +589,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
end:
diag_ws_release();
if (temp_fwdinfo_cpd) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
}
@@ -663,6 +713,10 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (temp_buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_buf->ctxt));
}
@@ -742,6 +796,16 @@ static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
+ if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
}
int diagfwd_peripheral_init(void)
@@ -776,12 +840,13 @@ int diagfwd_peripheral_init(void)
fwd_info->cpd_len_1 = 0;
fwd_info->cpd_len_2 = 0;
fwd_info->num_pd = 0;
+ fwd_info->root_diag_id.diagid_val = 0;
mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
spin_lock_init(&fwd_info->write_buf_lock);
for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
- fwd_info->diagid_user[i] = 0;
+ fwd_info->upd_diag_id[i].diagid_val = 0;
fwd_info->upd_len[i][0] = 0;
fwd_info->upd_len[i][1] = 0;
fwd_info->buf_upd[i][0] = NULL;
@@ -803,12 +868,13 @@ int diagfwd_peripheral_init(void)
fwd_info->num_pd = 0;
fwd_info->cpd_len_1 = 0;
fwd_info->cpd_len_2 = 0;
+ fwd_info->root_diag_id.diagid_val = 0;
spin_lock_init(&fwd_info->write_buf_lock);
mutex_init(&fwd_info->buf_mutex);
mutex_init(&fwd_info->data_mutex);
for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
- fwd_info->diagid_user[i] = 0;
+ fwd_info->upd_diag_id[i].diagid_val = 0;
fwd_info->upd_len[i][0] = 0;
fwd_info->upd_len[i][1] = 0;
fwd_info->buf_upd[i][0] = NULL;
@@ -1086,7 +1152,7 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
if (type == TYPE_CMD) {
if (driver->feature[peripheral].diag_id_support)
- if (!fwd_info->diagid_root ||
+ if (!fwd_info->root_diag_id.diagid_val ||
(!driver->diag_id_sent[peripheral])) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: diag_id is not assigned yet\n");
@@ -1137,10 +1203,28 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if (!fwd_info->inited)
return;
- if (fwd_info->buf_1)
- atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2)
- atomic_set(&fwd_info->buf_2->in_busy, 0);
+ /*
+ * Logging mode here is reflecting previous mode
+ * status and will be updated to new mode later.
+ *
+ * Keeping the buffers busy for Memory Device Mode.
+ */
+
+ if ((driver->logging_mode != DIAG_USB_MODE) ||
+ driver->usb_connected) {
+ if (fwd_info->buf_1) {
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2) {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ }
if (fwd_info->p_ops && fwd_info->p_ops->open)
fwd_info->p_ops->open(fwd_info->ctxt);
@@ -1265,10 +1349,18 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
fwd_info->c_ops->close(fwd_info);
- if (fwd_info->buf_1 && fwd_info->buf_1->data)
+ if (fwd_info->buf_1 && fwd_info->buf_1->data) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2 && fwd_info->buf_2->data)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && fwd_info->buf_2->data) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
@@ -1294,6 +1386,9 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
* in_busy flags. No need to queue read in this case.
*/
if (len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
diagfwd_reset_buffers(fwd_info, buf);
diag_ws_release();
return 0;
@@ -1306,7 +1401,7 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
return 0;
}
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
{
int i = 0, upd_valid_len = 0;
struct diagfwd_info *fwd_info = NULL;
@@ -1318,8 +1413,14 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
if (!fwd_info)
return;
- if (ctxt == 1 && fwd_info->buf_1) {
- /* Buffer 1 for core PD is freed */
+ if (buf_num == 1 && fwd_info->buf_1) {
+ /*
+ * Core PD buffer data is processed and
+ * length in the buffer is marked zero.
+ *
+ * Check if the user PD buffer contains any
+ * data before freeing core PD buffer.
+ */
fwd_info->cpd_len_1 = 0;
for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
if (fwd_info->upd_len[i][0]) {
@@ -1327,10 +1428,24 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
break;
}
}
- if (!upd_valid_len)
+ /*
+ * Do not free the core PD buffer if valid data
+ * is present in any user PD buffer.
+ */
+ if (!upd_valid_len) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- } else if (ctxt == 2 && fwd_info->buf_2) {
- /* Buffer 2 for core PD is freed */
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 2 && fwd_info->buf_2) {
+ /*
+ * Core PD buffer data is processed and
+ * length in the buffer is marked zero.
+ *
+ * Check if the user PD buffer contains any
+ * data before freeing core PD buffer.
+ */
fwd_info->cpd_len_2 = 0;
for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
if (fwd_info->upd_len[i][1]) {
@@ -1338,30 +1453,86 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
break;
}
}
- if (!upd_valid_len)
+ /*
+ * Do not free the core PD buffer if valid data
+ * is present in any user PD buffer.
+ */
+ if (!upd_valid_len) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
- } else if (ctxt >= 3 && (ctxt % 2)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num >= 3 && (buf_num % 2)) {
+ /*
+ * Go through each User PD buffer, validate the
+ * request for freeing the buffer by validating
+ * the buffer number.
+ *
+ */
for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
- if (fwd_info->buf_upd[i][0]) {
+ if (fwd_info->buf_upd[i][0] &&
+ (buf_num == ((2 * i) + 3))) {
/* Buffer 1 for ith user PD is freed */
- atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
- fwd_info->upd_len[i][0] = 0;
+ atomic_set(&fwd_info->buf_upd[i][0]->in_busy,
+ 0);
+ fwd_info->upd_len[i][0] = 0;
}
- if (!fwd_info->cpd_len_1)
+ /*
+ * Check if there is any data in user PD buffer other
+ * than buffer requested for freeing.
+ *
+ */
+ if (fwd_info->upd_len[i][0])
+ upd_valid_len = 1;
+ }
+ /*
+ * Mark the core PD buffer free if there is no
+ * data present in the core PD buffer and other user PD buffers.
+ *
+ */
+ if (!upd_valid_len && !fwd_info->cpd_len_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
}
- } else if (ctxt >= 4 && !(ctxt % 2)) {
+ } else if (buf_num >= 4 && !(buf_num % 2)) {
+ /*
+ * Go through each User PD buffer, validate the
+ * request for freeing the buffer by validating
+ * the buffer number.
+ *
+ */
for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
- if (fwd_info->buf_upd[i][1]) {
+ if (fwd_info->buf_upd[i][1] &&
+ (buf_num == ((2 * i) + 4))) {
/* Buffer 2 for ith user PD is freed */
- atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
- fwd_info->upd_len[i][1] = 0;
+ atomic_set(&fwd_info->buf_upd[i][1]->in_busy,
+ 0);
+ fwd_info->upd_len[i][1] = 0;
}
- if (!fwd_info->cpd_len_2)
- atomic_set(&fwd_info->buf_2->in_busy, 0);
+ /*
+ * Check if there is any data in user PD buffer other
+ * than buffer requested for freeing.
+ *
+ */
+ if (fwd_info->upd_len[i][1])
+ upd_valid_len = 1;
}
+ /*
+ * Mark the core PD buffer free if there is no
+ * data present in the core PD buffer and other user PD buffers.
+ *
+ */
+ if (!upd_valid_len && !fwd_info->cpd_len_2) {
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
} else
- pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+ pr_err("diag: In %s, invalid buf_num %d\n", __func__, buf_num);
diagfwd_queue_read(fwd_info);
}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index b16670e..6ddce32 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -62,6 +62,12 @@ struct diag_peripheral_ops {
void (*queue_read)(void *ctxt);
};
+struct diag_id_info {
+ uint8_t diagid_val;
+ uint8_t pd;
+ char *reg_str;
+};
+
struct diagfwd_info {
uint8_t peripheral;
uint8_t type;
@@ -69,8 +75,6 @@ struct diagfwd_info {
uint8_t inited;
uint8_t ch_open;
uint8_t num_pd;
- uint8_t diagid_root;
- uint8_t diagid_user[MAX_PERIPHERAL_UPD];
int cpd_len_1;
int cpd_len_2;
int upd_len[MAX_PERIPHERAL_UPD][2];
@@ -81,6 +85,8 @@ struct diagfwd_info {
struct mutex buf_mutex;
struct mutex data_mutex;
void *ctxt;
+ struct diag_id_info root_diag_id;
+ struct diag_id_info upd_diag_id[MAX_PERIPHERAL_UPD];
struct diagfwd_buf_t *buf_1;
struct diagfwd_buf_t *buf_2;
struct diagfwd_buf_t *buf_upd[MAX_PERIPHERAL_UPD][2];
@@ -113,7 +119,7 @@ int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
/*
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 7641a6a..fdcef1d 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -53,6 +53,9 @@
#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
+#define RETRY_MAX_CNT 5 /* max retry times to read register */
+#define RETRY_DELAY_INTERVAL 440 /* retry delay interval in us */
+
struct msm_rng_device {
struct platform_device *pdev;
void __iomem *base;
@@ -96,7 +99,7 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
struct platform_device *pdev;
void __iomem *base;
size_t currsize = 0;
- u32 val;
+ u32 val = 0;
u32 *retdata = data;
int ret;
int failed = 0;
@@ -113,41 +116,41 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
if (msm_rng_dev->qrng_perf_client) {
ret = msm_bus_scale_client_update_request(
msm_rng_dev->qrng_perf_client, 1);
- if (ret)
+ if (ret) {
pr_err("bus_scale_client_update_req failed!\n");
+ goto bus_err;
+ }
}
/* enable PRNG clock */
ret = clk_prepare_enable(msm_rng_dev->prng_clk);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock in callback\n");
+ pr_err("failed to enable prng clock\n");
goto err;
}
/* read random data from h/w */
do {
/* check status bit if data is available */
- while (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+ if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
& 0x00000001)) {
- if (failed == 10) {
- pr_err("Data not available after retry\n");
+ if (failed++ == RETRY_MAX_CNT) {
+ if (currsize == 0)
+ pr_err("Data not available\n");
break;
}
- pr_err("msm_rng:Data not available!\n");
- msleep_interruptible(10);
- failed++;
+ udelay(RETRY_DELAY_INTERVAL);
+ } else {
+
+ /* read FIFO */
+ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+
+ /* write data back to callers pointer */
+ *(retdata++) = val;
+ currsize += 4;
+ /* make sure we stay on 32bit boundary */
+ if ((max - currsize) < 4)
+ break;
}
- /* read FIFO */
- val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
- if (!val)
- break; /* no data to read so just bail */
-
- /* write data back to callers pointer */
- *(retdata++) = val;
- currsize += 4;
- /* make sure we stay on 32bit boundary */
- if ((max - currsize) < 4)
- break;
-
} while (currsize < max);
/* vote to turn off clock */
@@ -159,6 +162,7 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
if (ret)
pr_err("bus_scale_client_update_req failed!\n");
}
+bus_err:
mutex_unlock(&msm_rng_dev->rng_lock);
val = 0L;
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 411310d..02d3bcd 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -182,6 +182,7 @@ static int i2s_pll_clk_probe(struct platform_device *pdev)
if (IS_ERR(pll_clk->base))
return PTR_ERR(pll_clk->base);
+ memset(&init, 0, sizeof(init));
clk_name = node->name;
init.name = clk_name;
init.ops = &i2s_pll_ops;
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index 02023ba..962e0c5 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -55,21 +55,39 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK;
switch (freq_mode) {
- case 0x0 ... 0x5:
+ case 0x0:
+ case 0x1:
cpuclk_freq = 2000;
break;
- case 0x6 ... 0xB:
+ case 0x6:
+ case 0x7:
cpuclk_freq = 1800;
break;
- case 0xC ... 0x11:
+ case 0x4:
+ case 0xB:
+ case 0xD:
cpuclk_freq = 1600;
break;
- case 0x12 ... 0x16:
+ case 0x1a:
cpuclk_freq = 1400;
break;
- case 0x17 ... 0x19:
+ case 0x14:
+ case 0x17:
cpuclk_freq = 1300;
break;
+ case 0x19:
+ cpuclk_freq = 1200;
+ break;
+ case 0x13:
+ case 0x1d:
+ cpuclk_freq = 1000;
+ break;
+ case 0x1c:
+ cpuclk_freq = 800;
+ break;
+ case 0x1b:
+ cpuclk_freq = 600;
+ break;
default:
dev_err(&pdev->dev, "invalid SAR value\n");
return -EINVAL;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d47b66e..87d067a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -235,4 +235,21 @@
subsystems via QMP mailboxes.
Say Y to support the clocks managed by AOP on platforms such as sdm845.
+config MDM_GCC_SDXPOORWILLS
+ tristate "SDXPOORWILLS Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on sdxpoorwills devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
+config MDM_CLOCK_CPU_SDXPOORWILLS
+ tristate "SDXPOORWILLS CPU Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the cpu clock controller on sdxpoorwills
+ based devices.
+ Say Y if you want to support CPU clock scaling using
+ CPUfreq drivers for dynamic power management.
+
source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 6a8c43b..8cb46a7 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -22,7 +22,9 @@
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
+obj-$(CONFIG_MDM_CLOCK_CPU_SDXPOORWILLS) += clk-cpu-a7.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
+obj-$(CONFIG_MDM_GCC_SDXPOORWILLS) += gcc-sdxpoorwills.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
obj-$(CONFIG_MSM_CLK_AOP_QMP) += clk-aop-qmp.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 5caa975..836c25c 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1971,6 +1971,87 @@ static void cam_cc_sdm845_fixup_sdm845v2(void)
cam_cc_sdm845_clocks[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr;
cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] =
&cam_cc_csi3phytimer_clk_src.clkr;
+ cam_cc_bps_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_bps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cci_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_cci_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
+ cam_cc_csi0phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi0phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_csi1phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi1phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_csi2phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_csi2phytimer_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_fast_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_fast_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_sdm845_v2;
+ cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_icp_clk_src_sdm845_v2;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 600000000;
+ cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_0_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_1_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ife_lite_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_lite_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ife_lite_csid_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+ 384000000;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+ cam_cc_jpeg_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_jpeg_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_lrme_clk_src.freq_tbl = ftbl_cam_cc_lrme_clk_src_sdm845_v2;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 269333333;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 320000000;
+ cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 400000000;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk2_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_mclk3_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 34285714;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] = 0;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 0;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 80000000;
+ cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+ 80000000;
+}
+
+static void cam_cc_sdm845_fixup_sdm670(void)
+{
+ cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK] =
+ &cam_cc_csi3phytimer_clk.clkr;
+ cam_cc_sdm845_clocks[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr;
+ cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] =
+ &cam_cc_csi3phytimer_clk_src.clkr;
cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
@@ -1991,11 +2072,6 @@ static void cam_cc_sdm845_fixup_sdm845v2(void)
80000000;
}
-static void cam_cc_sdm845_fixup_sdm670(void)
-{
- cam_cc_sdm845_fixup_sdm845v2();
-}
-
static int cam_cc_sdm845_fixup(struct platform_device *pdev)
{
const char *compat = NULL;
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index afb2c01..bf9b99d 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -22,6 +22,8 @@
#include "clk-alpha-pll.h"
#define PLL_MODE 0x00
+#define PLL_STANDBY 0x0
+#define PLL_RUN 0x1
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
@@ -51,25 +53,40 @@
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
+#define PLL_UPDATE BIT(22)
+#define PLL_ACK_LATCH BIT(29)
+#define PLL_CALIBRATION_MASK (0x7<<3)
+#define PLL_CALIBRATION_CONTROL 2
+#define PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+#define ALPHA_16_BIT_PLL_RATE_MARGIN 500
/*
* Even though 40 bits are present, use only 32 for ease of calculation.
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
-#define FABIA_BITWIDTH 16
+#define SUPPORTS_16BIT_ALPHA 16
#define FABIA_USER_CTL_LO 0xc
#define FABIA_USER_CTL_HI 0x10
#define FABIA_FRAC_VAL 0x38
#define FABIA_OPMODE 0x2c
-#define FABIA_PLL_STANDBY 0x0
-#define FABIA_PLL_RUN 0x1
#define FABIA_PLL_OUT_MASK 0x7
-#define FABIA_PLL_RATE_MARGIN 500
#define FABIA_PLL_ACK_LATCH BIT(29)
#define FABIA_PLL_UPDATE BIT(22)
-#define FABIA_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+
+#define TRION_PLL_CAL_VAL 0x44
+#define TRION_PLL_CAL_L_VAL 0x8
+#define TRION_PLL_USER_CTL 0xc
+#define TRION_PLL_USER_CTL_U 0x10
+#define TRION_PLL_USER_CTL_U1 0x14
+#define TRION_PLL_CONFIG_CTL_U 0x1c
+#define TRION_PLL_CONFIG_CTL_U1 0x20
+#define TRION_PLL_OPMODE 0x38
+#define TRION_PLL_ALPHA_VAL 0x40
+
+#define TRION_PLL_OUT_MASK 0x7
+#define TRION_PLL_ENABLE_STATE_READ BIT(4)
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
@@ -121,6 +138,10 @@ static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
return wait_for_pll(pll, mask, 0, "offline");
}
+static int wait_for_pll_latch_ack(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "latch_ack");
+}
/* alpha pll with hwfsm support */
@@ -294,8 +315,8 @@ static unsigned long alpha_pll_calc_rate(const struct clk_alpha_pll *pll,
{
int alpha_bw = ALPHA_BITWIDTH;
- if (pll->type == FABIA_PLL)
- alpha_bw = FABIA_BITWIDTH;
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL)
+ alpha_bw = SUPPORTS_16BIT_ALPHA;
return (prate * l) + ((prate * a) >> alpha_bw);
}
@@ -326,9 +347,9 @@ alpha_pll_round_rate(const struct clk_alpha_pll *pll, unsigned long rate,
return rate;
}
- /* Fabia PLLs only have 16 bits to program the fractional divider */
- if (pll->type == FABIA_PLL)
- alpha_bw = FABIA_BITWIDTH;
+ /* Some PLLs only have 16 bits to program the fractional divider */
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL)
+ alpha_bw = SUPPORTS_16BIT_ALPHA;
/* Upper ALPHA_BITWIDTH bits of Alpha */
quotient = remainder << alpha_bw;
@@ -415,7 +436,8 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long min_freq, max_freq;
rate = alpha_pll_round_rate(pll, rate, *prate, &l, &a);
- if (pll->type == FABIA_PLL || alpha_pll_find_vco(pll, rate))
+ if (pll->type == FABIA_PLL || pll->type == TRION_PLL ||
+ alpha_pll_find_vco(pll, rate))
return rate;
min_freq = pll->vco_table[0].min_freq;
@@ -523,8 +545,8 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
clk_fabia_pll_latch_input(pll, regmap);
regmap_update_bits(regmap, pll->offset + PLL_MODE,
- FABIA_PLL_HW_UPDATE_LOGIC_BYPASS,
- FABIA_PLL_HW_UPDATE_LOGIC_BYPASS);
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
regmap_update_bits(regmap, pll->offset + PLL_MODE,
PLL_RESET_N, PLL_RESET_N);
@@ -560,7 +582,7 @@ static int clk_fabia_pll_enable(struct clk_hw *hw)
return ret;
/* Set operation mode to STANDBY */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_STANDBY);
/* PLL should be in STANDBY mode before continuing */
mb();
@@ -572,7 +594,7 @@ static int clk_fabia_pll_enable(struct clk_hw *hw)
return ret;
/* Set operation mode to RUN */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_RUN);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_RUN);
ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
if (ret)
@@ -624,7 +646,7 @@ static void clk_fabia_pll_disable(struct clk_hw *hw)
return;
/* Place the PLL mode in STANDBY */
- regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+ regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, PLL_STANDBY);
}
static unsigned long
@@ -659,7 +681,7 @@ static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate,
* Due to limited number of bits for fractional rate programming, the
* rounded up rate could be marginally higher than the requested rate.
*/
- if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+ if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) {
pr_err("Call set rate on the PLL with rounded rates!\n");
return -EINVAL;
}
@@ -879,3 +901,436 @@ const struct clk_ops clk_generic_pll_postdiv_ops = {
.set_rate = clk_generic_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_generic_pll_postdiv_ops);
+
+static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
+ struct regmap *regmap)
+{
+ u32 mode_val, opmode_val, off = pll->offset;
+ int ret;
+
+ ret = regmap_read(regmap, off + PLL_MODE, &mode_val);
+ ret |= regmap_read(regmap, off + TRION_PLL_OPMODE, &opmode_val);
+ if (ret)
+ return 0;
+
+ return ((opmode_val & PLL_RUN) && (mode_val & PLL_OUTCTRL));
+}
+
+int clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ int ret = 0;
+
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("PLL is already enabled. Skipping configuration.\n");
+
+ /*
+ * Set the PLL_HW_UPDATE_LOGIC_BYPASS bit to latch the input
+ * before continuing.
+ */
+ regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
+
+ pll->inited = true;
+ return ret;
+ }
+
+ /*
+ * Disable the PLL if it's already been initialized. Not doing so might
+ * lead to the PLL running with the old frequency configuration.
+ */
+ if (pll->inited) {
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_RESET_N, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (config->l)
+ regmap_write(regmap, pll->offset + PLL_L_VAL,
+ config->l);
+
+ regmap_write(regmap, pll->offset + TRION_PLL_CAL_L_VAL,
+ TRION_PLL_CAL_VAL);
+
+ if (config->frac)
+ regmap_write(regmap, pll->offset + TRION_PLL_ALPHA_VAL,
+ config->frac);
+
+ if (config->config_ctl_val)
+ regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+ config->config_ctl_val);
+
+ if (config->config_ctl_hi_val)
+ regmap_write(regmap, pll->offset + TRION_PLL_CONFIG_CTL_U,
+ config->config_ctl_hi_val);
+
+ if (config->config_ctl_hi1_val)
+ regmap_write(regmap, pll->offset + TRION_PLL_CONFIG_CTL_U1,
+ config->config_ctl_hi1_val);
+
+ if (config->post_div_mask)
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL,
+ config->post_div_mask, config->post_div_val);
+
+ /* Disable state read */
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL_U,
+ TRION_PLL_ENABLE_STATE_READ, 0);
+
+ regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_HW_UPDATE_LOGIC_BYPASS,
+ PLL_HW_UPDATE_LOGIC_BYPASS);
+
+ /* Set calibration control to Automatic */
+ regmap_update_bits(regmap, pll->offset + TRION_PLL_USER_CTL_U,
+ PLL_CALIBRATION_MASK, PLL_CALIBRATION_CONTROL);
+
+ /* Disable PLL output */
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to OFF */
+ regmap_write(regmap, pll->offset + TRION_PLL_OPMODE, PLL_STANDBY);
+
+ /* PLL should be in OFF mode before continuing */
+ wmb();
+
+ /* Place the PLL in STANDBY mode */
+ ret = regmap_update_bits(regmap, pll->offset + PLL_MODE,
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ pll->inited = true;
+
+ return ret;
+}
+
+static int clk_alpha_pll_latch_l_val(struct clk_alpha_pll *pll)
+{
+ int ret;
+
+ /* Latch the input to the PLL */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for 2 reference cycle before checking ACK bit */
+ udelay(1);
+
+ ret = wait_for_pll_latch_ack(pll, PLL_ACK_LATCH);
+ if (ret)
+ return ret;
+
+ /* Return latch input to 0 */
+ ret = regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_MODE,
+ PLL_UPDATE, (u32)~PLL_UPDATE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_trion_pll_enable(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off = pll->offset;
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+ }
+
+ if (unlikely(!pll->inited)) {
+ ret = clk_trion_pll_configure(pll, pll->clkr.regmap,
+ pll->config);
+ if (ret) {
+ pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
+ return ret;
+ }
+ }
+
+ /* Skip If PLL is already running */
+ if (trion_pll_is_enabled(pll, pll->clkr.regmap))
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_OPMODE, PLL_RUN);
+
+ ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
+ if (ret)
+ return ret;
+
+ /* Enable PLL main output */
+ ret = regmap_update_bits(pll->clkr.regmap, off + TRION_PLL_USER_CTL,
+ TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable Global PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void clk_trion_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off = pll->offset;
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable Global PLL outputs */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the main PLL output */
+ ret = regmap_update_bits(pll->clkr.regmap, off + TRION_PLL_USER_CTL,
+ TRION_PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL into STANDBY mode */
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_OPMODE, PLL_STANDBY);
+
+ regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_RESET_N, PLL_RESET_N);
+}
+
+static unsigned long
+clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, frac = 0;
+ u64 prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 off = pll->offset;
+
+ regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
+ regmap_read(pll->clkr.regmap, off + TRION_PLL_ALPHA_VAL, &frac);
+
+ return alpha_pll_calc_rate(pll, prate, l, frac);
+}
+
+static int clk_trion_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ unsigned long rrate;
+ bool is_enabled;
+ int ret;
+ u32 l, val, off = pll->offset;
+ u64 a;
+
+ rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a);
+ /*
+ * Due to limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+ if (rrate > (rate + ALPHA_16_BIT_PLL_RATE_MARGIN) || rrate < rate) {
+ pr_err("Trion_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ is_enabled = clk_hw_is_enabled(hw);
+
+ if (is_enabled)
+ hw->init->ops->disable(hw);
+
+ regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
+ regmap_write(pll->clkr.regmap, off + TRION_PLL_ALPHA_VAL, a);
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If PLL is in Standby or RUN mode then only latch the L value
+ * Else PLL is in OFF mode and just configure L register - as per
+ * HPG no need to latch input.
+ */
+ if (val & PLL_RESET_N)
+ clk_alpha_pll_latch_l_val(pll);
+
+ if (is_enabled)
+ hw->init->ops->enable(hw);
+
+ /* Wait for PLL output to stabilize */
+ udelay(100);
+
+ return ret;
+}
+
+/* clk_ops .is_enabled hook - delegates to the shared Trion status check. */
+static int clk_trion_pll_is_enabled(struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+
+	return trion_pll_is_enabled(pll, pll->clkr.regmap);
+}
+
+/*
+ * Debugfs helper: dump the Trion PLL register set, plus the APSS vote
+ * register when the PLL is under FSM/HW voting control.
+ *
+ * Note: the register value is read into a u32 (not int) to match
+ * regmap_read()'s "unsigned int *" out-parameter.
+ */
+static void clk_trion_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	int size, i;
+	u32 val;
+
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_USER_CTL", 0xc},
+		{"PLL_USER_CTL_U", 0x10},
+		{"PLL_USER_CTL_U1", 0x14},
+		{"PLL_CONFIG_CTL", 0x18},
+		{"PLL_CONFIG_CTL_U", 0x1c},
+		{"PLL_CONFIG_CTL_U1", 0x20},
+		{"PLL_OPMODE", 0x38},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"APSS_PLL_VOTE", 0x0},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
+					&val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	/* Re-read PLL_MODE to check for FSM voting mode */
+	regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+
+	if (val & PLL_VOTE_FSM_ENA) {
+		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+					data1[0].offset, &val);
+		seq_printf(f, "%20s: 0x%.8x\n", data1[0].name, val);
+	}
+}
+
+/* Full-featured Trion PLL ops: rate may be reprogrammed at runtime. */
+const struct clk_ops clk_trion_pll_ops = {
+	.enable = clk_trion_pll_enable,
+	.disable = clk_trion_pll_disable,
+	.recalc_rate = clk_trion_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+	.set_rate = clk_trion_pll_set_rate,
+	.is_enabled = clk_trion_pll_is_enabled,
+	.list_registers = clk_trion_pll_list_registers,
+};
+EXPORT_SYMBOL(clk_trion_pll_ops);
+
+/* Fixed-rate variant: no .set_rate, the PLL runs at its configured rate. */
+const struct clk_ops clk_trion_fixed_pll_ops = {
+	.enable = clk_trion_pll_enable,
+	.disable = clk_trion_pll_disable,
+	.recalc_rate = clk_trion_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+	.is_enabled = clk_trion_pll_is_enabled,
+	.list_registers = clk_trion_pll_list_registers,
+};
+EXPORT_SYMBOL(clk_trion_fixed_pll_ops);
+
+/*
+ * Report the post-divider output rate: parent rate divided by the divider
+ * currently programmed in TRION_PLL_USER_CTL. Falls back to a divider of
+ * 1 when the programmed code is not in the table.
+ */
+static unsigned long clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw,
+				unsigned long parent_rate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+	u32 regval, div_code, idx, divisor = 1;
+
+	if (!pll->post_div_table) {
+		pr_err("Missing the post_div_table for the PLL\n");
+		return -EINVAL;
+	}
+
+	regmap_read(pll->clkr.regmap, pll->offset + TRION_PLL_USER_CTL,
+					&regval);
+
+	/* Extract the divider field and map the code back to a divisor */
+	div_code = (regval >> pll->post_div_shift) & PLL_POST_DIV_MASK;
+
+	for (idx = 0; idx < pll->num_post_div; idx++) {
+		if (pll->post_div_table[idx].val == div_code) {
+			divisor = pll->post_div_table[idx].div;
+			break;
+		}
+	}
+
+	return (parent_rate / divisor);
+}
+
+/* clk_ops .round_rate hook - pick the closest rate the divider table allows. */
+static long clk_trion_pll_postdiv_round_rate(struct clk_hw *hw,
+				unsigned long rate, unsigned long *prate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+	/* Cannot round without a divider table */
+	if (!pll->post_div_table)
+		return -EINVAL;
+
+	return divider_round_rate(hw, rate, prate, pll->post_div_table,
+				pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+/*
+ * Program the PLL post-divider.
+ *
+ * When the PLL is under FSM (hardware voting) control the divider is
+ * owned by HW, so the request is treated as a no-op.
+ *
+ * Fix: the original code read PLL_MODE into the same 'val' variable that
+ * was later used as the divider code; if the computed divisor had no
+ * table match, the stale PLL_MODE value was written into the divider
+ * field of USER_CTL. The mode word now lives in its own variable and
+ * 'val' stays 0 (divide-by-1 code) when no table entry matches.
+ */
+static int clk_trion_pll_postdiv_set_rate(struct clk_hw *hw,
+				unsigned long rate, unsigned long parent_rate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+	u32 mode_regval;
+	int i, val = 0, cal_div, ret;
+
+	/*
+	 * If the PLL is in FSM mode, then treat the set_rate callback
+	 * as a no-operation.
+	 */
+	ret = regmap_read(pll->clkr.regmap, pll->offset + PLL_MODE,
+					&mode_regval);
+	if (ret)
+		return ret;
+
+	if (mode_regval & PLL_VOTE_FSM_ENA)
+		return 0;
+
+	if (!pll->post_div_table) {
+		pr_err("Missing the post_div_table for the PLL\n");
+		return -EINVAL;
+	}
+
+	/* Round the divisor up so the output never exceeds the request */
+	cal_div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+	for (i = 0; i < pll->num_post_div; i++) {
+		if (pll->post_div_table[i].div == cal_div) {
+			val = pll->post_div_table[i].val;
+			break;
+		}
+	}
+
+	return regmap_update_bits(pll->clkr.regmap,
+				pll->offset + TRION_PLL_USER_CTL,
+				PLL_POST_DIV_MASK << pll->post_div_shift,
+				val << pll->post_div_shift);
+}
+
+/* Post-divider ops for Trion PLL output branches. */
+const struct clk_ops clk_trion_pll_postdiv_ops = {
+	.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
+	.round_rate = clk_trion_pll_postdiv_round_rate,
+	.set_rate = clk_trion_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL(clk_trion_pll_postdiv_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 2656cd6..c5fecb1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -27,6 +27,7 @@ struct pll_vco {
enum pll_type {
ALPHA_PLL,
FABIA_PLL,
+ TRION_PLL,
};
/**
@@ -35,7 +36,7 @@ enum pll_type {
* @inited: flag that's set when the PLL is initialized
* @vco_table: array of VCO settings
* @clkr: regmap clock handle
- * @is_fabia: Set if the PLL type is FABIA
+ * @pll_type: Specify the type of PLL
*/
struct clk_alpha_pll {
u32 offset;
@@ -79,10 +80,15 @@ extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_fabia_pll_ops;
extern const struct clk_ops clk_fabia_fixed_pll_ops;
extern const struct clk_ops clk_generic_pll_postdiv_ops;
+extern const struct clk_ops clk_trion_pll_ops;
+extern const struct clk_ops clk_trion_fixed_pll_ops;
+extern const struct clk_ops clk_trion_pll_postdiv_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct pll_config *config);
void clk_fabia_pll_configure(struct clk_alpha_pll *pll,
struct regmap *regmap, const struct pll_config *config);
+int clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-cpu-a7.c b/drivers/clk/qcom/clk-cpu-a7.c
new file mode 100644
index 0000000..c0cc00f
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-a7.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <dt-bindings/clock/qcom,cpu-a7.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-debug.h"
+#include "clk-rcg.h"
+#include "clk-regmap-mux-div.h"
+#include "common.h"
+#include "vdd-level-sdm845.h"
+
+/* RCG mux source value for the safe aux clock (used by the notifier) */
+#define SYS_APC0_AUX_CLK_SRC 1
+
+/* APSS PLL register offsets / field values within the mapped region */
+#define PLL_MODE_REG 0x0
+#define PLL_OPMODE_RUN 0x1
+#define PLL_OPMODE_REG 0x38
+#define PLL_MODE_OUTCTRL BIT(0)
+
+#define to_clk_regmap_mux_div(_hw) \
+	container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+/* Rail regulators: CX for the PLL, a dedicated rail for the CPU mux */
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGS_INIT(vdd_cpu, 1);
+
+/* Logical parent positions used throughout this driver */
+enum apcs_clk_parent_index {
+	XO_AO_INDEX,
+	SYS_APC0_AUX_CLK_INDEX,
+	APCS_CPU_PLL_INDEX,
+};
+
+enum {
+	P_SYS_APC0_AUX_CLK,
+	P_APCS_CPU_PLL,
+	P_BI_TCXO_AO,
+};
+
+/* Maps logical parent index to the hardware mux select value (.cfg) */
+static const struct parent_map apcs_clk_parent_map[] = {
+	[XO_AO_INDEX] = { P_BI_TCXO_AO, 0 },
+	[SYS_APC0_AUX_CLK_INDEX] = { P_SYS_APC0_AUX_CLK, 1 },
+	[APCS_CPU_PLL_INDEX] = { P_APCS_CPU_PLL, 5 },
+};
+
+static const char *const apcs_clk_parent_name[] = {
+	[XO_AO_INDEX] = "bi_tcxo_ao",
+	[SYS_APC0_AUX_CLK_INDEX] = "sys_apc0_aux_clk",
+	[APCS_CPU_PLL_INDEX] = "apcs_cpu_pll",
+};
+
+/*
+ * clk_ops .set_rate_and_parent hook: program the mux source for the new
+ * parent together with the divider cached by determine_rate().
+ */
+static int a7cc_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+				unsigned long prate, u8 index)
+{
+	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_src_div(cpuclk, cpuclk->parent_map[index].cfg,
+					cpuclk->div);
+}
+
+static int a7cc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+	/*
+	 * Because .set_rate_and_parent is implemented, the clock framework
+	 * never calls .set_parent from clk_change_rate(), so there is
+	 * nothing to do here.
+	 */
+	return 0;
+}
+
+/*
+ * clk_ops .set_rate hook: the parent is unchanged from the last request,
+ * so only the divider cached by determine_rate() needs reprogramming.
+ */
+static int a7cc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long prate)
+{
+	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+	return __mux_div_set_src_div(md, md->src, md->div);
+}
+
+/*
+ * clk_ops .determine_rate hook. Chooses among three parents:
+ *  - XO when the request matches the XO rate exactly,
+ *  - sys_apc0_aux_clk with a half-integer divider for low rates,
+ *  - the APSS CPU PLL (rate set on the PLL itself) for anything faster.
+ * The chosen source code and divider are cached in the mux-div struct for
+ * the subsequent set_rate/set_rate_and_parent call.
+ */
+static int a7cc_clk_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req)
+{
+	int ret;
+	u32 div = 1;
+	struct clk_hw *xo, *apc0_auxclk_hw, *apcs_cpu_pll_hw;
+	unsigned long apc0_auxclk_rate, rate = req->rate;
+	struct clk_rate_request parent_req = { };
+	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+	unsigned long mask = BIT(cpuclk->hid_width) - 1;
+
+	xo = clk_hw_get_parent_by_index(hw, XO_AO_INDEX);
+	if (rate == clk_hw_get_rate(xo)) {
+		req->best_parent_hw = xo;
+		req->best_parent_rate = rate;
+		cpuclk->div = div;
+		cpuclk->src = cpuclk->parent_map[XO_AO_INDEX].cfg;
+		return 0;
+	}
+
+	apc0_auxclk_hw = clk_hw_get_parent_by_index(hw, SYS_APC0_AUX_CLK_INDEX);
+	apcs_cpu_pll_hw = clk_hw_get_parent_by_index(hw, APCS_CPU_PLL_INDEX);
+
+	apc0_auxclk_rate = clk_hw_get_rate(apc0_auxclk_hw);
+	if (rate <= apc0_auxclk_rate) {
+		req->best_parent_hw = apc0_auxclk_hw;
+		req->best_parent_rate = apc0_auxclk_rate;
+
+		/* RCG half-integer divider encoding: field = 2*D - 1 */
+		div = DIV_ROUND_UP((2 * req->best_parent_rate), rate) - 1;
+		div = min_t(unsigned long, div, mask);
+
+		req->rate = clk_rcg2_calc_rate(req->best_parent_rate, 0,
+							0, 0, div);
+		cpuclk->src = cpuclk->parent_map[SYS_APC0_AUX_CLK_INDEX].cfg;
+	} else {
+		/* Let the PLL round/settle the rate; mux divides by 1 */
+		parent_req.rate = rate;
+		parent_req.best_parent_hw = apcs_cpu_pll_hw;
+
+		req->best_parent_hw = apcs_cpu_pll_hw;
+		ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+		if (ret)
+			return ret;
+
+		req->best_parent_rate = parent_req.rate;
+		cpuclk->src = cpuclk->parent_map[APCS_CPU_PLL_INDEX].cfg;
+	}
+	cpuclk->div = div;
+
+	return 0;
+}
+
+/*
+ * Debugfs helper: dump the RCG CMD/CFG registers of the CPU mux.
+ * The register value is read into a u32 (not int) to match
+ * regmap_read()'s "unsigned int *" out-parameter.
+ */
+static void a7cc_clk_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+	int i = 0, size = 0;
+	u32 val;
+
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	size = ARRAY_SIZE(data);
+	for (i = 0; i < size; i++) {
+		regmap_read(cpuclk->clkr.regmap,
+				cpuclk->reg_offset + data[i].offset, &val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+}
+
+/*
+ * clk_ops .recalc_rate hook: read back the mux source and divider from
+ * hardware and derive the output rate from the matching parent's rate.
+ * Returns 0 (via 'ret') if the programmed source matches no known parent.
+ */
+static unsigned long a7cc_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long prate)
+{
+	struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw);
+	const char *name = clk_hw_get_name(hw);
+	struct clk_hw *parent;
+	int ret = 0;
+	unsigned long parent_rate;
+	u32 i, div, src = 0;
+	u32 num_parents = clk_hw_get_num_parents(hw);
+
+	ret = mux_div_get_src_div(cpuclk, &src, &div);
+	if (ret)
+		return ret;
+
+	/* Map the hardware source code back to a logical parent */
+	for (i = 0; i < num_parents; i++) {
+		if (src == cpuclk->parent_map[i].cfg) {
+			parent = clk_hw_get_parent_by_index(hw, i);
+			parent_rate = clk_hw_get_rate(parent);
+			return clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
+		}
+	}
+	pr_err("%s: Can't find parent %d\n", name, src);
+	return ret;
+}
+
+/* clk_ops .enable hook - delegate to the generic regmap mux-div ops. */
+static int a7cc_clk_enable(struct clk_hw *hw)
+{
+	return clk_regmap_mux_div_ops.enable(hw);
+}
+
+/* clk_ops .disable hook - delegate to the generic regmap mux-div ops. */
+static void a7cc_clk_disable(struct clk_hw *hw)
+{
+	clk_regmap_mux_div_ops.disable(hw);
+}
+
+/* clk_ops .get_parent hook - delegate to the generic regmap mux-div ops. */
+static u8 a7cc_clk_get_parent(struct clk_hw *hw)
+{
+	return clk_regmap_mux_div_ops.get_parent(hw);
+}
+
+/*
+ * Clock-rate-change notifier: before the APSS PLL is reprogrammed, park
+ * the CPU mux on the safe sys_apc0_aux_clk source so the cores keep a
+ * valid clock during the switch.
+ */
+static int a7cc_notifier_cb(struct notifier_block *nb, unsigned long event,
+				void *data)
+{
+	struct clk_regmap_mux_div *md = container_of(nb,
+				struct clk_regmap_mux_div, clk_nb);
+	int rc = 0;
+
+	switch (event) {
+	case PRE_RATE_CHANGE:
+		/* set the mux to safe source(sys_apc0_aux_clk) & div */
+		rc = __mux_div_set_src_div(md, SYS_APC0_AUX_CLK_SRC, 1);
+		break;
+	case ABORT_RATE_CHANGE:
+		pr_err("Error in configuring PLL - stay at safe src only\n");
+		break;
+	}
+
+	return notifier_from_errno(rc);
+}
+
+/* clk_ops for the APCS CPU mux-div clock */
+static const struct clk_ops a7cc_clk_ops = {
+	.enable = a7cc_clk_enable,
+	.disable = a7cc_clk_disable,
+	.get_parent = a7cc_clk_get_parent,
+	.set_rate = a7cc_clk_set_rate,
+	.set_parent = a7cc_clk_set_parent,
+	.set_rate_and_parent = a7cc_clk_set_rate_and_parent,
+	.determine_rate = a7cc_clk_determine_rate,
+	.recalc_rate = a7cc_clk_recalc_rate,
+	.debug_init = clk_debug_measure_add,
+	.list_registers = a7cc_clk_list_registers,
+};
+
+/*
+ * As per HW, sys_apc0_aux_clk runs at 300MHz and configured by BOOT
+ * So adding it as dummy clock.
+ */
+
+static struct clk_dummy sys_apc0_aux_clk = {
+	.rrate = 300000000,
+	.hw.init = &(struct clk_init_data){
+		.name = "sys_apc0_aux_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+/* Initial configuration for 1497.6MHz(Turbo): L = 0x4E * 19.2MHz XO */
+static const struct pll_config apcs_cpu_pll_config = {
+	.l = 0x4E,
+};
+
+/* Single VCO range for the Trion PLL: 249.6MHz - 2GHz */
+static struct pll_vco trion_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+/* APSS CPU PLL (Trion type), sourced from the always-on XO */
+static struct clk_alpha_pll apcs_cpu_pll = {
+	.type = TRION_PLL,
+	.vco_table = trion_vco,
+	.num_vco = ARRAY_SIZE(trion_vco),
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "apcs_cpu_pll",
+		.parent_names = (const char *[]){ "bi_tcxo_ao" },
+		.num_parents = 1,
+		.ops = &clk_trion_pll_ops,
+		/* CX corner requirements per PLL output frequency */
+		VDD_CX_FMAX_MAP4(LOWER, 345600000,
+				LOW, 576000000,
+				NOMINAL, 1094400000,
+				HIGH, 1497600000),
+	},
+};
+
+/*
+ * The CPU clock itself: an RCG-style mux + half-integer divider feeding
+ * the A7 cluster. The notifier parks it on the safe source while the
+ * PLL is being reprogrammed.
+ */
+static struct clk_regmap_mux_div apcs_clk = {
+	.hid_width = 5,
+	.hid_shift = 0,
+	.src_width = 3,
+	.src_shift = 8,
+	.safe_src = 1,
+	.safe_div = 1,
+	.parent_map = apcs_clk_parent_map,
+	.clk_nb.notifier_call = a7cc_notifier_cb,
+	.clkr.hw.init = &(struct clk_init_data) {
+		.name = "apcs_clk",
+		.parent_names = apcs_clk_parent_name,
+		.num_parents = 3,
+		.vdd_class = &vdd_cpu,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &a7cc_clk_ops,
+	},
+};
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,cpu-sdxpoorwills" },
+	{}
+};
+
+/* Shared regmap config for the APSS PLL / RCG register block */
+static const struct regmap_config cpu_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7F10,
+	.fast_io = true,
+};
+
+/* Clocks exported through the onecell provider, indexed by DT binding IDs */
+static struct clk_hw *cpu_clks_hws[] = {
+	[SYS_APC0_AUX_CLK] = &sys_apc0_aux_clk.hw,
+	[APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw,
+	[APCS_CLK] = &apcs_clk.clkr.hw,
+};
+
+/*
+ * Read the speed-bin and PVS version from the "efuse" MMIO resource.
+ * Both out-parameters default to 0 when the resource is absent, the
+ * mapping fails, or the fuse's valid bit is not set - probe then falls
+ * back to the speed0-bin-v0 table.
+ *
+ * Fuse layout (as decoded below): bits[2:0] bin, bit[3] valid,
+ * bits[5:4] PVS version.
+ */
+static void a7cc_clk_get_speed_bin(struct platform_device *pdev, int *bin,
+					int *version)
+{
+	struct resource *res;
+	void __iomem *base;
+	u32 pte_efuse, valid;
+
+	*bin = 0;
+	*version = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+	if (!res) {
+		dev_info(&pdev->dev,
+			"No speed/PVS binning available. Defaulting to 0!\n");
+		return;
+	}
+
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_info(&pdev->dev,
+			"Unable to read efuse data. Defaulting to 0!\n");
+		return;
+	}
+
+	pte_efuse = readl_relaxed(base);
+	/* One-shot read; release the mapping immediately */
+	devm_iounmap(&pdev->dev, base);
+
+	*bin = pte_efuse & 0x7;
+	valid = (pte_efuse >> 3) & 0x1;
+	*version = (pte_efuse >> 4) & 0x3;
+
+	if (!valid) {
+		dev_info(&pdev->dev, "Speed bin not set. Defaulting to 0!\n");
+		*bin = 0;
+	} else {
+		dev_info(&pdev->dev, "Speed bin: %d\n", *bin);
+	}
+
+	dev_info(&pdev->dev, "PVS version: %d\n", *version);
+}
+
+/*
+ * Parse the "qcom,speedX-bin-vY" frequency/voltage table from DT and
+ * populate the vdd_class level tables and the clock's fmax table.
+ *
+ * Each row is <frequency, uV per regulator...>, i.e. num =
+ * (num_regulators + 1) u32 cells per row.
+ *
+ * Fixes vs. original: the of_property_read_u32_array() return value is
+ * now checked (a failed read previously yielded silent all-zero tables),
+ * and the overflow-checked devm_kcalloc() replaces open-coded
+ * devm_kzalloc(n * size) allocations.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int a7cc_clk_get_fmax_vdd_class(struct platform_device *pdev,
+			struct clk_init_data *clk_intd, char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i, j, ret;
+	struct clk_vdd_class *vdd = clk_intd->vdd_class;
+	int num = vdd->num_regulators + 1;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	/* The property must be a whole number of <freq, uV...> rows */
+	if (prop_len % num) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= num;
+	vdd->level_votes = devm_kcalloc(&pdev->dev, prop_len, sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kcalloc(&pdev->dev, prop_len * (num - 1),
+					sizeof(int), GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	clk_intd->rate_max = devm_kcalloc(&pdev->dev, prop_len,
+					sizeof(unsigned long), GFP_KERNEL);
+	if (!clk_intd->rate_max)
+		return -ENOMEM;
+
+	array = devm_kcalloc(&pdev->dev, prop_len * num, sizeof(u32),
+					GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(of, prop_name, array, prop_len * num);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read %s: %d\n", prop_name, ret);
+		return ret;
+	}
+
+	/* Split each row into the fmax entry and the per-regulator uV list */
+	for (i = 0; i < prop_len; i++) {
+		clk_intd->rate_max[i] = array[num * i];
+		for (j = 1; j < num; j++) {
+			vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+						array[num * i + j];
+		}
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	clk_intd->num_rate_max = prop_len;
+
+	return 0;
+}
+
+/*
+ * Map a clock rate to the lowest vdd level whose fmax can sustain it.
+ * Returns the level index, or -EINVAL when the rate exceeds every fmax.
+ */
+static int find_vdd_level(struct clk_init_data *clk_intd, unsigned long rate)
+{
+	int lvl = 0;
+
+	while (lvl < clk_intd->num_rate_max && rate > clk_intd->rate_max[lvl])
+		lvl++;
+
+	if (lvl == clk_intd->num_rate_max) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+				clk_intd->name);
+		return -EINVAL;
+	}
+
+	return lvl;
+}
+
+/*
+ * Register OPP entries for one CPU device from the clock's fmax table,
+ * stopping once max_rate has been added.
+ *
+ * NOTE(review): iteration starts at rate_max[1] - index 0 is presumably
+ * the XO/safe entry that should not become an OPP; confirm against the
+ * speed-bin table layout.
+ */
+static int
+a7cc_clk_add_opp(struct clk_hw *hw, struct device *dev, unsigned long max_rate)
+{
+	unsigned long rate = 0;
+	int level, uv, j = 1;
+	long ret;
+	struct clk_init_data *clk_intd = (struct clk_init_data *)hw->init;
+	struct clk_vdd_class *vdd = clk_intd->vdd_class;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	while (1) {
+		rate = clk_intd->rate_max[j++];
+		level = find_vdd_level(clk_intd, rate);
+		if (level <= 0) {
+			pr_warn("clock-cpu: no corner for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		uv = vdd->vdd_uv[level];
+		if (uv < 0) {
+			pr_warn("clock-cpu: no uv for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		ret = dev_pm_opp_add(dev, rate, uv);
+		if (ret) {
+			pr_warn("clock-cpu: failed to add OPP for %lu\n", rate);
+			/* Non-zero return signals failure to the WARN caller */
+			return rate;
+		}
+
+		if (rate >= max_rate)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * One-time boot log of the OPP voltages at the cluster's fmin/fmax.
+ * fmin is taken from rate_max[1] (index 0 is skipped, matching
+ * a7cc_clk_add_opp()).
+ */
+static void a7cc_clk_print_opp_table(int a7_cpu)
+{
+	struct dev_pm_opp *oppfmax, *oppfmin;
+	unsigned long apc_fmax, apc_fmin;
+	u32 max_a7ss_index = apcs_clk.clkr.hw.init->num_rate_max;
+
+	apc_fmax = apcs_clk.clkr.hw.init->rate_max[max_a7ss_index - 1];
+	apc_fmin = apcs_clk.clkr.hw.init->rate_max[1];
+
+	/* dev_pm_opp_* lookups require RCU protection on this kernel */
+	rcu_read_lock();
+
+	oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(a7_cpu),
+					apc_fmax, true);
+	oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(a7_cpu),
+					apc_fmin, true);
+	pr_info("Clock_cpu: OPP voltage for %lu: %ld\n", apc_fmin,
+		dev_pm_opp_get_voltage(oppfmin));
+	pr_info("Clock_cpu: OPP voltage for %lu: %ld\n", apc_fmax,
+		dev_pm_opp_get_voltage(oppfmax));
+
+	rcu_read_unlock();
+}
+
+/*
+ * Build the OPP table for every possible CPU from the apcs_clk fmax
+ * table, then log the fmin/fmax voltages once.
+ */
+static void a7cc_clk_populate_opp_table(struct platform_device *pdev)
+{
+	unsigned long apc_fmax;
+	int cpu, a7_cpu = 0;
+	u32 max_a7ss_index = apcs_clk.clkr.hw.init->num_rate_max;
+
+	apc_fmax = apcs_clk.clkr.hw.init->rate_max[max_a7ss_index - 1];
+
+	for_each_possible_cpu(cpu) {
+		a7_cpu = cpu;
+		WARN(a7cc_clk_add_opp(&apcs_clk.clkr.hw, get_cpu_device(cpu),
+				apc_fmax),
+				"Failed to add OPP levels for apcs_clk\n");
+	}
+	/* One time print during bootup */
+	dev_info(&pdev->dev, "OPP tables populated (cpu %d)\n", a7_cpu);
+
+	a7cc_clk_print_opp_table(a7_cpu);
+}
+
+/*
+ * Probe: resolve external clock/regulator dependencies, read the speed
+ * bin, map the APSS PLL/RCG registers, configure the PLL if the boot
+ * loader left it off, register all clocks, and ramp the CPU to its
+ * initial (turbo) rate. Order matters throughout - each step depends on
+ * the resources set up by the previous ones.
+ */
+static int a7cc_driver_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	void __iomem *base;
+	u32 opmode_regval, mode_regval;
+	struct resource *res;
+	struct clk_onecell_data *data;
+	struct device *dev = &pdev->dev;
+	struct device_node *of = pdev->dev.of_node;
+	int i, ret, speed_bin, version, cpu;
+	int num_clks = ARRAY_SIZE(cpu_clks_hws);
+	u32 a7cc_clk_init_rate = 0;
+	char prop_name[] = "qcom,speedX-bin-vX";
+	struct clk *ext_xo_clk;
+
+	/* Require the RPMH-XO clock to be registered before */
+	ext_xo_clk = devm_clk_get(dev, "xo_ao");
+	if (IS_ERR(ext_xo_clk)) {
+		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+			dev_err(dev, "Unable to get xo clock\n");
+		return PTR_ERR(ext_xo_clk);
+	}
+
+	/* Get speed bin information */
+	a7cc_clk_get_speed_bin(pdev, &speed_bin, &version);
+
+	/* Rail Regulator for apcs_pll */
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_ao");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER))
+			dev_err(&pdev->dev,
+				"Unable to get vdd_dig_ao regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	/* Rail Regulator for APSS a7ss mux */
+	vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
+	if (IS_ERR(vdd_cpu.regulator[0])) {
+		if (!(PTR_ERR(vdd_cpu.regulator[0]) == -EPROBE_DEFER))
+			dev_err(&pdev->dev,
+				"Unable to get cpu-vdd regulator\n");
+		return PTR_ERR(vdd_cpu.regulator[0]);
+	}
+
+	/* Select the fuse-matched bin table, falling back to speed0-bin-v0 */
+	snprintf(prop_name, ARRAY_SIZE(prop_name),
+			"qcom,speed%d-bin-v%d", speed_bin, version);
+
+	ret = a7cc_clk_get_fmax_vdd_class(pdev,
+		(struct clk_init_data *)apcs_clk.clkr.hw.init, prop_name);
+	if (ret) {
+		dev_err(&pdev->dev,
+		"Can't get speed bin for apcs_clk. Falling back to zero\n");
+		ret = a7cc_clk_get_fmax_vdd_class(pdev,
+				(struct clk_init_data *)apcs_clk.clkr.hw.init,
+					"qcom,speed0-bin-v0");
+		if (ret) {
+			dev_err(&pdev->dev,
+			"Unable to get speed bin for apcs_clk freq-corner mapping info\n");
+			return ret;
+		}
+	}
+
+	ret = of_property_read_u32(of, "qcom,a7cc-init-rate",
+						&a7cc_clk_init_rate);
+	if (ret) {
+		dev_err(&pdev->dev,
+		"unable to find qcom,a7cc_clk_init_rate property,ret=%d\n",
+			ret);
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll");
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
+		dev_err(&pdev->dev, "Failed to map apcs_cpu_pll register base\n");
+		return PTR_ERR(base);
+	}
+
+	apcs_cpu_pll.clkr.regmap = devm_regmap_init_mmio(dev, base,
+						&cpu_regmap_config);
+	if (IS_ERR(apcs_cpu_pll.clkr.regmap)) {
+		dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll\n");
+		return PTR_ERR(apcs_cpu_pll.clkr.regmap);
+	}
+
+	ret = of_property_read_u32(of, "qcom,rcg-reg-offset",
+						&apcs_clk.reg_offset);
+	if (ret) {
+		dev_err(&pdev->dev,
+		"unable to find qcom,rcg-reg-offset property,ret=%d\n",
+			ret);
+		return -EINVAL;
+	}
+
+	/* The RCG lives in the same register block as the PLL */
+	apcs_clk.clkr.regmap = apcs_cpu_pll.clkr.regmap;
+
+	/* Read PLLs OPMODE and mode register */
+	ret = regmap_read(apcs_cpu_pll.clkr.regmap, PLL_OPMODE_REG,
+						&opmode_regval);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(apcs_cpu_pll.clkr.regmap, PLL_MODE_REG,
+						&mode_regval);
+	if (ret)
+		return ret;
+
+	/* Configure APSS PLL only if it is not enabled and running */
+	if (!(opmode_regval & PLL_OPMODE_RUN) &&
+				!(mode_regval & PLL_MODE_OUTCTRL))
+		clk_trion_pll_configure(&apcs_cpu_pll,
+			apcs_cpu_pll.clkr.regmap, &apcs_cpu_pll_config);
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->clk_num = num_clks;
+
+	data->clks = devm_kzalloc(dev, num_clks * sizeof(struct clk *),
+					GFP_KERNEL);
+	if (!data->clks)
+		return -ENOMEM;
+
+	/* Register clocks with clock framework */
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(dev, cpu_clks_hws[i]);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+		data->clks[i] = clk;
+	}
+
+	/* NOTE(review): "registeration" typo in the message below */
+	ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+	if (ret) {
+		dev_err(&pdev->dev, "CPU clock driver registeration failed\n");
+		return ret;
+	}
+
+	/* Park the mux on the safe source while the PLL rate changes */
+	ret = clk_notifier_register(apcs_cpu_pll.clkr.hw.clk, &apcs_clk.clk_nb);
+	if (ret) {
+		dev_err(dev, "failed to register clock notifier: %d\n", ret);
+		return ret;
+	}
+
+	/* Put proxy vote for APSS PLL */
+	clk_prepare_enable(apcs_cpu_pll.clkr.hw.clk);
+
+	/* Set to TURBO boot frequency */
+	ret = clk_set_rate(apcs_clk.clkr.hw.clk, a7cc_clk_init_rate);
+	if (ret)
+		dev_err(&pdev->dev, "Unable to set init rate on apcs_clk\n");
+
+	/*
+	 * We don't want the CPU clocks to be turned off at late init
+	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
+	 * that the clocks have already been prepared and enabled by the time
+	 * they take over.
+	 */
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		WARN(clk_prepare_enable(apcs_clk.clkr.hw.clk),
+			"Unable to turn on CPU clock\n");
+	put_online_cpus();
+
+	/* Remove proxy vote for APSS PLL */
+	clk_disable_unprepare(apcs_cpu_pll.clkr.hw.clk);
+
+	a7cc_clk_populate_opp_table(pdev);
+
+	dev_info(dev, "CPU clock Driver probed successfully\n");
+
+	return ret;
+}
+
+/* Platform driver bound via the "qcom,cpu-sdxpoorwills" compatible */
+static struct platform_driver a7_clk_driver = {
+	.probe = a7cc_driver_probe,
+	.driver = {
+		.name = "qcom-cpu-sdxpoorwills",
+		.of_match_table = match_table,
+	},
+};
+
+/* Registered at subsys_initcall so CPU clocks exist before cpufreq init */
+static int __init a7_clk_init(void)
+{
+	return platform_driver_register(&a7_clk_driver);
+}
+subsys_initcall(a7_clk_init);
+
+/* Module unload hook - unregister the platform driver */
+static void __exit a7_clk_exit(void)
+{
+	platform_driver_unregister(&a7_clk_driver);
+}
+module_exit(a7_clk_exit);
+
+MODULE_ALIAS("platform:cpu");
+MODULE_DESCRIPTION("A7 CPU clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index ec4c83e..7e665ca 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -31,7 +31,9 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "common.h"
#include "clk-regmap.h"
@@ -53,6 +55,9 @@
#define VOLT_REG 0x114
#define CORE_DCVS_CTRL 0xbc
+#define EFUSE_SHIFT(v1) ((v1) ? 3 : 2)
+#define EFUSE_MASK 0x7
+
#define DCVS_PERF_STATE_DESIRED_REG_0_V1 0x780
#define DCVS_PERF_STATE_DESIRED_REG_0_V2 0x920
#define DCVS_PERF_STATE_DESIRED_REG(n, v1) \
@@ -65,6 +70,9 @@
(((v1) ? OSM_CYCLE_COUNTER_STATUS_REG_0_V1 \
: OSM_CYCLE_COUNTER_STATUS_REG_0_V2) + 4 * (n))
+static DEFINE_VDD_REGS_INIT(vdd_l3_mx_ao, 1);
+static DEFINE_VDD_REGS_INIT(vdd_pwrcl_mx_ao, 1);
+
struct osm_entry {
u16 virtual_corner;
u16 open_loop_volt;
@@ -85,6 +93,8 @@ struct clk_osm {
u64 total_cycle_counter;
u32 prev_cycle_counter;
u32 max_core_count;
+ u32 mx_turbo_freq;
+ unsigned int cpr_rc;
};
static bool is_sdm845v1;
@@ -131,6 +141,18 @@ static inline bool is_better_rate(unsigned long req, unsigned long best,
return (req <= new && new < best) || (best < req && best < new);
}
+/*
+ * Linear lookup: return the LUT index whose frequency equals @rate,
+ * or -EINVAL when no entry matches.
+ */
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int i = 0;
+
+	while (i < entries) {
+		if (table[i].frequency == rate)
+			return i;
+		i++;
+	}
+
+	return -EINVAL;
+}
+
static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
@@ -161,23 +183,62 @@ static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
return rrate;
}
-static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
+ struct clk_osm *c = to_clk_osm(hw);
+ struct clk_hw *p_hw = clk_hw_get_parent(hw);
+ struct clk_osm *parent = to_clk_osm(p_hw);
int index = 0;
- for (index = 0; index < entries; index++) {
- if (rate == table[index].frequency)
- return index;
+ if (!c || !parent)
+ return -EINVAL;
+
+ index = clk_osm_search_table(parent->osm_table,
+ parent->num_entries, rate);
+ if (index < 0) {
+ pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+ return -EINVAL;
}
- return -EINVAL;
+ clk_osm_write_reg(parent, index,
+ DCVS_PERF_STATE_DESIRED_REG(c->core_num,
+ is_sdm845v1));
+
+ /* Make sure the write goes through before proceeding */
+ clk_osm_mb(parent);
+
+ return 0;
}
-const struct clk_ops clk_ops_cpu_osm = {
- .round_rate = clk_osm_round_rate,
- .list_rate = clk_osm_list_rate,
- .debug_init = clk_debug_measure_add,
-};
+/*
+ * Per-core clk .recalc_rate: read back the parent OSM domain's desired
+ * performance-state index for this core and return the corresponding
+ * LUT frequency. Returns -EINVAL (cast to unsigned long) if either
+ * container lookup fails.
+ */
+static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *c = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+
+	if (!c || !parent)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(parent,
+			DCVS_PERF_STATE_DESIRED_REG(c->core_num,
+							is_sdm845v1));
+	return parent->osm_table[index].frequency;
+}
+
+/*
+ * Per-core clk .round_rate: delegate rounding to the parent OSM domain
+ * clock and request the same rate from the parent (CLK_SET_RATE_PARENT).
+ */
+static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *parent_rate)
+{
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+	if (!parent_hw)
+		return -EINVAL;
+
+	*parent_rate = rate;
+	return clk_hw_round_rate(parent_hw, rate);
+}
static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
@@ -233,7 +294,6 @@ static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
return cpuclk->osm_table[index].frequency;
}
-
static struct clk_ops clk_ops_l3_osm = {
.round_rate = clk_osm_round_rate,
.list_rate = clk_osm_list_rate,
@@ -242,18 +302,23 @@ static struct clk_ops clk_ops_l3_osm = {
.debug_init = clk_debug_measure_add,
};
+static struct clk_ops clk_ops_core;
+static struct clk_ops clk_ops_cpu_osm;
+
static struct clk_init_data osm_clks_init[] = {
[0] = {
.name = "l3_clk",
.parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_l3_osm,
+ .vdd_class = &vdd_l3_mx_ao,
},
[1] = {
.name = "pwrcl_clk",
.parent_names = (const char *[]){ "bi_tcxo_ao" },
.num_parents = 1,
.ops = &clk_ops_cpu_osm,
+ .vdd_class = &vdd_pwrcl_mx_ao,
},
[2] = {
.name = "perfcl_clk",
@@ -287,7 +352,8 @@ static struct clk_osm cpu0_pwrcl_clk = {
.name = "cpu0_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -299,7 +365,8 @@ static struct clk_osm cpu1_pwrcl_clk = {
.name = "cpu1_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -311,7 +378,8 @@ static struct clk_osm cpu2_pwrcl_clk = {
.name = "cpu2_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -323,7 +391,8 @@ static struct clk_osm cpu3_pwrcl_clk = {
.name = "cpu3_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -335,7 +404,8 @@ static struct clk_osm cpu4_pwrcl_clk = {
.name = "cpu4_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -347,7 +417,8 @@ static struct clk_osm cpu5_pwrcl_clk = {
.name = "cpu5_pwrcl_clk",
.parent_names = (const char *[]){ "pwrcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -366,7 +437,8 @@ static struct clk_osm cpu4_perfcl_clk = {
.name = "cpu4_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -378,7 +450,8 @@ static struct clk_osm cpu5_perfcl_clk = {
.name = "cpu5_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -390,7 +463,8 @@ static struct clk_osm cpu6_perfcl_clk = {
.name = "cpu6_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -402,7 +476,8 @@ static struct clk_osm cpu7_perfcl_clk = {
.name = "cpu7_perfcl_clk",
.parent_names = (const char *[]){ "perfcl_clk" },
.num_parents = 1,
- .ops = &clk_dummy_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_ops_core,
},
};
@@ -515,13 +590,23 @@ static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
}
static void
-osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+osm_set_index(struct clk_osm *c, unsigned int index)
{
- clk_osm_write_reg(c, index,
- DCVS_PERF_STATE_DESIRED_REG(num, is_sdm845v1));
+ struct clk_hw *p_hw = clk_hw_get_parent(&c->hw);
+ struct clk_osm *parent = to_clk_osm(p_hw);
+ unsigned long rate = 0;
- /* Make sure the write goes through before proceeding */
- clk_osm_mb(c);
+ if (index >= OSM_TABLE_SIZE) {
+ pr_err("Passing an index (%u) that's greater than max (%d)\n",
+ index, OSM_TABLE_SIZE - 1);
+ return;
+ }
+
+ rate = parent->osm_table[index].frequency;
+ if (!rate)
+ return;
+
+ clk_set_rate(c->hw.clk, clk_round_rate(c->hw.clk, rate));
}
static int
@@ -529,7 +614,7 @@ osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
{
struct clk_osm *c = policy->driver_data;
- osm_set_index(c, index, c->core_num);
+ osm_set_index(c, index);
return 0;
}
@@ -849,6 +934,7 @@ static u64 clk_osm_get_cpu_cycle_counter(int cpu)
static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
{
u32 data, src, lval, i, j = OSM_TABLE_SIZE;
+ struct clk_vdd_class *vdd = osm_clks_init[c->cluster_num].vdd_class;
for (i = 0; i < OSM_TABLE_SIZE; i++) {
data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
@@ -881,6 +967,29 @@ static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
if (!osm_clks_init[c->cluster_num].rate_max)
return -ENOMEM;
+ if (vdd) {
+ vdd->level_votes = devm_kcalloc(&pdev->dev, j,
+ sizeof(*vdd->level_votes), GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kcalloc(&pdev->dev, j, sizeof(*vdd->vdd_uv),
+ GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ for (i = 0; i < j; i++) {
+ if (c->osm_table[i].frequency < c->mx_turbo_freq ||
+ (c->cpr_rc > 1))
+ vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_NOM;
+ else
+ vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_TURBO;
+ }
+ vdd->num_levels = j;
+ vdd->cur_level = j;
+ vdd->use_max_uV = true;
+ }
+
for (i = 0; i < j; i++)
osm_clks_init[c->cluster_num].rate_max[i] =
c->osm_table[i].frequency;
@@ -964,12 +1073,17 @@ static void clk_cpu_osm_driver_sdm670_fixup(void)
static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
- int rc = 0, i;
- u32 val;
+ int rc = 0, i, cpu;
+ bool is_sdm670 = false;
+ u32 *array;
+ u32 val, pte_efuse;
+ void __iomem *vbase;
int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
struct clk *ext_xo_clk, *clk;
+ struct clk_osm *osm_clk;
struct device *dev = &pdev->dev;
struct clk_onecell_data *clk_data;
+ struct resource *res;
struct cpu_cycle_counter_cb cb = {
.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
};
@@ -989,8 +1103,68 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
"qcom,clk-cpu-osm");
if (of_device_is_compatible(pdev->dev.of_node,
- "qcom,clk-cpu-osm-sdm670"))
+ "qcom,clk-cpu-osm-sdm670")) {
+ is_sdm670 = true;
clk_cpu_osm_driver_sdm670_fixup();
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_rc");
+ if (res) {
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in cpr_rc base\n");
+ return -ENOMEM;
+ }
+ pte_efuse = readl_relaxed(vbase);
+ l3_clk.cpr_rc = pwrcl_clk.cpr_rc = perfcl_clk.cpr_rc =
+ ((pte_efuse >> EFUSE_SHIFT(is_sdm845v1 | is_sdm670))
+ & EFUSE_MASK);
+ pr_info("LOCAL_CPR_RC: %u\n", l3_clk.cpr_rc);
+ devm_iounmap(&pdev->dev, vbase);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to get platform resource for cpr_rc\n");
+ return -ENOMEM;
+ }
+
+ vdd_l3_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_l3_mx_ao");
+ if (IS_ERR(vdd_l3_mx_ao.regulator[0])) {
+ if (PTR_ERR(vdd_l3_mx_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_l3_mx_ao regulator\n");
+ return PTR_ERR(vdd_l3_mx_ao.regulator[0]);
+ }
+
+ vdd_pwrcl_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_pwrcl_mx_ao");
+ if (IS_ERR(vdd_pwrcl_mx_ao.regulator[0])) {
+ if (PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_pwrcl_mx_ao regulator\n");
+ return PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]);
+ }
+
+ array = devm_kcalloc(&pdev->dev, MAX_CLUSTER_CNT, sizeof(*array),
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,mx-turbo-freq",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,mx-turbo-freq property, rc=%d\n",
+ rc);
+ devm_kfree(&pdev->dev, array);
+ return rc;
+ }
+
+ l3_clk.mx_turbo_freq = array[l3_clk.cluster_num];
+ pwrcl_clk.mx_turbo_freq = array[pwrcl_clk.cluster_num];
+ perfcl_clk.mx_turbo_freq = array[perfcl_clk.cluster_num];
+
+ devm_kfree(&pdev->dev, array);
clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
GFP_KERNEL);
@@ -1046,6 +1220,16 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
+ clk_ops_core = clk_dummy_ops;
+ clk_ops_core.set_rate = clk_cpu_set_rate;
+ clk_ops_core.round_rate = clk_cpu_round_rate;
+ clk_ops_core.recalc_rate = clk_cpu_recalc_rate;
+
+ clk_ops_cpu_osm = clk_dummy_ops;
+ clk_ops_cpu_osm.round_rate = clk_osm_round_rate;
+ clk_ops_cpu_osm.list_rate = clk_osm_list_rate;
+ clk_ops_cpu_osm.debug_init = clk_debug_measure_add;
+
/* Register OSM l3, pwr and perf clocks with Clock Framework */
for (i = 0; i < num_clks; i++) {
if (!osm_qcom_clk_hws[i])
@@ -1076,6 +1260,16 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
WARN(clk_prepare_enable(l3_misc_vote_clk.hw.clk),
"clk: Failed to enable misc clock for L3\n");
+ /*
+ * Call clk_prepare_enable for the silver clock explicitly in order to
+ * place an implicit vote on MX
+ */
+ for_each_online_cpu(cpu) {
+ osm_clk = logical_cpu_to_clk(cpu);
+ if (!osm_clk)
+ return -EINVAL;
+ clk_prepare_enable(osm_clk->hw.clk);
+ }
populate_opp_table(pdev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index 9682799..70f7612 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -83,6 +83,8 @@ struct pll_config {
u32 aux2_output_mask;
u32 early_output_mask;
u32 config_ctl_val;
+ u32 config_ctl_hi_val;
+ u32 config_ctl_hi1_val;
};
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 60758b4..aaf2324 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -188,4 +188,6 @@ extern const struct clk_ops clk_dp_ops;
extern int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk,
struct device *dev, u8 rcg_flags);
+extern unsigned long
+clk_rcg2_calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div);
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8d5e527..35bcf5a 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -223,8 +223,8 @@ static void disable_unprepare_rcg_srcs(struct clk *curr, struct clk *new)
* rate = ----------- x ---
* hid_div n
*/
-static unsigned long
-calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
+unsigned long
+clk_rcg2_calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
if (hid_div) {
rate *= 2;
@@ -240,6 +240,7 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
return rate;
}
+EXPORT_SYMBOL(clk_rcg2_calc_rate);
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -274,7 +275,7 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
hid_div = cfg >> CFG_SRC_DIV_SHIFT;
hid_div &= mask;
- return calc_rate(parent_rate, m, n, mode, hid_div);
+ return clk_rcg2_calc_rate(parent_rate, m, n, mode, hid_div);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw,
@@ -764,7 +765,7 @@ static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
hid_div >>= CFG_SRC_DIV_SHIFT;
hid_div &= mask;
- req->rate = calc_rate(req->best_parent_rate,
+ req->rate = clk_rcg2_calc_rate(req->best_parent_rate,
frac->num, frac->den,
!!frac->den, hid_div);
return 0;
@@ -804,7 +805,7 @@ static int clk_byte_determine_rate(struct clk_hw *hw,
div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
div = min_t(u32, div, mask);
- req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+ req->rate = clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
return 0;
}
@@ -862,7 +863,7 @@ static int clk_byte2_determine_rate(struct clk_hw *hw,
div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
div = min_t(u32, div, mask);
- req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+ req->rate = clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div);
return 0;
}
@@ -1318,7 +1319,7 @@ int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk, struct device *dev,
dfs_freq_tbl[i].n = n;
/* calculate the final frequency */
- calc_freq = calc_rate(prate, dfs_freq_tbl[i].m,
+ calc_freq = clk_rcg2_calc_rate(prate, dfs_freq_tbl[i].m,
dfs_freq_tbl[i].n, mode,
dfs_freq_tbl[i].pre_div);
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
index 63a696a..6cd8d4f 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.h
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -42,6 +42,7 @@
* on and runs at only one rate.
* @parent_map: pointer to parent_map struct
* @clkr: handle between common and hardware-specific interfaces
+ * @clk_nb: clock notifier registered for clock rate change
*/
struct clk_regmap_mux_div {
@@ -57,6 +58,7 @@ struct clk_regmap_mux_div {
unsigned long safe_freq;
const struct parent_map *parent_map;
struct clk_regmap clkr;
+ struct notifier_block clk_nb;
};
extern const struct clk_ops clk_regmap_mux_div_ops;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 2109132..1f90d46 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -318,17 +318,30 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,rpmh-clk-sdm845", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,rpmh-clk-sdm670", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,rpmh-clk-sdxpoorwills", .data = &clk_rpmh_sdm845},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
-static void clk_rpmh_sdm670_fixup_sdm670(void)
+static void clk_rpmh_sdm670_fixup(void)
{
sdm845_rpmh_clocks[RPMH_RF_CLK3] = NULL;
sdm845_rpmh_clocks[RPMH_RF_CLK3_A] = NULL;
}
-static int clk_rpmh_sdm670_fixup(struct platform_device *pdev)
+static void clk_rpmh_sdxpoorwills_fixup(void)
+{
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK2] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK2_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK3] = NULL;
+ sdm845_rpmh_clocks[RPMH_LN_BB_CLK3_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK2] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK2_A] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK3] = NULL;
+ sdm845_rpmh_clocks[RPMH_RF_CLK3_A] = NULL;
+}
+
+static int clk_rpmh_fixup(struct platform_device *pdev)
{
const char *compat = NULL;
int compatlen = 0;
@@ -338,7 +351,9 @@ static int clk_rpmh_sdm670_fixup(struct platform_device *pdev)
return -EINVAL;
if (!strcmp(compat, "qcom,rpmh-clk-sdm670"))
- clk_rpmh_sdm670_fixup_sdm670();
+ clk_rpmh_sdm670_fixup();
+ else if (!strcmp(compat, "qcom,rpmh-clk-sdxpoorwills"))
+ clk_rpmh_sdxpoorwills_fixup();
return 0;
}
@@ -410,7 +425,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
goto err2;
}
- ret = clk_rpmh_sdm670_fixup(pdev);
+ ret = clk_rpmh_fixup(pdev);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 3b13c9b..d4f27d7 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -390,7 +390,7 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sdm670[] = {
F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
- F(286670000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(286666667, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
diff --git a/drivers/clk/qcom/gcc-sdxpoorwills.c b/drivers/clk/qcom/gcc-sdxpoorwills.c
new file mode 100644
index 0000000..1b5cf61
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdxpoorwills.c
@@ -0,0 +1,1916 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdxpoorwills.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_pi_sleep_clk",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "core_pi_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll4_out_even",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .type = TRION_PLL,
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_trion_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_trion_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .type = TRION_PLL,
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_trion_fixed_pll_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 615000000,
+ LOW, 1066000000,
+ LOW_L1, 1600000000,
+ NOMINAL, 2000000000),
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x76000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_trion_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_trion_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_trion_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x11024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x15024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP3(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 50000000),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1700c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 25000000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1200c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x1800c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 9600000,
+ LOWER, 19200000,
+ LOW, 48000000,
+ NOMINAL, 63157895),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ NOMINAL, 100000000,
+ HIGH, 133333333),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x2402c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP2(
+ MIN, 19200000,
+ NOMINAL, 50000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GPLL4_OUT_EVEN, 4, 0, 0),
+ F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_clk_src = {
+ .cmd_rcgr = 0x47020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 125000000,
+ NOMINAL, 250000000),
+ },
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x47038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 125000000,
+ NOMINAL, 250000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x2b004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP4(
+ MIN, 19200000,
+ LOWER, 50000000,
+ LOW, 100000000,
+ NOMINAL, 200000000),
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_phy_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x37030,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ VDD_CX_FMAX_MAP1(
+ MIN, 19200000),
+ },
+};
+
+/*
+ * Root clock generators (RCG2) and their frequency tables.  Each source
+ * declares a VDD_CX fmax map tying its supported rates to voltage corners.
+ * Parent maps/names (gcc_parent_map_*/gcc_parent_names_*) are defined
+ * earlier in this file.
+ */
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+	.cmd_rcgr = 0x39010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_pcie_phy_refgen_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOW, 100000000),
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+	.cmd_rcgr = 0x19010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_pdm2_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_pdm2_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 9600000,
+			LOWER, 19200000,
+			LOW, 60000000),
+	},
+};
+
+/*
+ * NOTE(review): no dedicated frequency table — reuses ftbl_gcc_gp1_clk_src
+ * (defined earlier in this file).  Presumably the rate plans match; confirm
+ * against the SDCC clock plan.
+ */
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0xf00c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_apps_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 50000000,
+			LOW, 100000000,
+			NOMINAL, 200000000),
+	},
+};
+
+/* Reuses the PCIe aux PHY frequency table (XO-only rates). */
+static struct clk_rcg2 gcc_spmi_fetcher_clk_src = {
+	.cmd_rcgr = 0x3f00c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_spmi_fetcher_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 2,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+/* 2.5 is a half-integer pre-divider; the F() macro encodes it as 2*h-1. */
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+	.cmd_rcgr = 0xb01c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_master_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_master_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 50000000,
+			LOWER, 75000000,
+			LOW, 100000000,
+			NOMINAL, 200000000,
+			HIGH, 240000000),
+	},
+};
+
+/* 7.5 is a half-integer pre-divider; the F() macro encodes it as 2*h-1. */
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+	.cmd_rcgr = 0xb034,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_mock_utmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_mock_utmi_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 40000000,
+			LOW, 60000000),
+	},
+};
+
+/* 1 MHz is derived from XO via M/N (5/96); hence the 16-bit M/N counter. */
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+	F(1000000, P_BI_TCXO, 1, 5, 96),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+	.cmd_rcgr = 0xb05c,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb3_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_3,
+		.num_parents = 3,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP1(
+			MIN, 19200000),
+	},
+};
+
+/*
+ * Branch (gate) clocks for the BLSP1 serial-engine block.  The AHB and
+ * sleep gates are controlled through the shared voted enable register
+ * (0x6d004) and therefore use BRANCH_HALT_VOTED status checking; the
+ * per-QUP and per-UART gates are locally controlled (enable bit 0 in
+ * their own CBCR) and use plain BRANCH_HALT.  Each QUP/UART branch is
+ * fed by its matching *_clk_src RCG and propagates rate requests to it
+ * via CLK_SET_RATE_PARENT.
+ */
+static struct clk_branch gcc_blsp1_ahb_clk = {
+	.halt_reg = 0x10004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(25),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+	.halt_reg = 0x11008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x11008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup1_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+	.halt_reg = 0x11004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x11004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup1_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup1_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+	.halt_reg = 0x13008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x13008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup2_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+	.halt_reg = 0x13004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x13004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup2_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup2_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+	.halt_reg = 0x15008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x15008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup3_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+	.halt_reg = 0x15004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x15004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup3_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup3_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+	.halt_reg = 0x17008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_i2c_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup4_i2c_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+	.halt_reg = 0x17004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x17004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_qup4_spi_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_qup4_spi_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+	.halt_reg = 0x10008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(26),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+	.halt_reg = 0x12004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x12004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart1_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_uart1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+	.halt_reg = 0x14004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x14004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart2_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_uart2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+	.halt_reg = 0x16004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x16004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart3_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_uart3_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+	.halt_reg = 0x18004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x18004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_blsp1_uart4_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_blsp1_uart4_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Boot-ROM, crypto-engine (CE1), CPU-subsystem and Ethernet branch clocks.
+ * Voted branches share enable register 0x6d004; entries with hwcg_reg/
+ * hwcg_bit additionally have hardware clock-gating enabled on bit 1 of
+ * their own CBCR.  CLK_IS_CRITICAL marks clocks that must never be gated
+ * by the framework (CPU fabric paths).
+ */
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+	.halt_reg = 0x1c004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x1c004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_boot_rom_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+	.halt_reg = 0x2100c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x2100c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+	.halt_reg = 0x21008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ce1_clk = {
+	.halt_reg = 0x21004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ce1_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Kept always-on (CLK_IS_CRITICAL): CPU subsystem AHB path. */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+	.halt_reg = 0x24000,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(21),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+	.halt_reg = 0x24004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x24004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(22),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_gnoc_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+	.halt_reg = 0x24008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x24008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_rbcpr_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_rbcpr_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_eth_axi_clk = {
+	.halt_reg = 0x4701c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4701c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_eth_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_eth_ptp_clk = {
+	.halt_reg = 0x47018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x47018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_eth_ptp_clk",
+			.parent_names = (const char *[]){
+				"gcc_emac_ptp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_eth_rgmii_clk = {
+	.halt_reg = 0x47010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x47010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_eth_rgmii_clk",
+			.parent_names = (const char *[]){
+				"gcc_emac_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_eth_slave_ahb_clk = {
+	.halt_reg = 0x47014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x47014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x47014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_eth_slave_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Remaining branch clocks: general-purpose (GP1-3), modem subsystem (MSS),
+ * PCIe, PDM, PRNG, SDCC1, SPMI fetcher, system-NoC and USB3.  PCIe
+ * branches are voted through enable register 0x6d00c; all others follow
+ * the same voted (0x6d004) vs. locally-gated (bit 0 of own CBCR)
+ * convention used above.
+ */
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x2b000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2b000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x2c000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2c000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x2d000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2d000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+	.halt_reg = 0x40000,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x40000,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x40000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mss_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Simple gate (clk_gate2) with a 500 us post-enable delay — presumably a
+ * settling requirement for handing GPLL0/2 to the modem; confirm against
+ * the clock plan.
+ */
+static struct clk_gate2 gcc_mss_gpll0_div_clk_src = {
+	.udelay = 500,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(17),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mss_gpll0_div_clk_src",
+			.ops = &clk_gate2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+	.halt_reg = 0x40148,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x40148,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_mss_snoc_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+	.halt_reg = 0x37020,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(3),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_aux_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+	.halt_reg = 0x3701c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x3701c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(2),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+	.halt_reg = 0x37018,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(1),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_mstr_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_phy_refgen_clk = {
+	.halt_reg = 0x39028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x39028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_phy_refgen_clk",
+			.parent_names = (const char *[]){
+				"gcc_pcie_phy_refgen_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+	.halt_reg = 0x37028,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_pipe_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+	.halt_reg = 0x37024,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_sleep_clk",
+			.parent_names = (const char *[]){
+				"gcc_pcie_aux_phy_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+	.halt_reg = 0x37014,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x37014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_slv_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+	.halt_reg = 0x37010,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d00c,
+		.enable_mask = BIT(5),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pcie_slv_q2a_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+	.halt_reg = 0x1900c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1900c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm2_clk",
+			.parent_names = (const char *[]){
+				"gcc_pdm2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+	.halt_reg = 0x19004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x19004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x19004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+	.halt_reg = 0x19008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x19008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_xo4_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x1a004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0xf008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0xf004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_spmi_fetcher_ahb_clk = {
+	.halt_reg = 0x3f008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x3f008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_spmi_fetcher_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_spmi_fetcher_clk = {
+	.halt_reg = 0x3f004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x3f004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_spmi_fetcher_clk",
+			.parent_names = (const char *[]){
+				"gcc_spmi_fetcher_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Kept always-on (CLK_IS_CRITICAL): NoC-side CPU AHB path. */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+	.halt_reg = 0x400c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x6d004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_usb3_clk = {
+	.halt_reg = 0x4018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_usb3_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+	.halt_reg = 0xb010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_master_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+	.halt_reg = 0xb018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+	.halt_reg = 0xb014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+	.halt_reg = 0xb050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+	.halt_reg = 0xb054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_phy_pipe_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+	.halt_reg = 0xe004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xe004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xe004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb_phy_cfg_ahb2phy_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Lookup table mapping the DT binding's clock IDs (GCC_* indices from the
+ * dt-bindings header) to the clk_regmap definitions above.
+ */
+static struct clk_regmap *gcc_sdxpoorwills_clocks[] = {
+	[GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+	[GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] =
+					&gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] =
+					&gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] =
+					&gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] =
+					&gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] =
+					&gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] =
+					&gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] =
+					&gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+	[GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+	[GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] =
+					&gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+	[GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+	[GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+	[GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+	[GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+	[GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+	[GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+	[GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+	[GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+	[GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+	[GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+	[GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+	[GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+	[GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+	[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+	[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+	[GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+	[GCC_EMAC_CLK_SRC] = &gcc_emac_clk_src.clkr,
+	[GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+	[GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr,
+	[GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr,
+	[GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr,
+	[GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+	[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+	[GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+	[GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+	[GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+	[GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+	[GCC_PCIE_PHY_REFGEN_CLK] = &gcc_pcie_phy_refgen_clk.clkr,
+	[GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+	[GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+	[GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+	[GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+	[GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+	[GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+	[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+	[GCC_SPMI_FETCHER_AHB_CLK] = &gcc_spmi_fetcher_ahb_clk.clkr,
+	[GCC_SPMI_FETCHER_CLK] = &gcc_spmi_fetcher_clk.clkr,
+	[GCC_SPMI_FETCHER_CLK_SRC] = &gcc_spmi_fetcher_clk_src.clkr,
+	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+	[GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr,
+	[GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+	[GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+	[GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+	[GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+	[GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+	[GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+	[GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+	[GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+	[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+	[GPLL4] = &gpll4.clkr,
+	[GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+};
+
+/*
+ * Block-control (reset) registers exposed via the reset controller.
+ * NOTE(review): there is no GCC_BLSP1_UART1_BCR entry even though UART1
+ * clocks are registered above — confirm whether that omission is
+ * intentional.
+ */
+static const struct qcom_reset_map gcc_sdxpoorwills_resets[] = {
+	[GCC_BLSP1_QUP1_BCR] = { 0x11000 },
+	[GCC_BLSP1_QUP2_BCR] = { 0x13000 },
+	[GCC_BLSP1_QUP3_BCR] = { 0x15000 },
+	[GCC_BLSP1_QUP4_BCR] = { 0x17000 },
+	[GCC_BLSP1_UART2_BCR] = { 0x14000 },
+	[GCC_BLSP1_UART3_BCR] = { 0x16000 },
+	[GCC_BLSP1_UART4_BCR] = { 0x18000 },
+	[GCC_CE1_BCR] = { 0x21000 },
+	[GCC_EMAC_BCR] = { 0x47000 },
+	[GCC_PCIE_BCR] = { 0x37000 },
+	[GCC_PCIE_PHY_BCR] = { 0x39000 },
+	[GCC_PDM_BCR] = { 0x19000 },
+	[GCC_PRNG_BCR] = { 0x1a000 },
+	[GCC_SDCC1_BCR] = { 0xf000 },
+	[GCC_SPMI_FETCHER_BCR] = { 0x3f000 },
+	[GCC_USB30_BCR] = { 0xb000 },
+	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0xe000 },
+};
+
+
+/* 32-bit MMIO register space for the whole GCC block. */
+static const struct regmap_config gcc_sdxpoorwills_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x9b040,
+	.fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sdxpoorwills_desc = {
+	.config = &gcc_sdxpoorwills_regmap_config,
+	.clks = gcc_sdxpoorwills_clocks,
+	.num_clks = ARRAY_SIZE(gcc_sdxpoorwills_clocks),
+	.resets = gcc_sdxpoorwills_resets,
+	.num_resets = ARRAY_SIZE(gcc_sdxpoorwills_resets),
+};
+
+static const struct of_device_id gcc_sdxpoorwills_match_table[] = {
+	{ .compatible = "qcom,gcc-sdxpoorwills" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdxpoorwills_match_table);
+
+/*
+ * Probe: map the GCC register space, acquire the vdd_cx rail used by the
+ * fmax/voltage-level tables, then register all clocks and resets with the
+ * common qcom_cc helper.  Returns 0 on success or a negative errno
+ * (including -EPROBE_DEFER while the regulator is not yet available).
+ */
+static int gcc_sdxpoorwills_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &gcc_sdxpoorwills_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		ret = PTR_ERR(vdd_cx.regulator[0]);
+		/* Probe deferral is routine; only log real failures. */
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return ret;
+	}
+
+	ret = qcom_cc_really_probe(pdev, &gcc_sdxpoorwills_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+	return ret;
+}
+
+static struct platform_driver gcc_sdxpoorwills_driver = {
+	.probe		= gcc_sdxpoorwills_probe,
+	.driver		= {
+		.name	= "gcc-sdxpoorwills",
+		.of_match_table = gcc_sdxpoorwills_match_table,
+	},
+};
+
+/*
+ * Registered at core_initcall so the GCC clocks exist before consumer
+ * drivers (serial, USB, PCIe, ...) probe.
+ */
+static int __init gcc_sdxpoorwills_init(void)
+{
+	return platform_driver_register(&gcc_sdxpoorwills_driver);
+}
+core_initcall(gcc_sdxpoorwills_init);
+
+static void __exit gcc_sdxpoorwills_exit(void)
+{
+	platform_driver_unregister(&gcc_sdxpoorwills_driver);
+}
+module_exit(gcc_sdxpoorwills_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDXPOORWILLS Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-sdxpoorwills");
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 55d14ff..53446a2 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -292,6 +292,7 @@ static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src_sdm670[] = {
F(430000000, P_CRC_DIV, 1, 0, 0),
F(565000000, P_CRC_DIV, 1, 0, 0),
F(650000000, P_CRC_DIV, 1, 0, 0),
+ F(700000000, P_CRC_DIV, 1, 0, 0),
F(750000000, P_CRC_DIV, 1, 0, 0),
F(780000000, P_CRC_DIV, 1, 0, 0),
{ }
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 89ed5cd..dd02a8f 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -762,6 +762,10 @@ static int vco_10nm_prepare(struct clk_hw *hw)
return -EINVAL;
}
+ /* Skip vco recalculation for continuous splash use case */
+ if (pll->handoff_resources == true)
+ return 0;
+
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("failed to enable pll (%d) resource, rc=%d\n",
@@ -814,6 +818,19 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
if (!vco->priv)
pr_err("vco priv is null\n");
+ /*
+ * Calculate the vco rate from HW registers only for handoff cases.
+ * For other cases where a vco_10nm_set_rate() has already been
+ * called, just return the rate that was set earlier. This is due
+ * to the fact that recalculating VCO rate requires us to read the
+ * correct value of the pll_out_div divider clock, which is only set
+ * afterwards.
+ */
+ if (pll->vco_current_rate != 0) {
+ pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
+ return pll->vco_current_rate;
+ }
+
rc = mdss_pll_resource_enable(pll, true);
if (rc) {
pr_err("failed to enable pll(%d) resource, rc=%d\n",
@@ -821,6 +838,9 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
return 0;
}
+ if (!dsi_pll_10nm_lock_status(pll))
+ pll->handoff_resources = true;
+
dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
dec &= 0xFF;
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index ea16086..2fe0573 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -2559,8 +2559,10 @@ static const struct samsung_fixed_rate_clock disp_fixed_clks[] __initconst = {
FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000),
FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from MIPI_DPHY0 */
- FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, 0, 188000000),
- FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, 0, 100000000),
+ FRATE(CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY, "phyclk_mipidphy0_bitclkdiv8_phy",
+ NULL, 0, 188000000),
+ FRATE(CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY, "phyclk_mipidphy0_rxclkesc0_phy",
+ NULL, 0, 100000000),
/* PHY clocks from HDMI_PHY */
FRATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY, "phyclk_hdmiphy_tmds_clko_phy",
NULL, 0, 300000000),
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 21c427d..a26c8a1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -803,6 +803,13 @@ static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
.num_resets = ARRAY_SIZE(sun8i_h3_ccu_resets),
};
+static struct ccu_mux_nb sun8i_h3_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
+ .bypass_index = 1, /* index of 24 MHz oscillator */
+};
+
static void __init sun8i_h3_ccu_setup(struct device_node *node)
{
void __iomem *reg;
@@ -821,6 +828,9 @@ static void __init sun8i_h3_ccu_setup(struct device_node *node)
writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
sunxi_ccu_probe(node, reg, &sun8i_h3_ccu_desc);
+
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun8i_h3_cpu_nb);
}
CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu",
sun8i_h3_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 51d4bac..01d0594 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -70,6 +70,11 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
goto err_clk_unreg;
reset = kzalloc(sizeof(*reset), GFP_KERNEL);
+ if (!reset) {
+ ret = -ENOMEM;
+ goto err_alloc_reset;
+ }
+
reset->rcdev.of_node = node;
reset->rcdev.ops = &ccu_reset_ops;
reset->rcdev.owner = THIS_MODULE;
@@ -85,6 +90,16 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
return 0;
err_of_clk_unreg:
+ kfree(reset);
+err_alloc_reset:
+ of_clk_del_provider(node);
err_clk_unreg:
+ while (--i >= 0) {
+ struct clk_hw *hw = desc->hw_clks->hws[i];
+
+ if (!hw)
+ continue;
+ clk_hw_unregister(hw);
+ }
return ret;
}
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 9a7e37c..e1d7373 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (clockevent_state_shutdown(&cs5535_clockevent))
+ if (clockevent_state_detached(&cs5535_clockevent) ||
+ clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index e2023bd..fd5984f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -244,7 +244,7 @@
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI
+ depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
default n
help
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 062d297..e8c7af52 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1245,8 +1245,6 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
- /* Clear mask of registered CPUs */
- cpumask_clear(policy->real_cpus);
}
/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 80fa656..a59ae8e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -609,6 +609,7 @@ static void intel_pstate_hwp_set_online_cpus(void)
static int pid_param_set(void *data, u64 val)
{
*(u32 *)data = val;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
intel_pstate_reset_all_pid();
return 0;
}
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index f968ffd9..d310380 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -42,6 +42,8 @@ struct cpufreq_suspend_t {
};
static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+static DEFINE_PER_CPU(int, cached_resolve_idx);
+static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
unsigned int index)
@@ -74,6 +76,7 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
int ret = 0;
int index;
struct cpufreq_frequency_table *table;
+ int first_cpu = cpumask_first(policy->related_cpus);
mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
@@ -88,13 +91,11 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
}
table = policy->freq_table;
- if (!table) {
- pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
- policy->cpu);
- ret = -ENODEV;
- goto done;
- }
- index = cpufreq_frequency_table_target(policy, target_freq, relation);
+ if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
+ index = per_cpu(cached_resolve_idx, first_cpu);
+ else
+ index = cpufreq_frequency_table_target(policy, target_freq,
+ relation);
pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
policy->cpu, target_freq, relation,
@@ -107,6 +108,23 @@ static int msm_cpufreq_target(struct cpufreq_policy *policy,
return ret;
}
+static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ int index;
+ int first_cpu = cpumask_first(policy->related_cpus);
+ unsigned int freq;
+
+ index = cpufreq_frequency_table_target(policy, target_freq,
+ CPUFREQ_RELATION_L);
+ freq = policy->freq_table[index].frequency;
+
+ per_cpu(cached_resolve_idx, first_cpu) = index;
+ per_cpu(cached_resolve_freq, first_cpu) = freq;
+
+ return freq;
+}
+
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -296,6 +314,7 @@ static struct cpufreq_driver msm_cpufreq_driver = {
.init = msm_cpufreq_init,
.verify = msm_cpufreq_verify,
.target = msm_cpufreq_target,
+ .resolve_freq = msm_cpufreq_resolve_freq,
.get = msm_cpufreq_get_freq,
.name = "msm",
.attr = msm_freq_attr,
@@ -462,6 +481,7 @@ static int __init msm_cpufreq_register(void)
for_each_possible_cpu(cpu) {
mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
per_cpu(suspend_data, cpu).device_suspended = 0;
+ per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
}
rc = platform_driver_register(&msm_cpufreq_plat_driver);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 78ab946..a3e1de0 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -613,16 +613,36 @@ EXPORT_SYMBOL_GPL(cpuidle_register);
#ifdef CONFIG_SMP
+static void wake_up_idle_cpus(void *v)
+{
+ int cpu;
+ struct cpumask cpus;
+
+ preempt_disable();
+ if (v) {
+ cpumask_andnot(&cpus, v, cpu_isolated_mask);
+ cpumask_and(&cpus, &cpus, cpu_online_mask);
+ } else
+ cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
+
+ for_each_cpu(cpu, &cpus) {
+ if (cpu == smp_processor_id())
+ continue;
+ wake_up_if_idle(cpu);
+ }
+ preempt_enable();
+}
+
/*
* This function gets called when a part of the kernel has a new latency
- * requirement. This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
+ requirement. This means we only need to get those processors out of their
+ C-state whose QoS requirement has changed, and then recalculate a new
+ suitable C-state. Just do a cross-cpu IPI; that wakes them right up.
*/
static int cpuidle_latency_notify(struct notifier_block *b,
unsigned long l, void *v)
{
- wake_up_all_idle_cpus();
+ wake_up_idle_cpus(v);
return NOTIFY_OK;
}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 630cda2..fc1b4e4 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1356,7 +1356,8 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
bool success = false;
const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
- int64_t start_time = ktime_to_ns(ktime_get()), end_time;
+ ktime_t start = ktime_get();
+ uint64_t start_time = ktime_to_ns(start), end_time;
struct power_params *pwr_params;
pwr_params = &cpu->levels[idx].pwr;
@@ -1381,9 +1382,7 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev,
cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
cpu_unprepare(cpu, idx, true);
sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
- end_time = ktime_to_ns(ktime_get()) - start_time;
- do_div(end_time, 1000);
- dev->last_residency = end_time;
+ dev->last_residency = ktime_us_delta(ktime_get(), start);
update_history(dev, idx);
trace_cpu_idle_exit(idx, success);
local_irq_enable();
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index b3364b4..71416f7 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -125,7 +125,7 @@ uint32_t *get_per_cpu_max_residency(int cpu);
uint32_t *get_per_cpu_min_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;
-#if CONFIG_SMP
+#if defined(CONFIG_SMP)
extern DEFINE_PER_CPU(bool, pending_ipi);
static inline bool is_IPI_pending(const struct cpumask *mask)
{
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 58a4244..3f26a41 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,8 +1,9 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
+ * Author: Gary R Hook <gary.hook@amd.com>
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 2c0ce5f..71980c4 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -131,6 +131,7 @@ union ccp_function {
#define CCP_AES_MODE(p) ((p)->aes.mode)
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
#define CCP_SHA_TYPE(p) ((p)->sha.type)
#define CCP_RSA_SIZE(p) ((p)->rsa.size)
@@ -277,8 +278,7 @@ static int ccp5_perform_aes(struct ccp_op *op)
CCP_AES_ENCRYPT(&function) = op->u.aes.action;
CCP_AES_MODE(&function) = op->u.aes.mode;
CCP_AES_TYPE(&function) = op->u.aes.type;
- if (op->u.aes.mode == CCP_AES_MODE_CFB)
- CCP_AES_SIZE(&function) = 0x7f;
+ CCP_AES_SIZE(&function) = op->u.aes.size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
@@ -318,6 +318,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
+ CCP_XTS_TYPE(&function) = op->u.xts.type;
CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 8ac7ae1..347b771 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -187,6 +187,7 @@
#define CCP_AES_CTX_SB_COUNT 1
#define CCP_XTS_AES_KEY_SB_COUNT 1
+#define CCP5_XTS_AES_KEY_SB_COUNT 2
#define CCP_XTS_AES_CTX_SB_COUNT 1
#define CCP_SHA_SB_COUNT 1
@@ -469,9 +470,11 @@ struct ccp_aes_op {
enum ccp_aes_type type;
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ unsigned int size;
};
struct ccp_xts_aes_op {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
};
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 50fae44..7d4cd51 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -692,6 +692,14 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_ctx;
}
}
+ switch (aes->mode) {
+ case CCP_AES_MODE_CFB: /* CFB128 only */
+ case CCP_AES_MODE_CTR:
+ op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
+ break;
+ default:
+ op.u.aes.size = 0;
+ }
/* Prepare the input and output data workareas. For in-place
* operations we need to set the dma direction to BIDIRECTIONAL
@@ -779,6 +787,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
unsigned int unit_size, dm_offset;
bool in_place = false;
+ unsigned int sb_count;
+ enum ccp_aes_type aestype;
int ret;
switch (xts->unit_size) {
@@ -802,7 +812,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- if (xts->key_len != AES_KEYSIZE_128)
+ if (xts->key_len == AES_KEYSIZE_128)
+ aestype = CCP_AES_TYPE_128;
+ else
return -EINVAL;
if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
@@ -824,23 +836,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
op.sb_key = cmd_q->sb_key;
op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
+ op.u.xts.type = aestype;
op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
- /* All supported key sizes fit in a single (32-byte) SB entry
- * and must be in little endian format. Use the 256-bit byte
- * swap passthru option to convert from big endian to little
- * endian.
+ /* A version 3 device only supports a 128-bit key, which fits into a
+ * single SB entry. A version 5 device uses a 512-bit vector, so two
+ * SB entries.
*/
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+ else
+ sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+ sb_count * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+ /* All supported key sizes must be in little endian format.
+ * Use the 256-bit byte swap passthru option to convert from
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+ */
+ unsigned int pad;
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+ ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+ xts->key_len);
+ }
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 6fa91ae..182097c 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -25,26 +25,8 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/qseecomi.h>
#include "iceregs.h"
-
-#ifdef CONFIG_PFK
#include <linux/pfk.h>
-#else
-#include <linux/bio.h>
-static inline int pfk_load_key_start(const struct bio *bio,
- struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
-{
- return 0;
-}
-static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
-{
- return 0;
-}
-
-static inline void pfk_clear_on_reset(void)
-{
-}
-#endif
#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -144,6 +126,9 @@ static int qti_ice_setting_config(struct request *req,
return -EPERM;
}
+ if (!setting)
+ return -EINVAL;
+
if ((short)(crypto_data->key_index) >= 0) {
memcpy(&setting->crypto_data, crypto_data,
@@ -1451,7 +1436,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
int ret = 0;
bool is_pfe = false;
- if (!pdev || !req || !setting) {
+ if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
@@ -1470,6 +1455,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
/* It is not an error to have a request with no bio */
return 0;
}
+ //pr_err("%s bio is %pK\n", __func__, req->bio);
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
if (is_pfe) {
@@ -1633,7 +1619,7 @@ static struct ice_device *get_ice_device_from_storage_type
list_for_each_entry(ice_dev, &ice_devices, list) {
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
- pr_info("%s: found ice device %p\n", __func__, ice_dev);
+ pr_debug("%s: ice device %pK\n", __func__, ice_dev);
return ice_dev;
}
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 571de2f..e2d323f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
- /* Indicate next op is not the first. */
- req_ctx->first = 0;
}
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
else
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
- t_alg->algt.alg.hash.setkey = ahash_setkey;
+ if (!strncmp(alg->cra_name, "hmac", 4))
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 38ed10d..7cf6d31 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
int ret;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
return ret;
@@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
crypto_xor(keystream, src, nbytes);
memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
@@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
walk.iv);
disable_kernel_vsx();
pagefault_enable();
+ preempt_enable();
/* We need to update IV mostly for last bytes/round */
inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 9943c8c..1dca479 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -31,6 +31,7 @@
#include "governor.h"
#include "governor_memlat.h"
#include <linux/perf_event.h>
+#include <linux/of_device.h>
enum ev_index {
INST_IDX,
@@ -63,6 +64,10 @@ struct cpu_grp_info {
struct list_head mon_list;
};
+struct memlat_mon_spec {
+ bool is_compute;
+};
+
#define to_cpustats(cpu_grp, cpu) \
(&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
#define to_devstats(cpu_grp, cpu) \
@@ -96,6 +101,9 @@ static inline unsigned long read_event(struct event_data *event)
unsigned long ev_count;
u64 total, enabled, running;
+ if (!event->pevent)
+ return 0;
+
total = perf_event_read_value(event->pevent, &enabled, &running);
ev_count = total - event->prev_count;
event->prev_count = total;
@@ -314,6 +322,7 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct memlat_hwmon *hw;
struct cpu_grp_info *cpu_grp;
+ const struct memlat_mon_spec *spec;
int cpu, ret;
u32 event_id;
@@ -348,6 +357,22 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
cpu_grp->event_ids[CYC_IDX] = CYC_EV;
+ for_each_cpu(cpu, &cpu_grp->cpus)
+ to_devstats(cpu_grp, cpu)->id = cpu;
+
+ hw->start_hwmon = &start_hwmon;
+ hw->stop_hwmon = &stop_hwmon;
+ hw->get_cnt = &get_cnt;
+
+ spec = of_device_get_match_data(dev);
+ if (spec && spec->is_compute) {
+ ret = register_compute(dev, hw);
+ if (ret)
+ pr_err("Compute Gov registration failed\n");
+
+ return ret;
+ }
+
ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
&event_id);
if (ret) {
@@ -372,24 +397,21 @@ static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
else
cpu_grp->event_ids[STALL_CYC_IDX] = event_id;
- for_each_cpu(cpu, &cpu_grp->cpus)
- to_devstats(cpu_grp, cpu)->id = cpu;
-
- hw->start_hwmon = &start_hwmon;
- hw->stop_hwmon = &stop_hwmon;
- hw->get_cnt = &get_cnt;
-
ret = register_memlat(dev, hw);
- if (ret) {
+ if (ret)
pr_err("Mem Latency Gov registration failed\n");
- return ret;
- }
- return 0;
+ return ret;
}
+static const struct memlat_mon_spec spec[] = {
+ [0] = { false },
+ [1] = { true },
+};
+
static const struct of_device_id memlat_match_table[] = {
- { .compatible = "qcom,arm-memlat-mon" },
+ { .compatible = "qcom,arm-memlat-mon", .data = &spec[0] },
+ { .compatible = "qcom,arm-cpu-mon", .data = &spec[1] },
{}
};
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 8f582f6..b263696 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -598,7 +598,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_out;
+ goto err_dev;
}
devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
@@ -642,6 +642,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+err_dev:
+ if (devfreq)
+ kfree(devfreq);
err_out:
return ERR_PTR(err);
}
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
index a1d9b50..3026bc2 100644
--- a/drivers/devfreq/governor_bw_hwmon.c
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -48,9 +48,6 @@ struct hwmon_node {
unsigned int hyst_trigger_count;
unsigned int hyst_length;
unsigned int idle_mbps;
- unsigned int low_power_ceil_mbps;
- unsigned int low_power_io_percent;
- unsigned int low_power_delay;
unsigned int mbps_zones[NUM_MBPS_ZONES];
unsigned long prev_ab;
@@ -65,7 +62,6 @@ struct hwmon_node {
unsigned long hyst_mbps;
unsigned long hyst_trig_win;
unsigned long hyst_en;
- unsigned long above_low_power;
unsigned long prev_req;
unsigned int wake;
unsigned int down_cnt;
@@ -317,7 +313,7 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
unsigned long meas_mbps_zone;
unsigned long hist_lo_tol, hyst_lo_tol;
struct bw_hwmon *hw = node->hw;
- unsigned int new_bw, io_percent;
+ unsigned int new_bw, io_percent = node->io_percent;
ktime_t ts;
unsigned int ms = 0;
@@ -353,17 +349,6 @@ static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
node->hist_mem--;
}
- /* Keep track of whether we are in low power mode consistently. */
- if (meas_mbps > node->low_power_ceil_mbps)
- node->above_low_power = node->low_power_delay;
- if (node->above_low_power)
- node->above_low_power--;
-
- if (node->above_low_power)
- io_percent = node->io_percent;
- else
- io_percent = node->low_power_io_percent;
-
/*
* The AB value that corresponds to the lowest mbps zone greater than
* or equal to the "frequency" the current measurement will pick.
@@ -785,9 +770,6 @@ gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
-gov_attr(low_power_ceil_mbps, 0U, 2500U);
-gov_attr(low_power_io_percent, 1U, 100U);
-gov_attr(low_power_delay, 1U, 60U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
static struct attribute *dev_attr[] = {
@@ -804,9 +786,6 @@ static struct attribute *dev_attr[] = {
&dev_attr_hyst_trigger_count.attr,
&dev_attr_hyst_length.attr,
&dev_attr_idle_mbps.attr,
- &dev_attr_low_power_ceil_mbps.attr,
- &dev_attr_low_power_io_percent.attr,
- &dev_attr_low_power_delay.attr,
&dev_attr_mbps_zones.attr,
&dev_attr_throttle_adj.attr,
NULL,
@@ -940,9 +919,6 @@ int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
node->guard_band_mbps = 100;
node->decay_rate = 90;
node->io_percent = 16;
- node->low_power_ceil_mbps = 0;
- node->low_power_io_percent = 16;
- node->low_power_delay = 60;
node->bw_step = 190;
node->sample_ms = 50;
node->up_scale = 0;
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 9688502..12a90d4 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -48,7 +48,8 @@ struct memlat_node {
static LIST_HEAD(memlat_list);
static DEFINE_MUTEX(list_lock);
-static int use_cnt;
+static int memlat_use_cnt;
+static int compute_use_cnt;
static DEFINE_MUTEX(state_lock);
#define show_attr(name) \
@@ -240,8 +241,7 @@ static int devfreq_memlat_get_freq(struct devfreq *df,
if (hw->core_stats[i].mem_count)
ratio /= hw->core_stats[i].mem_count;
- if (!hw->core_stats[i].inst_count
- || !hw->core_stats[i].freq)
+ if (!hw->core_stats[i].freq)
continue;
trace_memlat_dev_meas(dev_name(df->dev.parent),
@@ -280,16 +280,26 @@ static int devfreq_memlat_get_freq(struct devfreq *df,
gov_attr(ratio_ceil, 1U, 10000U);
gov_attr(stall_floor, 0U, 100U);
-static struct attribute *dev_attr[] = {
+static struct attribute *memlat_dev_attr[] = {
&dev_attr_ratio_ceil.attr,
&dev_attr_stall_floor.attr,
&dev_attr_freq_map.attr,
NULL,
};
-static struct attribute_group dev_attr_group = {
+static struct attribute *compute_dev_attr[] = {
+ &dev_attr_freq_map.attr,
+ NULL,
+};
+
+static struct attribute_group memlat_dev_attr_group = {
.name = "mem_latency",
- .attrs = dev_attr,
+ .attrs = memlat_dev_attr,
+};
+
+static struct attribute_group compute_dev_attr_group = {
+ .name = "compute",
+ .attrs = compute_dev_attr,
};
#define MIN_MS 10U
@@ -338,6 +348,12 @@ static struct devfreq_governor devfreq_gov_memlat = {
.event_handler = devfreq_memlat_ev_handler,
};
+static struct devfreq_governor devfreq_gov_compute = {
+ .name = "compute",
+ .get_target_freq = devfreq_memlat_get_freq,
+ .event_handler = devfreq_memlat_ev_handler,
+};
+
#define NUM_COLS 2
static struct core_dev_map *init_core_dev_map(struct device *dev,
char *prop_name)
@@ -380,20 +396,17 @@ static struct core_dev_map *init_core_dev_map(struct device *dev,
return tbl;
}
-int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+static struct memlat_node *register_common(struct device *dev,
+ struct memlat_hwmon *hw)
{
- int ret = 0;
struct memlat_node *node;
if (!hw->dev && !hw->of_node)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
if (!node)
- return -ENOMEM;
-
- node->gov = &devfreq_gov_memlat;
- node->attr_grp = &dev_attr_group;
+ return ERR_PTR(-ENOMEM);
node->ratio_ceil = 10;
node->hw = hw;
@@ -401,20 +414,68 @@ int register_memlat(struct device *dev, struct memlat_hwmon *hw)
hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
if (!hw->freq_map) {
dev_err(dev, "Couldn't find the core-dev freq table!\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
mutex_lock(&list_lock);
list_add_tail(&node->list, &memlat_list);
mutex_unlock(&list_lock);
+ return node;
+}
+
+int register_compute(struct device *dev, struct memlat_hwmon *hw)
+{
+ struct memlat_node *node;
+ int ret = 0;
+
+ node = register_common(dev, hw);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto out;
+ }
+
mutex_lock(&state_lock);
- if (!use_cnt)
- ret = devfreq_add_governor(&devfreq_gov_memlat);
+ node->gov = &devfreq_gov_compute;
+ node->attr_grp = &compute_dev_attr_group;
+
+ if (!compute_use_cnt)
+ ret = devfreq_add_governor(&devfreq_gov_compute);
if (!ret)
- use_cnt++;
+ compute_use_cnt++;
mutex_unlock(&state_lock);
+out:
+ if (!ret)
+ dev_info(dev, "Compute governor registered.\n");
+ else
+ dev_err(dev, "Compute governor registration failed!\n");
+
+ return ret;
+}
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+ struct memlat_node *node;
+ int ret = 0;
+
+ node = register_common(dev, hw);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto out;
+ }
+
+ mutex_lock(&state_lock);
+ node->gov = &devfreq_gov_memlat;
+ node->attr_grp = &memlat_dev_attr_group;
+
+ if (!memlat_use_cnt)
+ ret = devfreq_add_governor(&devfreq_gov_memlat);
+ if (!ret)
+ memlat_use_cnt++;
+ mutex_unlock(&state_lock);
+
+out:
if (!ret)
dev_info(dev, "Memory Latency governor registered.\n");
else
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index f2ba534..6491c6c 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -74,10 +74,16 @@ struct memlat_hwmon {
#ifdef CONFIG_DEVFREQ_GOV_MEMLAT
int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int register_compute(struct device *dev, struct memlat_hwmon *hw);
int update_memlat(struct memlat_hwmon *hw);
#else
static inline int register_memlat(struct device *dev,
- struct memlat_hwmon *hw)
+ struct memlat_hwmon *hw)
+{
+ return 0;
+}
+static inline int register_compute(struct device *dev,
+ struct memlat_hwmon *hw)
{
return 0;
}
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b3..57962bf 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is multibple of 32767 one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2403475..88a00d0 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests);
- mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) {
+ mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
return ERR_PTR(-ENOMEM);
}
set_bit(map->xbar_out, xbar->dma_inuse);
+ mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0];
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ee181c5..6e197c1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2984,8 +2984,11 @@ static int __init amd64_edac_init(void)
int err = -ENODEV;
int i;
+ if (!x86_match_cpu(amd64_cpuids))
+ return -ENODEV;
+
if (amd_cache_northbridges() < 0)
- goto err_ret;
+ return -ENODEV;
opstate_init();
@@ -2998,14 +3001,16 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- for (i = 0; i < amd_nb_num(); i++)
- if (probe_one_instance(i)) {
+ for (i = 0; i < amd_nb_num(); i++) {
+ err = probe_one_instance(i);
+ if (err) {
/* unwind properly */
while (--i >= 0)
remove_one_instance(i);
goto err_pci;
}
+ }
setup_pci_device();
@@ -3025,7 +3030,6 @@ static int __init amd64_edac_init(void)
kfree(ecc_stngs);
ecc_stngs = NULL;
-err_ret:
return err;
}
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c088704..dcb5f94 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
+#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_core.h"
#include "mce_amd.h"
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 5ca93a6..cf3fdde 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -30,10 +30,11 @@ module_param(poll_msec, int, 0444);
#endif
#ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE
-#define ARM64_ERP_PANIC_ON_CE 1
+static bool panic_on_ce = 1;
#else
-#define ARM64_ERP_PANIC_ON_CE 0
+static bool panic_on_ce;
#endif
+module_param_named(panic_on_ce, panic_on_ce, bool, 0664);
#ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE
#define ARM64_ERP_PANIC_ON_UE 1
@@ -238,6 +239,8 @@ static void dump_err_reg(int errorcode, int level, u64 errxstatus, u64 errxmisc,
else
edac_printk(KERN_CRIT, EDAC_CPU,
"Way: %d\n", (int) KRYO3XX_ERRXMISC_WAY(errxmisc) >> 2);
+
+ edev_ctl->panic_on_ce = panic_on_ce;
errors[errorcode].func(edev_ctl, smp_processor_id(),
level, errors[errorcode].msg);
}
@@ -360,11 +363,34 @@ static irqreturn_t kryo3xx_l3_scu_handler(int irq, void *drvdata)
return IRQ_HANDLED;
}
+static void initialize_registers(void *info)
+{
+ set_errxctlr_el1();
+ set_errxmisc_overflow();
+}
+
+static void init_regs_on_cpu(bool all_cpus)
+{
+ int cpu;
+
+ write_errselr_el1(0);
+ if (all_cpus) {
+ for_each_possible_cpu(cpu)
+ smp_call_function_single(cpu, initialize_registers,
+ NULL, 1);
+ } else
+ initialize_registers(NULL);
+
+ write_errselr_el1(1);
+ initialize_registers(NULL);
+}
+
static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *v)
{
switch (action) {
case CPU_PM_EXIT:
+ init_regs_on_cpu(false);
kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
break;
@@ -373,23 +399,14 @@ static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static void initialize_registers(void *info)
-{
- set_errxctlr_el1();
- set_errxmisc_overflow();
-}
-
static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct erp_drvdata *drv;
int rc = 0;
int fail = 0;
- int cpu;
- for_each_possible_cpu(cpu)
- smp_call_function_single(cpu, initialize_registers, NULL, 1);
-
+ init_regs_on_cpu(true);
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
@@ -413,7 +430,7 @@ static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
drv->edev_ctl->mod_name = dev_name(dev);
drv->edev_ctl->dev_name = dev_name(dev);
drv->edev_ctl->ctl_name = "cache";
- drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
+ drv->edev_ctl->panic_on_ce = panic_on_ce;
drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
drv->nb_pm.notifier_call = kryo3xx_pmu_cpu_pm_notify;
platform_set_drvdata(pdev, drv);
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index daaac2c..7db692e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -981,20 +981,19 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_cont("]: 0x%016llx\n", m->status);
if (m->status & MCI_STATUS_ADDRV)
- pr_emerg(HW_ERR "Error Addr: 0x%016llx", m->addr);
+ pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
if (boot_cpu_has(X86_FEATURE_SMCA)) {
+ pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
+
if (m->status & MCI_STATUS_SYNDV)
pr_cont(", Syndrome: 0x%016llx", m->synd);
- pr_cont(", IPID: 0x%016llx", m->ipid);
-
pr_cont("\n");
decode_smca_errors(m);
goto err_code;
- } else
- pr_cont("\n");
+ }
if (!fam_ops)
goto err_code;
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index a56c7e0..3c65f69 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -38,7 +38,7 @@
allow logging of different esoc driver traces.
config ESOC_MDM_4x
- bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+ bool "Add support for external mdm9x25/mdm9x35/mdm9x55"
help
In some Qualcomm Technologies, Inc. boards, an external modem such as
mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
@@ -49,7 +49,7 @@
tristate "Command engine for 4x series external modems"
help
Provides a command engine to control the behavior of an external modem
- such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+ such as mdm9x25/mdm9x35/mdm9x55/QSC. Allows the primary soc to put the
external modem in a specific mode. Also listens for events on the
external modem.
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index 334278b..677e21d 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -88,12 +88,10 @@ static void mdm_enable_irqs(struct mdm_ctrl *mdm)
return;
if (mdm->irq_mask & IRQ_ERRFATAL) {
enable_irq(mdm->errfatal_irq);
- irq_set_irq_wake(mdm->errfatal_irq, 1);
mdm->irq_mask &= ~IRQ_ERRFATAL;
}
if (mdm->irq_mask & IRQ_STATUS) {
enable_irq(mdm->status_irq);
- irq_set_irq_wake(mdm->status_irq, 1);
mdm->irq_mask &= ~IRQ_STATUS;
}
if (mdm->irq_mask & IRQ_PBLRDY) {
@@ -107,12 +105,10 @@ static void mdm_disable_irqs(struct mdm_ctrl *mdm)
if (!mdm)
return;
if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
- irq_set_irq_wake(mdm->errfatal_irq, 0);
disable_irq_nosync(mdm->errfatal_irq);
mdm->irq_mask |= IRQ_ERRFATAL;
}
if (!(mdm->irq_mask & IRQ_STATUS)) {
- irq_set_irq_wake(mdm->status_irq, 0);
disable_irq_nosync(mdm->status_irq);
mdm->irq_mask |= IRQ_STATUS;
}
@@ -179,26 +175,48 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
struct device *dev = mdm->dev;
int ret;
bool graceful_shutdown = false;
+ u32 status, err_fatal;
switch (cmd) {
case ESOC_PWR_ON:
+ if (esoc->auto_boot) {
+ /*
+ * If esoc has already booted, we would have missed
+ * status change interrupt. Read status and err_fatal
+ * signals to arrive at the state of esoc.
+ */
+ esoc->clink_ops->get_status(&status, esoc);
+ esoc->clink_ops->get_err_fatal(&err_fatal, esoc);
+ if (err_fatal)
+ return -EIO;
+ if (status && !mdm->ready) {
+ mdm->ready = true;
+ esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
+ }
+ }
gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
- mdm_enable_irqs(mdm);
mdm->init = 1;
mdm_do_first_power_on(mdm);
+ mdm_enable_irqs(mdm);
break;
case ESOC_PWR_OFF:
mdm_disable_irqs(mdm);
mdm->debug = 0;
mdm->ready = false;
mdm->trig_cnt = 0;
+ if (esoc->primary)
+ break;
graceful_shutdown = true;
- ret = sysmon_send_shutdown(&esoc->subsys);
- if (ret) {
- dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
- ret);
- graceful_shutdown = false;
- goto force_poff;
+ if (!esoc->userspace_handle_shutdown) {
+ ret = sysmon_send_shutdown(&esoc->subsys);
+ if (ret) {
+ dev_err(mdm->dev,
+ "sysmon shutdown fail, ret = %d\n", ret);
+ graceful_shutdown = false;
+ goto force_poff;
+ }
+ } else {
+ esoc_clink_queue_request(ESOC_REQ_SEND_SHUTDOWN, esoc);
}
dev_dbg(mdm->dev, "Waiting for status gpio go low\n");
status_down = false;
@@ -229,12 +247,17 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
esoc->subsys.sysmon_shutdown_ret);
}
+ if (esoc->primary)
+ break;
/*
* Force a shutdown of the mdm. This is required in order
* to prevent the mdm from immediately powering back on
- * after the shutdown
+ * after the shutdown. Avoid setting status to 0, if line is
+ * monitored by multiple mdms (might be wrongly interpreted as
+ * a primary crash).
*/
- gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+ if (esoc->statusline_not_a_powersource == false)
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
mdm_power_down(mdm);
mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
@@ -250,9 +273,12 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
*/
mdm->ready = false;
cancel_delayed_work(&mdm->mdm2ap_status_check_work);
- gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
- dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
- msleep(mdm->ramdump_delay_ms);
+ if (!mdm->esoc->auto_boot) {
+ gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ dev_dbg(mdm->dev,
+ "set ap2mdm errfatal to force reset\n");
+ msleep(mdm->ramdump_delay_ms);
+ }
break;
case ESOC_EXE_DEBUG:
mdm->debug = 1;
@@ -380,6 +406,8 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
status_down = false;
dev_dbg(dev, "signal apq err fatal for graceful restart\n");
gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+ if (esoc->primary)
+ break;
timeout = local_clock();
do_div(timeout, NSEC_PER_MSEC);
timeout += MDM_MODEM_TIMEOUT;
@@ -421,7 +449,8 @@ static irqreturn_t mdm_errfatal(int irq, void *dev_id)
goto mdm_pwroff_irq;
esoc = mdm->esoc;
dev_err(dev, "%s: mdm sent errfatal interrupt\n",
- __func__);
+ __func__);
+ subsys_set_crash_status(esoc->subsys_dev, true);
/* disable irq ?*/
esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
return IRQ_HANDLED;
@@ -442,11 +471,26 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
return IRQ_HANDLED;
dev = mdm->dev;
esoc = mdm->esoc;
+ /*
+ * On auto boot devices, there is a possibility of receiving
+ * status change interrupt before esoc_clink structure is
+ * initialized. Ignore them.
+ */
+ if (!esoc)
+ return IRQ_HANDLED;
value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
if (value == 0 && mdm->ready) {
dev_err(dev, "unexpected reset external modem\n");
+ subsys_set_crash_status(esoc->subsys_dev, true);
esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
} else if (value == 1) {
+ /*
+ * In auto_boot cases, bailout early if mdm
+ * is up already.
+ */
+ if (esoc->auto_boot && mdm->ready)
+ return IRQ_HANDLED;
+
cancel_delayed_work(&mdm->mdm2ap_status_check_work);
dev_dbg(dev, "status = 1: mdm is now ready\n");
mdm->ready = true;
@@ -454,6 +498,8 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
if (mdm->get_restart_reason)
queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+ if (esoc->auto_boot)
+ esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc);
}
return IRQ_HANDLED;
}
@@ -482,7 +528,7 @@ static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
+static void mdm_get_status(u32 *status, struct esoc_clink *esoc)
{
struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
@@ -490,7 +536,16 @@ static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
*status = 0;
else
*status = 1;
- return 0;
+}
+
+static void mdm_get_err_fatal(u32 *status, struct esoc_clink *esoc)
+{
+ struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+
+ if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_ERRFATAL)) == 0)
+ *status = 0;
+ else
+ *status = 1;
}
static void mdm_configure_debug(struct mdm_ctrl *mdm)
@@ -573,13 +628,21 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
&mdm->ramdump_delay_ms);
if (ret)
mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
- /* Multilple gpio_request calls are allowed */
+ /*
+ * In certain scenarios, multiple esoc devices are monitoring
+ * same AP2MDM_STATUS line. But only one of them will have a
+ * successful gpio_request call. Initialize gpio only if request
+ * succeeds.
+ */
if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
- /* Multilple gpio_request calls are allowed */
+ else
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
__func__);
+ else
+ gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
__func__);
@@ -612,9 +675,6 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
}
}
- gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
- gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
-
if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
@@ -637,6 +697,7 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
goto errfatal_err;
}
mdm->errfatal_irq = irq;
+ irq_set_irq_wake(mdm->errfatal_irq, 1);
errfatal_err:
/* status irq */
@@ -655,6 +716,7 @@ static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
goto status_err;
}
mdm->status_irq = irq;
+ irq_set_irq_wake(mdm->status_irq, 1);
status_err:
if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
irq = platform_get_irq_byname(pdev, "plbrdy_irq");
@@ -748,6 +810,7 @@ static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -818,6 +881,7 @@ static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -906,6 +970,7 @@ static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "cannot allocate esoc device\n");
return PTR_ERR(esoc);
}
+ esoc->pdev = pdev;
mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
if (!mdm->mdm_queue) {
dev_err(mdm->dev, "could not create mdm_queue\n");
@@ -966,6 +1031,7 @@ static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
static struct esoc_clink_ops mdm_cops = {
.cmd_exe = mdm_cmd_exe,
.get_status = mdm_get_status,
+ .get_err_fatal = mdm_get_err_fatal,
.notify = mdm_notify,
};
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
index 309c820..a61588a 100644
--- a/drivers/esoc/esoc-mdm-dbg-eng.c
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -269,7 +269,7 @@ static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
{
unsigned int i;
unsigned long flags;
- size_t count;
+ size_t count = 0;
spin_lock_irqsave(&req_lock, flags);
for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 31cd8c4..77ae84b 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>
+#include <linux/of.h>
#include "esoc.h"
#include "mdm-dbg.h"
@@ -74,7 +75,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
break;
case ESOC_UNEXPECTED_RESET:
case ESOC_ERR_FATAL:
- if (mdm_drv->mode == CRASH)
+ /*
+ * Modem can crash while we are waiting for boot_done during
+ * a subsystem_get(). Setting mode to CRASH will prevent a
+ * subsequent subsystem_get() from entering poweron ops. Avoid
+ * this by setting mode to CRASH only if device was up and
+ * running.
+ */
+ if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
return;
mdm_drv->mode = CRASH;
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
@@ -164,8 +172,9 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+ int timeout = INT_MAX;
- if (!esoc_req_eng_enabled(esoc_clink)) {
+ if (!esoc_clink->auto_boot && !esoc_req_eng_enabled(esoc_clink)) {
dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
wait_for_completion(&mdm_drv->req_eng_wait);
}
@@ -190,8 +199,17 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
return ret;
}
}
- wait_for_completion(&mdm_drv->boot_done);
- if (mdm_drv->boot_fail) {
+
+ /*
+ * In autoboot case, it is possible that we can forever wait for
+ * boot completion, when esoc fails to boot. This is because there
+ * is no helper application which can alert esoc driver about boot
+ * failure. Prevent going to wait forever in such case.
+ */
+ if (esoc_clink->auto_boot)
+ timeout = 10 * HZ;
+ ret = wait_for_completion_timeout(&mdm_drv->boot_done, timeout);
+ if (mdm_drv->boot_fail || ret <= 0) {
dev_err(&esoc_clink->dev, "booting failed\n");
return -EIO;
}
@@ -219,10 +237,12 @@ static int mdm_subsys_ramdumps(int want_dumps,
static int mdm_register_ssr(struct esoc_clink *esoc_clink)
{
- esoc_clink->subsys.shutdown = mdm_subsys_shutdown;
- esoc_clink->subsys.ramdump = mdm_subsys_ramdumps;
- esoc_clink->subsys.powerup = mdm_subsys_powerup;
- esoc_clink->subsys.crash_shutdown = mdm_crash_shutdown;
+ struct subsys_desc *subsys = &esoc_clink->subsys;
+
+ subsys->shutdown = mdm_subsys_shutdown;
+ subsys->ramdump = mdm_subsys_ramdumps;
+ subsys->powerup = mdm_subsys_powerup;
+ subsys->crash_shutdown = mdm_crash_shutdown;
return esoc_clink_register_ssr(esoc_clink);
}
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 47d54db..0e85776 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,9 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
struct device *dev = mdm->dev;
dev_dbg(dev, "Powering on modem for the first time\n");
+ if (mdm->esoc->auto_boot)
+ return 0;
+
mdm_toggle_soft_reset(mdm, false);
/* Add a delay to allow PON sequence to complete*/
mdelay(50);
@@ -134,6 +137,9 @@ static int mdm9x55_power_down(struct mdm_ctrl *mdm)
static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
{
+ if (!gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET)))
+ return;
+
dev_dbg(mdm->dev, "Triggering mdm cold reset");
gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
!!mdm->soft_reset_inverted);
@@ -201,15 +207,6 @@ struct mdm_pon_ops mdm9x35_pon_ops = {
.setup = mdm4x_pon_setup,
};
-struct mdm_pon_ops mdm9x45_pon_ops = {
- .pon = mdm4x_do_first_power_on,
- .soft_reset = mdm4x_toggle_soft_reset,
- .poff_force = mdm4x_power_down,
- .cold_reset = mdm4x_cold_reset,
- .dt_init = mdm4x_pon_dt_init,
- .setup = mdm4x_pon_setup,
-};
-
struct mdm_pon_ops mdm9x55_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = mdm9x55_toggle_soft_reset,
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
index fa3a576..621d913 100644
--- a/drivers/esoc/esoc-mdm.h
+++ b/drivers/esoc/esoc-mdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,8 +33,6 @@
#define MDM9x35_PCIE "PCIe"
#define MDM9x35_DUAL_LINK "HSIC+PCIe"
#define MDM9x35_HSIC "HSIC"
-#define MDM9x45_LABEL "MDM9x45"
-#define MDM9x45_PCIE "PCIe"
#define MDM9x55_LABEL "MDM9x55"
#define MDM9x55_PCIE "PCIe"
#define MDM2AP_STATUS_TIMEOUT_MS 120000L
@@ -151,6 +149,5 @@ static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
extern struct mdm_pon_ops mdm9x25_pon_ops;
extern struct mdm_pon_ops mdm9x35_pon_ops;
-extern struct mdm_pon_ops mdm9x45_pon_ops;
extern struct mdm_pon_ops mdm9x55_pon_ops;
#endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
index 9fc3192..df3c9df 100644
--- a/drivers/esoc/esoc.h
+++ b/drivers/esoc/esoc.h
@@ -49,6 +49,7 @@ struct esoc_eng {
* @link_info: additional info about the physical link.
* @parent: parent device.
* @dev: device for userspace interface.
+ * @pdev: platform device to interface with SSR driver.
* @id: id of the external device.
* @owner: owner of the device.
* @clink_ops: control operations for the control link
@@ -59,6 +60,12 @@ struct esoc_eng {
* @subsys_desc: descriptor for subsystem restart
* @subsys_dev: ssr device handle.
* @np: device tree node for esoc_clink.
+ * @auto_boot: boots independently.
+ * @primary: primary esoc controls(reset/poweroff) all secondary
+ * esocs, but not the other way around.
+ * @statusline_not_a_powersource: True if status line to esoc is not a
+ * power source.
+ * @userspace_handle_shutdown: True if user space handles shutdown requests.
*/
struct esoc_clink {
const char *name;
@@ -66,6 +73,7 @@ struct esoc_clink {
const char *link_info;
struct device *parent;
struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct module *owner;
const struct esoc_clink_ops *clink_ops;
@@ -77,17 +85,23 @@ struct esoc_clink {
struct subsys_desc subsys;
struct subsys_device *subsys_dev;
struct device_node *np;
+ bool auto_boot;
+ bool primary;
+ bool statusline_not_a_powersource;
+ bool userspace_handle_shutdown;
};
/**
* struct esoc_clink_ops: Operations to control external soc
* @cmd_exe: Execute control command
* @get_status: Get current status, or response to previous command
+ * @get_err_fatal: Get status of err fatal signal
* @notify_esoc: notify external soc of events
*/
struct esoc_clink_ops {
int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
- int (*get_status)(u32 *status, struct esoc_clink *dev);
+ void (*get_status)(u32 *status, struct esoc_clink *dev);
+ void (*get_err_fatal)(u32 *status, struct esoc_clink *dev);
void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
};
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
index cef570b..d9ab993 100644
--- a/drivers/esoc/esoc_bus.c
+++ b/drivers/esoc/esoc_bus.c
@@ -189,7 +189,7 @@ int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
esoc_clink->subsys.name = subsys_name;
esoc_clink->dev.of_node = esoc_clink->np;
- esoc_clink->subsys.dev = &esoc_clink->dev;
+ esoc_clink->subsys.dev = &esoc_clink->pdev->dev;
esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
dev_err(&esoc_clink->dev, "failed to register ssr node\n");
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 0c9e428..1d9e623 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -224,9 +224,11 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
clink_ops->notify(esoc_cmd, esoc_clink);
break;
case ESOC_GET_STATUS:
- err = clink_ops->get_status(&status, esoc_clink);
- if (err)
- return err;
+ clink_ops->get_status(&status, esoc_clink);
+ put_user(status, (unsigned int __user *)uarg);
+ break;
+ case ESOC_GET_ERR_FATAL:
+ clink_ops->get_err_fatal(&status, esoc_clink);
put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
@@ -336,7 +338,6 @@ int esoc_clink_del_device(struct device *dev, void *dummy)
esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
if (!esoc_udev)
return 0;
- return_esoc_udev(esoc_udev);
device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
return_esoc_udev(esoc_udev);
return 0;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 42f41e8..27f67c2 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -168,7 +168,7 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
return ret;
}
- vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+ vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
if (!vbus_attach)
goto notify_otg;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 9338ff7..642fa03 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1206,6 +1206,8 @@
tristate "Microchip MCP23xxx I/O expander"
depends on OF_GPIO
select GPIOLIB_IRQCHIP
+ select REGMAP_I2C if I2C
+ select REGMAP if SPI_MASTER
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
I/O expanders.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 264899d..05ff98b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -491,6 +491,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
+ if (mem->start == AMDGPU_BO_INVALID_OFFSET)
+ return -EINVAL;
+
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 7fe8fd8..743a12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -315,6 +315,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -340,6 +344,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+
}
}
mutex_unlock(&adev->vce.idle_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index ab3df6d..3f445df91 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -89,6 +89,10 @@ static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
+
uvd_v6_0_set_ring_funcs(adev);
uvd_v6_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index a6a4b2b..6a3470f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
- sizeof(struct kfd_event_data)))
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
goto fail;
+ }
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 08cd0bd..3907439 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -825,7 +825,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
uint32_t reference_clock, tmp;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
@@ -3718,10 +3718,9 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
uint32_t ref_clock;
uint32_t refresh_rate = 0;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
-
cgs_get_active_displays_info(hwmgr->device, &info);
num_active_displays = info.display_count;
@@ -3737,6 +3736,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
data->frame_time_x2 = frame_time_in_us * 2 / 100;
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 82c193e..8b009b5 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -150,13 +150,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
- if (plane->state->rotation & MALIDP_ROTATED_MASK) {
- dest_w = plane->state->crtc_h;
- dest_h = plane->state->crtc_w;
- } else {
- dest_w = plane->state->crtc_w;
- dest_h = plane->state->crtc_h;
- }
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
@@ -187,11 +182,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
- val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+ val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) <<
+ LAYER_ROT_OFFSET;
if (plane->state->rotation & DRM_REFLECT_X)
- val |= LAYER_V_FLIP;
- if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
+ if (plane->state->rotation & DRM_REFLECT_Y)
+ val |= LAYER_V_FLIP;
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 213d892..a68f94d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -325,7 +325,7 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
-static void adv7511_power_on(struct adv7511 *adv7511)
+static void __adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -354,6 +354,11 @@ static void adv7511_power_on(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
ADV7511_REG_POWER2_HPD_SRC_MASK,
ADV7511_REG_POWER2_HPD_SRC_NONE);
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ __adv7511_power_on(adv7511);
/*
* Most of the registers are reset during power down or when HPD is low.
@@ -362,21 +367,23 @@ static void adv7511_power_on(struct adv7511 *adv7511)
if (adv7511->type == ADV7533)
adv7533_dsi_power_on(adv7511);
-
adv7511->powered = true;
}
-static void adv7511_power_off(struct adv7511 *adv7511)
+static void __adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
regcache_mark_dirty(adv7511->regmap);
+}
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ __adv7511_power_off(adv7511);
if (adv7511->type == ADV7533)
adv7533_dsi_power_off(adv7511);
-
adv7511->powered = false;
}
@@ -567,23 +574,20 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN, 0);
- if (adv7511->i2c_main->irq) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
- }
- adv7511->current_edid_segment = -1;
+ unsigned int edid_i2c_addr =
+ (adv7511->i2c_main->addr << 1) + 4;
+
+ __adv7511_power_on(adv7511);
+
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
if (!adv7511->powered)
- regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
- ADV7511_POWER_POWER_DOWN,
- ADV7511_POWER_POWER_DOWN);
+ __adv7511_power_off(adv7511);
kfree(adv7511->edid);
adv7511->edid = edid;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4e16dff..33778bf 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1871,10 +1871,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_out_fence_state *fence_state = NULL;
+ struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j, num_fences = 0;
+ unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1915,6 +1915,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
+ fence_state = NULL;
+ num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 362b8cd..80a903b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -218,7 +218,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type)
ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- return ret;
+ goto err_debugfs;
}
ret = device_add(minor->kdev);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 76a1e43..d9a5762 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -360,6 +360,7 @@ static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
msg->flags |= MIPI_DSI_MSG_USE_LPM;
+ msg->flags |= MIPI_DSI_MSG_LASTCOMMAND;
return ops->transfer(dsi->host, msg);
}
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index a4d81cf..ef80ec6 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -530,7 +530,7 @@ static void drm_property_free_blob(struct kref *kref)
drm_mode_object_unregister(blob->dev, &blob->base);
- kfree(blob);
+ vfree(blob);
}
/**
@@ -557,7 +557,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ blob = vmalloc(sizeof(struct drm_property_blob)+length);
if (!blob)
return ERR_PTR(-ENOMEM);
@@ -573,7 +573,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
- kfree(blob);
+ vfree(blob);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 0370b84..82dd57d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -549,12 +549,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ struct etnaviv_drm_private *priv = obj->dev->dev_private;
struct etnaviv_vram_mapping *mapping, *tmp;
/* object should not be active */
WARN_ON(is_active(etnaviv_obj));
+ mutex_lock(&priv->gem_lock);
list_del(&etnaviv_obj->gem_node);
+ mutex_unlock(&priv->gem_lock);
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fbd13fa..603d842 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1193,6 +1193,17 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
if (!node)
return -ENOMEM;
+ /*
+ * To avoid an integer overflow for the later size computations, we
+ * enforce a maximum number of submitted commands here. This limit is
+ * sufficient for all conceivable usage cases of the G2D.
+ */
+ if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
+ req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ return -EINVAL;
+ }
+
node->event = NULL;
if (req->event_type != G2D_EVENT_NOT) {
@@ -1250,7 +1261,11 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
}
- /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
+ /*
+ * Check the size of cmdlist. The 2 that is added last comes from
+ * the implicit G2D_BITBLT_START that is appended once we have
+ * checked all the submitted commands.
+ */
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
dev_err(dev, "cmdlist size is too big\n");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index 3194e54..faacc81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -89,9 +89,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
goto err_node_put;
}
- of_node_put(np);
- clk_prepare_enable(tcon->ipg_clk);
+ ret = clk_prepare_enable(tcon->ipg_clk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable the TCON clock\n");
+ goto err_node_put;
+ }
+ of_node_put(np);
dev_info(dev, "Using TCON in bypass mode\n");
return tcon;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ac36e3..80c5cc5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1152,6 +1152,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
@@ -1212,7 +1219,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
{
enum port port;
- if (!HAS_DDI(dev_priv))
+ if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f8efd20..ce32303 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11471,13 +11471,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder;
struct drm_display_mode *mode;
struct intel_crtc_state *pipe_config;
- int htot = I915_READ(HTOTAL(cpu_transcoder));
- int hsync = I915_READ(HSYNC(cpu_transcoder));
- int vtot = I915_READ(VTOTAL(cpu_transcoder));
- int vsync = I915_READ(VSYNC(cpu_transcoder));
+ u32 htot, hsync, vtot, vsync;
enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -11505,6 +11502,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
i9xx_crtc_clock_get(intel_crtc, pipe_config);
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ cpu_transcoder = pipe_config->cpu_transcoder;
+ htot = I915_READ(HTOTAL(cpu_transcoder));
+ hsync = I915_READ(HSYNC(cpu_transcoder));
+ vtot = I915_READ(VTOTAL(cpu_transcoder));
+ vsync = I915_READ(VSYNC(cpu_transcoder));
+
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 7b06280..7fdc42e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2193,8 +2193,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
- intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp);
+ intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
power_domain = intel_display_port_aux_power_domain(intel_encoder);
@@ -3558,9 +3558,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a19ec06..3ce9ba3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -457,7 +457,6 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a2655cd..8ab6f30 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -272,8 +272,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
return intel_overlay_do_wait_request(overlay, req, NULL);
}
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+ struct i915_vma *vma)
+{
+ enum pipe pipe = overlay->crtc->pipe;
+
+ WARN_ON(overlay->old_vma);
+
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma ? vma->obj : NULL,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ intel_frontbuffer_flip_prepare(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
/* overlay needs to be enabled in OCMD reg */
static int intel_overlay_continue(struct intel_overlay *overlay,
+ struct i915_vma *vma,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
@@ -308,27 +330,35 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, vma);
+
intel_overlay_submit_request(overlay, req, NULL);
return 0;
}
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&overlay->old_vma);
+ if (WARN_ON(!vma))
+ return;
+
+ intel_frontbuffer_flip_complete(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+ i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_put(vma);
+}
+
static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
struct drm_i915_gem_request *req)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
- struct i915_vma *vma;
- vma = fetch_and_zero(&overlay->old_vma);
- if (WARN_ON(!vma))
- return;
-
- i915_gem_track_fb(vma->obj, NULL,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
-
- i915_gem_object_unpin_from_display_plane(vma);
- i915_vma_put(vma);
+ intel_overlay_release_old_vma(overlay);
}
static void intel_overlay_off_tail(struct i915_gem_active *active,
@@ -336,15 +366,8 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
- struct i915_vma *vma;
- /* never have the overlay hw on without showing a frame */
- vma = fetch_and_zero(&overlay->vma);
- if (WARN_ON(!vma))
- return;
-
- i915_gem_object_unpin_from_display_plane(vma);
- i915_vma_put(vma);
+ intel_overlay_release_old_vma(overlay);
overlay->crtc->overlay = NULL;
overlay->crtc = NULL;
@@ -398,6 +421,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, NULL);
+
return intel_overlay_do_wait_request(overlay, req,
intel_overlay_off_tail);
}
@@ -836,18 +861,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
- overlay->old_vma = overlay->vma;
- overlay->vma = vma;
-
- intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
return 0;
out_unpin:
@@ -1215,6 +1232,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
+ i915_gem_object_put(new_bo);
kfree(params);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 49de476..277a802 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -27,6 +27,7 @@
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
@@ -2017,9 +2018,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
- struct intel_plane_state *pristate,
- struct intel_plane_state *sprstate,
- struct intel_plane_state *curstate,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2341,28 +2342,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
- struct intel_plane_state *pristate = NULL;
- struct intel_plane_state *sprstate = NULL;
- struct intel_plane_state *curstate = NULL;
+ struct drm_plane *plane;
+ const struct drm_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct intel_plane_state *ps;
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
- ps = intel_atomic_get_existing_plane_state(state,
- intel_plane);
- if (!ps)
- continue;
-
- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
pristate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
sprstate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
curstate = ps;
}
@@ -2384,11 +2381,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (pipe_wm->sprites_scaled)
usable_level = 0;
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -2398,8 +2393,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
ilk_compute_wm_reg_maximums(dev, 1, &max);
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, wm);
@@ -2409,13 +2404,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* register maximums since such watermarks are
* always invalid.
*/
- if (level > usable_level)
- continue;
-
- if (ilk_validate_wm_level(level, &max, wm))
- pipe_wm->wm[level] = *wm;
- else
- usable_level = level;
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 9b307ce..dff4784 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -387,6 +387,13 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
+ if (intel_crtc->config->pipe_src_w > 3200 ||
+ intel_crtc->config->pipe_src_h > 2000) {
+ dev_priv->psr.psr2_support = false;
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
@@ -425,7 +432,6 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -452,12 +458,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
hsw_psr_setup_vsc(intel_dp);
if (dev_priv->psr.psr2_support) {
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (crtc->config->pipe_src_w > 3200 ||
- crtc->config->pipe_src_h > 2000)
- dev_priv->psr.psr2_support = false;
- else
- skl_psr_setup_su_vsc(intel_dp);
+ skl_psr_setup_su_vsc(intel_dp);
}
/*
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index c4a60dc..51cb6c5 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -23,13 +23,6 @@
#include "dp_audio.h"
#include "dp_panel.h"
-#define HEADER_BYTE_2_BIT 0
-#define PARITY_BYTE_2_BIT 8
-#define HEADER_BYTE_1_BIT 16
-#define PARITY_BYTE_1_BIT 24
-#define HEADER_BYTE_3_BIT 16
-#define PARITY_BYTE_3_BIT 24
-
struct dp_audio_private {
struct platform_device *ext_pdev;
struct platform_device *pdev;
@@ -44,75 +37,12 @@ struct dp_audio_private {
u32 channels;
struct completion hpd_comp;
+ struct workqueue_struct *notify_workqueue;
+ struct delayed_work notify_delayed_work;
struct dp_audio dp_audio;
};
-static u8 dp_audio_get_g0_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[3];
- g[1] = c[0] ^ c[3];
- g[2] = c[1];
- g[3] = c[2];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_get_g1_value(u8 data)
-{
- u8 c[4];
- u8 g[4];
- u8 ret_data = 0;
- u8 i;
-
- for (i = 0; i < 4; i++)
- c[i] = (data >> i) & 0x01;
-
- g[0] = c[0] ^ c[3];
- g[1] = c[0] ^ c[1] ^ c[3];
- g[2] = c[1] ^ c[2];
- g[3] = c[2] ^ c[3];
-
- for (i = 0; i < 4; i++)
- ret_data = ((g[i] & 0x01) << i) | ret_data;
-
- return ret_data;
-}
-
-static u8 dp_audio_calculate_parity(u32 data)
-{
- u8 x0 = 0;
- u8 x1 = 0;
- u8 ci = 0;
- u8 iData = 0;
- u8 i = 0;
- u8 parity_byte;
- u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
-
- for (i = 0; i < num_byte; i++) {
- iData = (data >> i*4) & 0xF;
-
- ci = iData ^ x1;
- x1 = x0 ^ dp_audio_get_g1_value(ci);
- x0 = dp_audio_get_g0_value(ci);
- }
-
- parity_byte = x1 | (x0 << 4);
-
- return parity_byte;
-}
-
static u32 dp_audio_get_header(struct dp_catalog_audio *catalog,
enum dp_catalog_audio_sdp_type sdp,
enum dp_catalog_audio_header_type header)
@@ -146,7 +76,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -158,7 +88,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
value = dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -172,7 +102,7 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -193,7 +123,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -206,7 +136,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -219,7 +149,7 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -239,7 +169,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -252,7 +182,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -265,7 +195,7 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -285,7 +215,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -298,7 +228,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -311,7 +241,7 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
@@ -331,7 +261,7 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
@@ -344,7 +274,7 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
- parity_byte = dp_audio_calculate_parity(new_value);
+ parity_byte = dp_header_get_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
@@ -428,7 +358,7 @@ static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
audio->engine_on = enable;
}
-static struct dp_audio_private *get_audio_get_data(struct platform_device *pdev)
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
{
struct msm_ext_disp_data *ext_data;
struct dp_audio *dp_audio;
@@ -459,18 +389,22 @@ static int dp_audio_info_setup(struct platform_device *pdev,
int rc = 0;
struct dp_audio_private *audio;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
+ mutex_lock(&audio->dp_audio.ops_lock);
+
audio->channels = params->num_of_channels;
dp_audio_setup_sdp(audio);
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
+
+ mutex_unlock(&audio->dp_audio.ops_lock);
end:
return rc;
}
@@ -482,7 +416,7 @@ static int dp_audio_get_edid_blk(struct platform_device *pdev,
struct dp_audio_private *audio;
struct sde_edid_ctrl *edid;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -510,18 +444,12 @@ static int dp_audio_get_cable_status(struct platform_device *pdev, u32 vote)
int rc = 0;
struct dp_audio_private *audio;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
}
- if (!audio->panel) {
- pr_err("invalid panel data\n");
- rc = -EINVAL;
- goto end;
- }
-
return audio->session_on;
end:
return rc;
@@ -532,7 +460,7 @@ static int dp_audio_get_intf_id(struct platform_device *pdev)
int rc = 0;
struct dp_audio_private *audio;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -547,16 +475,13 @@ static void dp_audio_teardown_done(struct platform_device *pdev)
{
struct dp_audio_private *audio;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio))
return;
- if (!audio->panel) {
- pr_err("invalid panel data\n");
- return;
- }
-
+ mutex_lock(&audio->dp_audio.ops_lock);
dp_audio_enable(audio, false);
+ mutex_unlock(&audio->dp_audio.ops_lock);
complete_all(&audio->hpd_comp);
@@ -568,7 +493,7 @@ static int dp_audio_ack_done(struct platform_device *pdev, u32 ack)
int rc = 0, ack_hpd;
struct dp_audio_private *audio;
- audio = get_audio_get_data(pdev);
+ audio = dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -596,6 +521,24 @@ static int dp_audio_ack_done(struct platform_device *pdev, u32 ack)
return rc;
}
+static int dp_audio_codec_ready(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct dp_audio_private *audio;
+
+ audio = dp_audio_get_data(pdev);
+ if (IS_ERR(audio)) {
+ pr_err("invalid input\n");
+ rc = PTR_ERR(audio);
+ goto end;
+ }
+
+ queue_delayed_work(audio->notify_workqueue,
+ &audio->notify_delayed_work, HZ/4);
+end:
+ return rc;
+}
+
static int dp_audio_init_ext_disp(struct dp_audio_private *audio)
{
int rc = 0;
@@ -617,6 +560,7 @@ static int dp_audio_init_ext_disp(struct dp_audio_private *audio)
ops->get_intf_id = dp_audio_get_intf_id;
ops->teardown_done = dp_audio_teardown_done;
ops->acknowledge = dp_audio_ack_done;
+ ops->ready = dp_audio_codec_ready;
if (!audio->pdev->dev.of_node) {
pr_err("cannot find audio dev.of_node\n");
@@ -648,6 +592,31 @@ static int dp_audio_init_ext_disp(struct dp_audio_private *audio)
return rc;
}
+static int dp_audio_notify(struct dp_audio_private *audio, u32 state)
+{
+ int rc = 0;
+ struct msm_ext_disp_init_data *ext = &audio->ext_audio_data;
+
+ rc = ext->intf_ops.audio_notify(audio->ext_pdev,
+ EXT_DISPLAY_TYPE_DP, state);
+ if (rc) {
+ pr_err("failed to notify audio. state=%d err=%d\n", state, rc);
+ goto end;
+ }
+
+ reinit_completion(&audio->hpd_comp);
+ rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
+ if (!rc) {
+ pr_err("timeout. state=%d err=%d\n", state, rc);
+ rc = -ETIMEDOUT;
+ goto end;
+ }
+
+ pr_debug("success\n");
+end:
+ return rc;
+}
+
static int dp_audio_on(struct dp_audio *dp_audio)
{
int rc = 0;
@@ -656,11 +625,14 @@ static int dp_audio_on(struct dp_audio *dp_audio)
if (!dp_audio) {
pr_err("invalid input\n");
- rc = -EINVAL;
- goto end;
+ return -EINVAL;
}
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ if (IS_ERR(audio)) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
ext = &audio->ext_audio_data;
@@ -674,21 +646,9 @@ static int dp_audio_on(struct dp_audio *dp_audio)
goto end;
}
- rc = ext->intf_ops.audio_notify(audio->ext_pdev,
- EXT_DISPLAY_TYPE_DP,
- EXT_DISPLAY_CABLE_CONNECT);
- if (rc) {
- pr_err("failed to notify audio, err=%d\n", rc);
+ rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
+ if (rc)
goto end;
- }
-
- reinit_completion(&audio->hpd_comp);
- rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
- if (!rc) {
- pr_err("timeout\n");
- rc = -ETIMEDOUT;
- goto end;
- }
pr_debug("success\n");
end:
@@ -700,6 +660,7 @@ static int dp_audio_off(struct dp_audio *dp_audio)
int rc = 0;
struct dp_audio_private *audio;
struct msm_ext_disp_init_data *ext;
+ bool work_pending = false;
if (!dp_audio) {
pr_err("invalid input\n");
@@ -709,21 +670,13 @@ static int dp_audio_off(struct dp_audio *dp_audio)
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
ext = &audio->ext_audio_data;
- rc = ext->intf_ops.audio_notify(audio->ext_pdev,
- EXT_DISPLAY_TYPE_DP,
- EXT_DISPLAY_CABLE_DISCONNECT);
- if (rc) {
- pr_err("failed to notify audio, err=%d\n", rc);
- goto end;
- }
+ work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work);
+ if (work_pending)
+ pr_debug("pending notification work completed\n");
- reinit_completion(&audio->hpd_comp);
- rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 5);
- if (!rc) {
- pr_err("timeout\n");
- rc = -ETIMEDOUT;
+ rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT);
+ if (rc)
goto end;
- }
pr_debug("success\n");
end:
@@ -739,6 +692,35 @@ static int dp_audio_off(struct dp_audio *dp_audio)
return rc;
}
+static void dp_audio_notify_work_fn(struct work_struct *work)
+{
+ struct dp_audio_private *audio;
+ struct delayed_work *dw = to_delayed_work(work);
+
+ audio = container_of(dw, struct dp_audio_private, notify_delayed_work);
+
+ dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT);
+}
+
+static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio)
+{
+ audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify");
+ if (IS_ERR_OR_NULL(audio->notify_workqueue)) {
+ pr_err("Error creating notify_workqueue\n");
+ return -EPERM;
+ }
+
+ INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn);
+
+ return 0;
+}
+
+static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio)
+{
+ if (audio->notify_workqueue)
+ destroy_workqueue(audio->notify_workqueue);
+}
+
struct dp_audio *dp_audio_get(struct platform_device *pdev,
struct dp_panel *panel,
struct dp_catalog_audio *catalog)
@@ -759,6 +741,10 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
goto error;
}
+ rc = dp_audio_create_notify_workqueue(audio);
+ if (rc)
+ goto error_notify_workqueue;
+
init_completion(&audio->hpd_comp);
audio->pdev = pdev;
@@ -767,18 +753,23 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
dp_audio = &audio->dp_audio;
+ mutex_init(&dp_audio->ops_lock);
+
dp_audio->on = dp_audio_on;
dp_audio->off = dp_audio_off;
rc = dp_audio_init_ext_disp(audio);
if (rc) {
- devm_kfree(&pdev->dev, audio);
- goto error;
+ goto error_ext_disp;
}
catalog->init(catalog);
return dp_audio;
+error_ext_disp:
+ dp_audio_destroy_notify_workqueue(audio);
+error_notify_workqueue:
+ devm_kfree(&pdev->dev, audio);
error:
return ERR_PTR(rc);
}
@@ -791,6 +782,9 @@ void dp_audio_put(struct dp_audio *dp_audio)
return;
audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ mutex_destroy(&dp_audio->ops_lock);
+
+ dp_audio_destroy_notify_workqueue(audio);
devm_kfree(&audio->pdev->dev, audio);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index d6e6b74..807444b 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -29,6 +29,8 @@ struct dp_audio {
u32 lane_count;
u32 bw_code;
+ struct mutex ops_lock;
+
/**
* on()
*
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index acbaec4..2d76d13 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -42,6 +42,7 @@ struct dp_aux_private {
bool no_send_stop;
u32 offset;
u32 segment;
+ atomic_t aborted;
struct drm_dp_aux drm_aux;
};
@@ -279,6 +280,20 @@ static void dp_aux_reconfig(struct dp_aux *dp_aux)
aux->catalog->reset(aux->catalog);
}
+static void dp_aux_abort_transaction(struct dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ atomic_set(&aux->aborted, 1);
+}
+
static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
@@ -330,17 +345,19 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
aux->no_send_stop = true;
/*
- * Send the segment address for every i2c read in which the
- * middle-of-tranaction flag is set. This is required to support EDID
- * reads of more than 2 blocks as the segment address is reset to 0
+ * Send the segment address for i2c reads for segment > 0 and for which
+ * the middle-of-transaction flag is set. This is required to support
+ * EDID reads of more than 2 blocks as the segment address is reset to 0
* since we are overriding the middle-of-transaction flag for read
* transactions.
*/
- memset(&helper_msg, 0, sizeof(helper_msg));
- helper_msg.address = segment_address;
- helper_msg.buffer = &aux->segment;
- helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ if (aux->segment) {
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = segment_address;
+ helper_msg.buffer = &aux->segment;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ }
/*
* Send the offset address for every i2c read in which the
@@ -377,6 +394,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
mutex_lock(&aux->mutex);
+ if (atomic_read(&aux->aborted)) {
+ ret = -ETIMEDOUT;
+ goto unlock_exit;
+ }
+
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
/* Ignore address only message */
@@ -411,7 +433,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
}
ret = dp_aux_cmd_fifo_tx(aux, msg);
- if ((ret < 0) && aux->native) {
+ if ((ret < 0) && aux->native && !atomic_read(&aux->aborted)) {
aux->retry_cnt++;
if (!(aux->retry_cnt % retry_count))
aux->catalog->update_aux_cfg(aux->catalog,
@@ -467,6 +489,7 @@ static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
aux->catalog->setup(aux->catalog, aux_cfg);
aux->catalog->reset(aux->catalog);
aux->catalog->enable(aux->catalog, true);
+ atomic_set(&aux->aborted, 0);
aux->retry_cnt = 0;
}
@@ -481,6 +504,7 @@ static void dp_aux_deinit(struct dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ atomic_set(&aux->aborted, 1);
aux->catalog->enable(aux->catalog, false);
}
@@ -558,6 +582,7 @@ struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
dp_aux->drm_aux_register = dp_aux_register;
dp_aux->drm_aux_deregister = dp_aux_deregister;
dp_aux->reconfig = dp_aux_reconfig;
+ dp_aux->abort = dp_aux_abort_transaction;
return dp_aux;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 85761ce..e8cb1cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -36,6 +36,7 @@ struct dp_aux {
void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);
void (*deinit)(struct dp_aux *aux);
void (*reconfig)(struct dp_aux *aux);
+ void (*abort)(struct dp_aux *aux);
};
struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b9b996a..c237a23 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -84,7 +84,7 @@ static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
return dp_read(base + DP_AUX_DATA);
end:
@@ -104,7 +104,7 @@ static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
dp_write(base + DP_AUX_DATA, aux->data);
end:
@@ -124,7 +124,7 @@ static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
end:
@@ -145,7 +145,7 @@ static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
if (read) {
data = dp_read(base + DP_AUX_TRANS_CTRL);
@@ -195,7 +195,7 @@ static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
aux_ctrl = dp_read(base + DP_AUX_CTRL);
@@ -220,7 +220,7 @@ static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
aux_ctrl = dp_read(base + DP_AUX_CTRL);
@@ -297,7 +297,7 @@ static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
{
u32 ack;
struct dp_catalog_private *catalog;
- void __iomem *base;
+ void __iomem *ahb_base;
if (!aux) {
pr_err("invalid input\n");
@@ -305,14 +305,14 @@ static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
}
dp_catalog_get_priv(aux);
- base = catalog->io->ctrl_io.base;
+ ahb_base = catalog->io->dp_ahb.base;
- aux->isr = dp_read(base + DP_INTR_STATUS);
+ aux->isr = dp_read(ahb_base + DP_INTR_STATUS);
aux->isr &= ~DP_INTR_MASK1;
ack = aux->isr & DP_INTERRUPT_STATUS1;
ack <<= 1;
ack |= DP_INTR_MASK1;
- dp_write(base + DP_INTR_STATUS, ack);
+ dp_write(ahb_base + DP_INTR_STATUS, ack);
}
/* controller related catalog functions */
@@ -327,149 +327,266 @@ static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_ahb.base;
return dp_read(base + DP_HDCP_STATUS);
}
-static void dp_catalog_ctrl_setup_infoframe_sdp(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_setup_infoframe_sdp(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
+ struct drm_msm_ext_hdr_metadata *hdr;
void __iomem *base;
- u32 header, data;
+ u32 header, parity, data;
- if (!ctrl) {
+ if (!panel) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ dp_catalog_get_priv(panel);
+ hdr = &panel->hdr_data.hdr_meta;
+ base = catalog->io->dp_link.base;
- header = dp_read(base + MMSS_DP_VSCEXT_0);
- header |= ctrl->hdr_data.vsc_hdr_byte1;
- dp_write(base + MMSS_DP_VSCEXT_0, header);
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.vscext_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_VSCEXT_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_0: 0x%x\n", data);
- header = dp_read(base + MMSS_DP_VSCEXT_1);
- header |= ctrl->hdr_data.vsc_hdr_byte1;
- dp_write(base + MMSS_DP_VSCEXT_1, header);
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.vscext_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_VSCEXT_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_1: 0x%x\n", data);
- header = dp_read(base + MMSS_DP_VSCEXT_1);
- header |= ctrl->hdr_data.vsc_hdr_byte1;
- dp_write(base + MMSS_DP_VSCEXT_1, header);
+ /* HEADER BYTE 3 */
+ header = panel->hdr_data.vscext_header_byte3;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_3_BIT)
+ | (parity << PARITY_BYTE_3_BIT));
+ data |= dp_read(base + MMSS_DP_VSCEXT_1);
+ dp_write(base + MMSS_DP_VSCEXT_1, data);
+ pr_debug("Header#3: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_VSCEXT_1: 0x%x\n", data);
- header = ctrl->hdr_data.version;
- header |= ctrl->hdr_data.length << 8;
- header |= ctrl->hdr_data.eotf << 16;
- header |= (ctrl->hdr_data.descriptor_id << 24);
- dp_write(base + MMSS_DP_VSCEXT_2, header);
+ data = panel->hdr_data.version;
+ data |= panel->hdr_data.length << 8;
+ data |= hdr->eotf << 16;
+ pr_debug("DP_VSCEXT_2: 0x%x\n", data);
+ dp_write(base + MMSS_DP_VSCEXT_2, data);
- data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[0]) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_x[0]) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 24));
+ data = (DP_GET_LSB(hdr->display_primaries_x[0]) |
+ (DP_GET_MSB(hdr->display_primaries_x[0]) << 8) |
+ (DP_GET_LSB(hdr->display_primaries_y[0]) << 16) |
+ (DP_GET_MSB(hdr->display_primaries_y[0]) << 24));
+ pr_debug("DP_VSCEXT_3: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_3, data);
- data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_x[1]) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 24));
+ data = (DP_GET_LSB(hdr->display_primaries_x[1]) |
+ (DP_GET_MSB(hdr->display_primaries_x[1]) << 8) |
+ (DP_GET_LSB(hdr->display_primaries_y[1]) << 16) |
+ (DP_GET_MSB(hdr->display_primaries_y[1]) << 24));
+ pr_debug("DP_VSCEXT_4: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_4, data);
- data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_x[2]) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 24));
+ data = (DP_GET_LSB(hdr->display_primaries_x[2]) |
+ (DP_GET_MSB(hdr->display_primaries_x[2]) << 8) |
+ (DP_GET_LSB(hdr->display_primaries_y[2]) << 16) |
+ (DP_GET_MSB(hdr->display_primaries_y[2]) << 24));
+ pr_debug("DP_VSCEXT_5: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_5, data);
- data = (DP_GET_LSB(ctrl->hdr_data.white_point_x) |
- (DP_GET_MSB(ctrl->hdr_data.white_point_x) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.white_point_y) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.white_point_y) << 24));
+ data = (DP_GET_LSB(hdr->white_point_x) |
+ (DP_GET_MSB(hdr->white_point_x) << 8) |
+ (DP_GET_LSB(hdr->white_point_y) << 16) |
+ (DP_GET_MSB(hdr->white_point_y) << 24));
+ pr_debug("DP_VSCEXT_6: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_6, data);
- data = (DP_GET_LSB(ctrl->hdr_data.max_luminance) |
- (DP_GET_MSB(ctrl->hdr_data.max_luminance) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.min_luminance) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.min_luminance) << 24));
+ data = (DP_GET_LSB(hdr->max_luminance) |
+ (DP_GET_MSB(hdr->max_luminance) << 8) |
+ (DP_GET_LSB(hdr->min_luminance) << 16) |
+ (DP_GET_MSB(hdr->min_luminance) << 24));
+ pr_debug("DP_VSCEXT_7: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_7, data);
- data = (DP_GET_LSB(ctrl->hdr_data.max_content_light_level) |
- (DP_GET_MSB(ctrl->hdr_data.max_content_light_level) << 8) |
- (DP_GET_LSB(ctrl->hdr_data.max_average_light_level) << 16) |
- (DP_GET_MSB(ctrl->hdr_data.max_average_light_level) << 24));
+ data = (DP_GET_LSB(hdr->max_content_light_level) |
+ (DP_GET_MSB(hdr->max_content_light_level) << 8) |
+ (DP_GET_LSB(hdr->max_average_light_level) << 16) |
+ (DP_GET_MSB(hdr->max_average_light_level) << 24));
+ pr_debug("DP_VSCEXT_8: 0x%x\n", data);
dp_write(base + MMSS_DP_VSCEXT_8, data);
dp_write(base + MMSS_DP_VSCEXT_9, 0x00);
}
-static void dp_catalog_ctrl_setup_vsc_sdp(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_setup_ext_sdp(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
void __iomem *base;
- u32 value;
+ u32 header, parity, data;
- if (!ctrl) {
+ if (!panel) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
+
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.ext_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_EXTENSION_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_EXTENSION_0: 0x%x\n", data);
+
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.ext_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_EXTENSION_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_EXTENSION_1: 0x%x\n", data);
+
+ dp_write(base + MMSS_DP_EXTENSION_1, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_2, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_3, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_4, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_5, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_6, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_7, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_8, 0x5AA55AA5);
+ dp_write(base + MMSS_DP_EXTENSION_9, 0x5AA55AA5);
+}
+
+static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 header, parity, data;
+ u8 bpc;
+
+ if (!panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_catalog_get_priv(panel);
base = catalog->io->ctrl_io.base;
- value = dp_read(base + MMSS_DP_GENERIC0_0);
- value |= ctrl->hdr_data.vsc_hdr_byte1;
- dp_write(base + MMSS_DP_GENERIC0_0, value);
+ /* HEADER BYTE 1 */
+ header = panel->hdr_data.vsc_header_byte1;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_1_BIT)
+ | (parity << PARITY_BYTE_1_BIT));
+ dp_write(base + MMSS_DP_GENERIC0_0, data);
+ pr_debug("Header#1: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_0: 0x%x\n", data);
- value = dp_read(base + MMSS_DP_GENERIC0_1);
- value |= ctrl->hdr_data.vsc_hdr_byte2;
- dp_write(base + MMSS_DP_GENERIC0_1, value);
+ /* HEADER BYTE 2 */
+ header = panel->hdr_data.vsc_header_byte2;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_2_BIT)
+ | (parity << PARITY_BYTE_2_BIT));
+ dp_write(base + MMSS_DP_GENERIC0_1, data);
+ pr_debug("Header#2: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_1: 0x%x\n", data);
- value = dp_read(base + MMSS_DP_GENERIC0_1);
- value |= ctrl->hdr_data.vsc_hdr_byte3;
- dp_write(base + MMSS_DP_GENERIC0_1, value);
+ /* HEADER BYTE 3 */
+ header = panel->hdr_data.vsc_header_byte3;
+ parity = dp_header_get_parity(header);
+ data = ((header << HEADER_BYTE_3_BIT)
+ | (parity << PARITY_BYTE_3_BIT));
+ data |= dp_read(base + MMSS_DP_GENERIC0_1);
+ dp_write(base + MMSS_DP_GENERIC0_1, data);
+ pr_debug("Header#3: 0x%x, parity = 0x%x\n", header, parity);
+ pr_debug("DP_GENERIC0_1: 0x%x\n", data);
dp_write(base + MMSS_DP_GENERIC0_2, 0x00);
dp_write(base + MMSS_DP_GENERIC0_3, 0x00);
dp_write(base + MMSS_DP_GENERIC0_4, 0x00);
dp_write(base + MMSS_DP_GENERIC0_5, 0x00);
- dp_write(base + MMSS_DP_GENERIC0_6, ctrl->hdr_data.pkt_payload);
+
+ switch (panel->hdr_data.bpc) {
+ default:
+ case 10:
+ bpc = BIT(1);
+ break;
+ case 8:
+ bpc = BIT(0);
+ break;
+ case 6:
+ bpc = 0;
+ break;
+ }
+
+ data = (panel->hdr_data.colorimetry & 0xF) |
+ ((panel->hdr_data.pixel_encoding & 0xF) << 4) |
+ (bpc << 8) |
+ ((panel->hdr_data.dynamic_range & 0x1) << 15) |
+ ((panel->hdr_data.content_type & 0x7) << 16);
+
+ pr_debug("DP_GENERIC0_6: 0x%x\n", data);
+ dp_write(base + MMSS_DP_GENERIC0_6, data);
dp_write(base + MMSS_DP_GENERIC0_7, 0x00);
dp_write(base + MMSS_DP_GENERIC0_8, 0x00);
dp_write(base + MMSS_DP_GENERIC0_9, 0x00);
}
-static void dp_catalog_ctrl_config_hdr(struct dp_catalog_ctrl *ctrl)
+static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel)
{
struct dp_catalog_private *catalog;
void __iomem *base;
u32 cfg, cfg2;
- if (!ctrl) {
+ if (!panel) {
pr_err("invalid input\n");
return;
}
- dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ /* EXTENSION_SDP_EN */
+ cfg |= BIT(4);
+
/* VSCEXT_SDP_EN */
cfg |= BIT(16);
/* GEN0_SDP_EN */
cfg |= BIT(17);
+ /* GEN1_SDP_EN */
+ cfg |= BIT(18);
dp_write(base + MMSS_DP_SDP_CFG, cfg);
cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
- /* Generic0 SDP Payload is 19 bytes which is > 16, so Bit16 is 1 */
- cfg2 |= BIT(16);
+ /* EXTN_SDPSIZE */
+ cfg2 |= BIT(15);
+
+ /* GENERIC0_SDPSIZE */
+ cfg |= BIT(16);
+
+ /* GENERIC1_SDPSIZE */
+ cfg |= BIT(17);
dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
- dp_catalog_ctrl_setup_vsc_sdp(ctrl);
- dp_catalog_ctrl_setup_infoframe_sdp(ctrl);
+ dp_catalog_panel_setup_ext_sdp(panel);
+ dp_catalog_panel_setup_vsc_sdp(panel);
+ dp_catalog_panel_setup_infoframe_sdp(panel);
cfg = dp_read(base + DP_MISC1_MISC0);
/* Indicates presence of VSC */
@@ -477,27 +594,8 @@ static void dp_catalog_ctrl_config_hdr(struct dp_catalog_ctrl *ctrl)
dp_write(base + DP_MISC1_MISC0, cfg);
- cfg = dp_read(base + DP_CONFIGURATION_CTRL);
- /* Send VSC */
- cfg |= BIT(7);
-
- switch (ctrl->hdr_data.bpc) {
- default:
- case 10:
- cfg |= BIT(9);
- break;
- case 8:
- cfg |= BIT(8);
- break;
- }
-
- dp_write(base + DP_CONFIGURATION_CTRL, cfg);
-
- cfg = dp_read(base + DP_COMPRESSION_MODE_CTRL);
-
- /* Trigger SDP values in registers */
- cfg |= BIT(8);
- dp_write(base + DP_COMPRESSION_MODE_CTRL, cfg);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x01);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x00);
}
static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
@@ -511,7 +609,7 @@ static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
dp_write(base + DP_TU, ctrl->dp_tu);
@@ -529,7 +627,7 @@ static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
dp_write(base + DP_STATE_CTRL, state);
}
@@ -537,7 +635,7 @@ static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
{
struct dp_catalog_private *catalog;
- void __iomem *base;
+ void __iomem *link_base;
if (!ctrl) {
pr_err("invalid input\n");
@@ -545,13 +643,11 @@ static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ link_base = catalog->io->dp_link.base;
pr_debug("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
- dp_write(base + DP_CONFIGURATION_CTRL, cfg);
- dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
- dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
+ dp_write(link_base + DP_CONFIGURATION_CTRL, cfg);
}
static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
@@ -565,9 +661,9 @@ static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
- dp_write(base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING, 0xe4);
+ dp_write(base + DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
}
static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
@@ -583,7 +679,7 @@ static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
if (enable) {
dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
@@ -614,7 +710,7 @@ static void dp_catalog_ctrl_config_misc(struct dp_catalog_ctrl *ctrl,
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
misc_val |= (tb << 5);
misc_val |= BIT(0); /* Configure clock to synchronous mode */
@@ -680,7 +776,7 @@ static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl,
nvid *= 3;
}
- base_ctrl = catalog->io->ctrl_io.base;
+ base_ctrl = catalog->io->dp_link.base;
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
@@ -700,7 +796,7 @@ static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
bit = 1;
bit <<= (pattern - 1);
@@ -754,7 +850,57 @@ static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
dp_write(base + USB3_DP_COM_RESET_OVRD_CTRL, 0x00);
/* make sure phy is brought out of reset */
wmb();
+}
+static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
+ bool enable)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+
+ if (!panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_p0.base;
+
+ if (!enable) {
+ dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ dp_write(base + MMSS_DP_BIST_ENABLE, 0x0);
+ dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x0);
+ wmb(); /* ensure Timing generator is turned off */
+ return;
+ }
+
+ dp_write(base + MMSS_DP_INTF_CONFIG, 0x0);
+ dp_write(base + MMSS_DP_INTF_HSYNC_CTL, panel->hsync_ctl);
+ dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F0, panel->vsync_period *
+ panel->hsync_period);
+ dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, panel->v_sync_width *
+ panel->hsync_period);
+ dp_write(base + MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ dp_write(base + MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ dp_write(base + MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl);
+ dp_write(base + MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ dp_write(base + MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start);
+ dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end);
+ dp_write(base + MMSS_INTF_DISPLAY_V_START_F1, 0);
+ dp_write(base + MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ dp_write(base + MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ dp_write(base + MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ dp_write(base + MMSS_DP_INTF_POLARITY_CTL, 0);
+ wmb(); /* ensure TPG registers are programmed */
+
+ dp_write(base + MMSS_DP_TPG_MAIN_CONTROL, 0x100);
+ dp_write(base + MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
+ wmb(); /* ensure TPG config is programmed */
+ dp_write(base + MMSS_DP_BIST_ENABLE, 0x1);
+ dp_write(base + MMSS_DP_TIMING_ENGINE_EN, 0x1);
+ wmb(); /* ensure Timing generator is turned on */
}
static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
@@ -769,7 +915,7 @@ static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_ahb.base;
sw_reset = dp_read(base + DP_SW_RESET);
@@ -794,7 +940,7 @@ static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
while (--cnt) {
/* DP_MAINLINK_READY */
@@ -821,7 +967,7 @@ static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_ahb.base;
if (enable) {
dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
@@ -843,7 +989,7 @@ static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_aux.base;
if (en) {
u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
@@ -874,7 +1020,7 @@ static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_ahb.base;
ctrl->isr = dp_read(base + DP_INTR_STATUS2);
ctrl->isr &= ~DP_INTR_MASK2;
@@ -895,7 +1041,7 @@ static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
}
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_ahb.base;
dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
usleep_range(1000, 1010); /* h/w recommended delay */
@@ -984,7 +1130,7 @@ static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl,
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
dp_write(base + DP_STATE_CTRL, 0x0);
@@ -1012,7 +1158,7 @@ static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl,
/* 1111100000111110 */
dp_write(base + DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E);
break;
- case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
value = BIT(16);
dp_write(base + DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value);
value |= 0xFC;
@@ -1020,6 +1166,10 @@ static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl,
dp_write(base + DP_MAINLINK_LEVELS, 0x2);
dp_write(base + DP_STATE_CTRL, 0x10);
break;
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
+ dp_write(base + DP_MAINLINK_CTRL, 0x11);
+ dp_write(base + DP_STATE_CTRL, 0x8);
+ break;
default:
pr_debug("No valid test pattern requested: 0x%x\n", pattern);
return;
@@ -1041,7 +1191,7 @@ static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl)
dp_catalog_get_priv(ctrl);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
return dp_read(base + DP_MAINLINK_READY);
}
@@ -1058,7 +1208,7 @@ static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
}
dp_catalog_get_priv(panel);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
dp_write(base + DP_TOTAL_HOR_VER, panel->total);
dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
@@ -1118,7 +1268,9 @@ static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio)
return;
dp_catalog_get_priv(audio);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
+
+ sdp_cfg = dp_read(base + MMSS_DP_SDP_CFG);
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
@@ -1157,7 +1309,7 @@ static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio)
dp_catalog_get_priv(audio);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
sdp_map = catalog->audio_map;
sdp = audio->sdp_type;
header = audio->sdp_header;
@@ -1179,7 +1331,7 @@ static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio)
dp_catalog_get_priv(audio);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
sdp_map = catalog->audio_map;
sdp = audio->sdp_type;
header = audio->sdp_header;
@@ -1197,7 +1349,7 @@ static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio)
dp_catalog_get_priv(audio);
select = audio->data;
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
@@ -1214,7 +1366,7 @@ static void dp_catalog_audio_safe_to_exit_level(struct dp_catalog_audio *audio)
dp_catalog_get_priv(audio);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
safe_to_exit_level = audio->data;
mainlink_levels = dp_read(base + DP_MAINLINK_LEVELS);
@@ -1236,7 +1388,7 @@ static void dp_catalog_audio_enable(struct dp_catalog_audio *audio)
dp_catalog_get_priv(audio);
- base = catalog->io->ctrl_io.base;
+ base = catalog->io->dp_link.base;
enable = !!audio->data;
audio_ctrl = dp_read(base + MMSS_DP_AUDIO_CFG);
@@ -1253,6 +1405,131 @@ static void dp_catalog_audio_enable(struct dp_catalog_audio *audio)
wmb();
}
+static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ if (!panel)
+ return;
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
+
+ /* Config header and parity byte 1 */
+ value = dp_read(base + MMSS_DP_GENERIC1_0);
+
+ new_value = 0x83;
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ pr_debug("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_0, value);
+
+ /* Config header and parity byte 2 */
+ value = dp_read(base + MMSS_DP_GENERIC1_1);
+
+ new_value = 0x1b;
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ pr_debug("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_1, value);
+
+ /* Config header and parity byte 3 */
+ value = dp_read(base + MMSS_DP_GENERIC1_1);
+
+ new_value = (0x0 | (0x12 << 2));
+ parity_byte = dp_header_get_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ pr_debug("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ new_value, parity_byte);
+ dp_write(base + MMSS_DP_GENERIC1_1, value);
+}
+
+static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel)
+{
+ struct dp_catalog_private *catalog;
+ void __iomem *base;
+ u32 spd_cfg = 0, spd_cfg2 = 0;
+ u8 *vendor = NULL, *product = NULL;
+ /*
+ * Source Device Information
+ * 00h unknown
+ * 01h Digital STB
+ * 02h DVD
+ * 03h D-VHS
+ * 04h HDD Video
+ * 05h DVC
+ * 06h DSC
+ * 07h Video CD
+ * 08h Game
+ * 09h PC general
+ * 0ah Blu-ray Disc
+ * 0bh Super Audio CD
+ * 0ch HD DVD
+ * 0dh PMP
+ * 0eh-ffh reserved
+ */
+ u32 device_type = 0;
+
+ if (!panel)
+ return;
+
+ dp_catalog_get_priv(panel);
+ base = catalog->io->dp_link.base;
+
+ dp_catalog_config_spd_header(panel);
+
+ vendor = panel->spd_vendor_name;
+ product = panel->spd_product_description;
+
+ dp_write(base + MMSS_DP_GENERIC1_2, ((vendor[0] & 0x7f) |
+ ((vendor[1] & 0x7f) << 8) |
+ ((vendor[2] & 0x7f) << 16) |
+ ((vendor[3] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_3, ((vendor[4] & 0x7f) |
+ ((vendor[5] & 0x7f) << 8) |
+ ((vendor[6] & 0x7f) << 16) |
+ ((vendor[7] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_4, ((product[0] & 0x7f) |
+ ((product[1] & 0x7f) << 8) |
+ ((product[2] & 0x7f) << 16) |
+ ((product[3] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_5, ((product[4] & 0x7f) |
+ ((product[5] & 0x7f) << 8) |
+ ((product[6] & 0x7f) << 16) |
+ ((product[7] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_6, ((product[8] & 0x7f) |
+ ((product[9] & 0x7f) << 8) |
+ ((product[10] & 0x7f) << 16) |
+ ((product[11] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_7, ((product[12] & 0x7f) |
+ ((product[13] & 0x7f) << 8) |
+ ((product[14] & 0x7f) << 16) |
+ ((product[15] & 0x7f) << 24)));
+ dp_write(base + MMSS_DP_GENERIC1_8, device_type);
+ dp_write(base + MMSS_DP_GENERIC1_9, 0x00);
+
+ spd_cfg = dp_read(base + MMSS_DP_SDP_CFG);
+ /* GENERIC1_SDP for SPD Infoframe */
+ spd_cfg |= BIT(18);
+ dp_write(base + MMSS_DP_SDP_CFG, spd_cfg);
+
+ spd_cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
+ /* 28 data bytes for SPD Infoframe with GENERIC1 set */
+ spd_cfg2 |= BIT(17);
+ dp_write(base + MMSS_DP_SDP_CFG2, spd_cfg2);
+
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x1);
+ dp_write(base + MMSS_DP_SDP_CFG3, 0x0);
+}
+
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
{
int rc = 0;
@@ -1287,7 +1564,6 @@ struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg,
.update_vx_px = dp_catalog_ctrl_update_vx_px,
.get_interrupt = dp_catalog_ctrl_get_interrupt,
- .config_hdr = dp_catalog_ctrl_config_hdr,
.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
.read_hdcp_status = dp_catalog_ctrl_read_hdcp_status,
.send_phy_pattern = dp_catalog_ctrl_send_phy_pattern,
@@ -1304,6 +1580,9 @@ struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
};
struct dp_catalog_panel panel = {
.timing_cfg = dp_catalog_panel_timing_cfg,
+ .config_hdr = dp_catalog_panel_config_hdr,
+ .tpg_config = dp_catalog_panel_tpg_cfg,
+ .config_spd = dp_catalog_panel_config_spd,
};
if (!io) {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index aca2f18..b270545 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -15,6 +15,8 @@
#ifndef _DP_CATALOG_H_
#define _DP_CATALOG_H_
+#include <drm/msm_drm.h>
+
#include "dp_parser.h"
/* interrupts */
@@ -34,30 +36,32 @@
#define DP_INTR_FRAME_END BIT(6)
#define DP_INTR_CRC_UPDATED BIT(9)
-#define HDR_PRIMARIES_COUNT 3
-
struct dp_catalog_hdr_data {
- u32 vsc_hdr_byte0;
- u32 vsc_hdr_byte1;
- u32 vsc_hdr_byte2;
- u32 vsc_hdr_byte3;
- u32 pkt_payload;
+ u32 ext_header_byte0;
+ u32 ext_header_byte1;
+ u32 ext_header_byte2;
+ u32 ext_header_byte3;
+
+ u32 vsc_header_byte0;
+ u32 vsc_header_byte1;
+ u32 vsc_header_byte2;
+ u32 vsc_header_byte3;
+
+ u32 vscext_header_byte0;
+ u32 vscext_header_byte1;
+ u32 vscext_header_byte2;
+ u32 vscext_header_byte3;
u32 bpc;
u32 version;
u32 length;
- u32 eotf;
- u32 descriptor_id;
+ u32 pixel_encoding;
+ u32 colorimetry;
+ u32 dynamic_range;
+ u32 content_type;
- u32 display_primaries_x[HDR_PRIMARIES_COUNT];
- u32 display_primaries_y[HDR_PRIMARIES_COUNT];
- u32 white_point_x;
- u32 white_point_y;
- u32 max_luminance;
- u32 min_luminance;
- u32 max_content_light_level;
- u32 max_average_light_level;
+ struct drm_msm_ext_hdr_metadata hdr_meta;
};
struct dp_catalog_aux {
@@ -83,7 +87,6 @@ struct dp_catalog_ctrl {
u32 valid_boundary;
u32 valid_boundary2;
u32 isr;
- struct dp_catalog_hdr_data hdr_data;
void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
@@ -104,7 +107,6 @@ struct dp_catalog_ctrl {
void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
u8 p_level);
void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
- void (*config_hdr)(struct dp_catalog_ctrl *ctrl);
void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl,
@@ -112,6 +114,13 @@ struct dp_catalog_ctrl {
u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl);
};
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
enum dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
@@ -147,8 +156,24 @@ struct dp_catalog_panel {
u32 sync_start;
u32 width_blanking;
u32 dp_active;
+ u8 *spd_vendor_name;
+ u8 *spd_product_description;
+
+ struct dp_catalog_hdr_data hdr_data;
+
+ /* TPG */
+ u32 hsync_period;
+ u32 vsync_period;
+ u32 display_v_start;
+ u32 display_v_end;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
int (*timing_cfg)(struct dp_catalog_panel *panel);
+ void (*config_hdr)(struct dp_catalog_panel *panel);
+ void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
+ void (*config_spd)(struct dp_catalog_panel *panel);
};
struct dp_catalog {
@@ -158,6 +183,71 @@ struct dp_catalog {
struct dp_catalog_panel panel;
};
+static inline u8 dp_ecc_get_g0_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[3];
+ g[1] = c[0] ^ c[3];
+ g[2] = c[1];
+ g[3] = c[2];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static inline u8 dp_ecc_get_g1_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[0] ^ c[3];
+ g[1] = c[0] ^ c[1] ^ c[3];
+ g[2] = c[1] ^ c[2];
+ g[3] = c[2] ^ c[3];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static inline u8 dp_header_get_parity(u32 data)
+{
+ u8 x0 = 0;
+ u8 x1 = 0;
+ u8 ci = 0;
+ u8 iData = 0;
+ u8 i = 0;
+ u8 parity_byte;
+ u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+ for (i = 0; i < num_byte; i++) {
+ iData = (data >> i*4) & 0xF;
+
+ ci = iData ^ x1;
+ x1 = x0 ^ dp_ecc_get_g1_value(ci);
+ x0 = dp_ecc_get_g0_value(ci);
+ }
+
+ parity_byte = x1 | (x0 << 4);
+
+ return parity_byte;
+}
+
struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
void dp_catalog_put(struct dp_catalog *catalog);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 13ca6b2..576ed52 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -40,6 +40,7 @@
#define MR_LINK_SYMBOL_ERM 0x80
#define MR_LINK_PRBS7 0x100
#define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4 0x40
struct dp_vc_tu_mapping_table {
u32 vic;
@@ -760,18 +761,18 @@ static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
}
-static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
{
struct dp_link *link = ctrl->link;
ctrl->catalog->update_vx_px(ctrl->catalog,
link->phy_params.v_level, link->phy_params.p_level);
- dp_ctrl_update_sink_vx_px(ctrl, link->phy_params.v_level,
+ return dp_ctrl_update_sink_vx_px(ctrl, link->phy_params.v_level,
link->phy_params.p_level);
}
-static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+static int dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 pattern)
{
u8 buf[4];
@@ -779,7 +780,8 @@ static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
pr_debug("sink: pattern=%x\n", pattern);
buf[0] = pattern;
- drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
+ return drm_dp_dpcd_write(ctrl->aux->drm_aux,
+ DP_TRAINING_PATTERN_SET, buf, 1);
}
static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
@@ -816,9 +818,18 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
wmb();
ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ ret = dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE); /* train_1 */
- dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
@@ -855,7 +866,11 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
pr_debug("clock recovery not done, adjusting vx px\n");
ctrl->link->adjust_levels(ctrl->link, link_status);
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ break;
+ }
}
return ret;
@@ -909,9 +924,18 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
else
pattern = DP_TRAINING_PATTERN_2;
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
ctrl->catalog->set_pattern(ctrl->catalog, pattern);
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ ret = dp_ctrl_train_pattern_set(ctrl,
+ pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ return ret;
+ }
do {
drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
@@ -931,7 +955,11 @@ static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
tries++;
ctrl->link->adjust_levels(ctrl->link, link_status);
- dp_ctrl_update_vx_px(ctrl);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ break;
+ }
} while (1);
return ret;
@@ -953,9 +981,16 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
ctrl->link->link_params.bw_code);
link_info.capabilities = ctrl->panel->link_info.capabilities;
- drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
- drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- &encoding, 1);
+ ret = drm_dp_link_configure(ctrl->aux->drm_aux, &link_info);
+ if (ret)
+ goto end;
+
+ ret = drm_dp_dpcd_write(ctrl->aux->drm_aux,
+ DP_MAIN_LINK_CHANNEL_CODING_SET, &encoding, 1);
+ if (ret <= 0) {
+ ret = -EINVAL;
+ goto end;
+ }
ret = dp_ctrl_link_train_1(ctrl);
if (ret) {
@@ -973,7 +1008,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
}
/* print success info as this is a result of user initiated action */
- pr_debug("link training #2 successful\n");
+ pr_info("link training #2 successful\n");
end:
dp_ctrl_state_ctrl(ctrl, 0);
@@ -991,11 +1026,6 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
- ret = ctrl->link->psm_config(ctrl->link,
- &ctrl->panel->link_info, false);
- if (ret)
- goto end;
-
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
goto end;
@@ -1072,7 +1102,8 @@ static int dp_ctrl_disable_mainlink_clocks(struct dp_ctrl_private *ctrl)
return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
}
-static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl,
+ bool flip, bool multi_func)
{
struct dp_ctrl_private *ctrl;
struct dp_catalog_ctrl *catalog;
@@ -1087,8 +1118,10 @@ static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
ctrl->orientation = flip;
catalog = ctrl->catalog;
- catalog->usb_reset(ctrl->catalog, flip);
- catalog->phy_reset(ctrl->catalog);
+ if (!multi_func) {
+ catalog->usb_reset(ctrl->catalog, flip);
+ catalog->phy_reset(ctrl->catalog);
+ }
catalog->enable_irq(ctrl->catalog, true);
return 0;
@@ -1214,9 +1247,6 @@ static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
u32 pattern_sent = 0x0;
u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
- pr_debug("request: %s\n",
- dp_link_get_phy_test_pattern(pattern_requested));
-
ctrl->catalog->update_vx_px(ctrl->catalog,
ctrl->link->phy_params.v_level,
ctrl->link->phy_params.p_level);
@@ -1224,6 +1254,9 @@ static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
ctrl->link->send_test_response(ctrl->link);
pattern_sent = ctrl->catalog->read_phy_pattern(ctrl->catalog);
+ pr_debug("pattern_request: %s. pattern_sent: 0x%x\n",
+ dp_link_get_phy_test_pattern(pattern_requested),
+ pattern_sent);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1235,7 +1268,7 @@ static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
if ((pattern_requested ==
DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT)
|| (pattern_requested ==
- DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN))
+ DP_TEST_PHY_PATTERN_CP2520_PATTERN_1))
success = true;
break;
case MR_LINK_PRBS7:
@@ -1247,40 +1280,57 @@ static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN)
success = true;
break;
+ case MR_LINK_TRAINING4:
+ if (pattern_requested ==
+ DP_TEST_PHY_PATTERN_CP2520_PATTERN_3)
+ success = true;
+ break;
default:
success = false;
- return;
+ break;
}
pr_debug("%s: %s\n", success ? "success" : "failed",
dp_link_get_phy_test_pattern(pattern_requested));
}
-static void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+static bool dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
u32 sink_request = 0x0;
+ bool req_handled = false;
if (!dp_ctrl) {
pr_err("invalid input\n");
- return;
+ goto end;
}
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
sink_request = ctrl->link->sink_request;
if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
- pr_info("PHY_TEST_PATTERN request\n");
+ pr_info("PHY_TEST_PATTERN\n");
dp_ctrl_process_phy_test_request(ctrl);
+
+ req_handled = true;
}
- if (sink_request & DP_LINK_STATUS_UPDATED)
+ if (sink_request & DP_LINK_STATUS_UPDATED) {
+ pr_info("DP_LINK_STATUS_UPDATED\n");
dp_ctrl_link_maintenance(ctrl);
+ req_handled = true;
+ }
+
if (sink_request & DP_TEST_LINK_TRAINING) {
+ pr_info("DP_TEST_LINK_TRAINING\n");
ctrl->link->send_test_response(ctrl->link);
dp_ctrl_link_maintenance(ctrl);
+
+ req_handled = true;
}
+end:
+ return req_handled;
}
static void dp_ctrl_reset(struct dp_ctrl *dp_ctrl)
@@ -1442,6 +1492,7 @@ struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
ctrl->aux = in->aux;
ctrl->link = in->link;
ctrl->catalog = in->catalog;
+ ctrl->dev = in->dev;
dp_ctrl = &ctrl->dp_ctrl;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index d6d10ed..aaac0ab 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -23,7 +23,7 @@
#include "dp_catalog.h"
struct dp_ctrl {
- int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
+ int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool multi_func);
void (*deinit)(struct dp_ctrl *dp_ctrl);
int (*on)(struct dp_ctrl *dp_ctrl);
void (*off)(struct dp_ctrl *dp_ctrl);
@@ -31,7 +31,7 @@ struct dp_ctrl {
void (*push_idle)(struct dp_ctrl *dp_ctrl);
void (*abort)(struct dp_ctrl *dp_ctrl);
void (*isr)(struct dp_ctrl *dp_ctrl);
- void (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
+ bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl);
};
struct dp_ctrl_in {
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 92ac0ec..d00f159 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -29,6 +29,11 @@
struct dp_debug_private {
struct dentry *root;
+ u8 *edid;
+ u32 edid_size;
+
+ u8 *dpcd;
+ u32 dpcd_size;
struct dp_usbpd *usbpd;
struct dp_link *link;
@@ -39,6 +44,138 @@ struct dp_debug_private {
struct dp_debug dp_debug;
};
+static ssize_t dp_debug_write_edid(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ u8 *buf = NULL, *buf_t = NULL, *edid = NULL;
+ const int char_to_nib = 2;
+ size_t edid_size = 0;
+ size_t size = 0, edid_buf_index = 0;
+ ssize_t rc = count;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ goto bail;
+
+ size = min_t(size_t, count, SZ_1K);
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(buf, user_buff, size))
+ goto bail;
+
+ edid_size = size / char_to_nib;
+ buf_t = buf;
+
+ memset(debug->edid, 0, debug->edid_size);
+
+ if (edid_size != debug->edid_size) {
+ pr_debug("clearing debug edid\n");
+ goto bail;
+ }
+
+ while (edid_size--) {
+ char t[3];
+ int d;
+
+ memcpy(t, buf_t, sizeof(char) * char_to_nib);
+ t[char_to_nib] = '\0';
+
+ if (kstrtoint(t, 16, &d)) {
+ pr_err("kstrtoint error\n");
+ goto bail;
+ }
+
+ if (edid_buf_index < debug->edid_size)
+ debug->edid[edid_buf_index++] = d;
+
+ buf_t += char_to_nib;
+ }
+
+ print_hex_dump(KERN_DEBUG, "DEBUG EDID: ", DUMP_PREFIX_NONE,
+ 16, 1, debug->edid, debug->edid_size, false);
+
+ edid = debug->edid;
+bail:
+ kfree(buf);
+ debug->panel->set_edid(debug->panel, edid);
+ return rc;
+}
+
+static ssize_t dp_debug_write_dpcd(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ u8 *buf = NULL, *buf_t = NULL, *dpcd = NULL;
+ const int char_to_nib = 2;
+ size_t dpcd_size = 0;
+ size_t size = 0, dpcd_buf_index = 0;
+ ssize_t rc = count;
+
+ pr_debug("count=%zu\n", count);
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ goto bail;
+
+ size = min_t(size_t, count, SZ_32);
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(buf, user_buff, size))
+ goto bail;
+
+ dpcd_size = size / char_to_nib;
+ buf_t = buf;
+
+ memset(debug->dpcd, 0, debug->dpcd_size);
+
+ if (dpcd_size != debug->dpcd_size) {
+ pr_debug("clearing debug dpcd\n");
+ goto bail;
+ }
+
+ while (dpcd_size--) {
+ char t[3];
+ int d;
+
+ memcpy(t, buf_t, sizeof(char) * char_to_nib);
+ t[char_to_nib] = '\0';
+
+ if (kstrtoint(t, 16, &d)) {
+ pr_err("kstrtoint error\n");
+ goto bail;
+ }
+
+ if (dpcd_buf_index < debug->dpcd_size)
+ debug->dpcd[dpcd_buf_index++] = d;
+
+ buf_t += char_to_nib;
+ }
+
+ print_hex_dump(KERN_DEBUG, "DEBUG DPCD: ", DUMP_PREFIX_NONE,
+ 8, 1, debug->dpcd, debug->dpcd_size, false);
+
+ dpcd = debug->dpcd;
+bail:
+ kfree(buf);
+ debug->panel->set_dpcd(debug->panel, dpcd);
+ return rc;
+}
+
static ssize_t dp_debug_write_hpd(struct file *file,
const char __user *user_buff, size_t count, loff_t *ppos)
{
@@ -63,9 +200,13 @@ static ssize_t dp_debug_write_hpd(struct file *file,
if (kstrtoint(buf, 10, &hpd) != 0)
goto end;
- debug->usbpd->connect(debug->usbpd, hpd);
+ hpd &= 0x3;
+
+ debug->dp_debug.psm_enabled = !!(hpd & BIT(1));
+
+ debug->usbpd->simulate_connect(debug->usbpd, !!(hpd & BIT(0)));
end:
- return -len;
+ return len;
}
static ssize_t dp_debug_write_edid_modes(struct file *file,
@@ -143,6 +284,44 @@ static ssize_t dp_debug_bw_code_write(struct file *file,
return len;
}
+static ssize_t dp_debug_tpg_write(struct file *file,
+ const char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_8];
+ size_t len = 0;
+ u32 tpg_state = 0;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ /* Leave room for termination char */
+ len = min_t(size_t, count, SZ_8 - 1);
+ if (copy_from_user(buf, user_buff, len))
+ goto bail;
+
+ buf[len] = '\0';
+
+ if (kstrtoint(buf, 10, &tpg_state) != 0)
+ goto bail;
+
+ tpg_state &= 0x1;
+ pr_debug("tpg_state: %d\n", tpg_state);
+
+ if (tpg_state == debug->dp_debug.tpg_state)
+ goto bail;
+
+ if (debug->panel)
+ debug->panel->tpg_config(debug->panel, tpg_state);
+
+ debug->dp_debug.tpg_state = tpg_state;
+bail:
+ return len;
+}
+
static ssize_t dp_debug_read_connected(struct file *file,
char __user *user_buff, size_t count, loff_t *ppos)
{
@@ -198,6 +377,7 @@ static ssize_t dp_debug_read_edid_modes(struct file *file,
goto error;
}
+ mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
len += snprintf(buf + len, SZ_4K - len,
"%s %d %d %d %d %d %d %d %d %d %d 0x%x\n",
@@ -206,6 +386,7 @@ static ssize_t dp_debug_read_edid_modes(struct file *file,
mode->htotal, mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->flags);
}
+ mutex_unlock(&connector->dev->mode_config.mutex);
if (copy_to_user(user_buff, buf, len)) {
kfree(buf);
@@ -415,6 +596,28 @@ static ssize_t dp_debug_bw_code_read(struct file *file,
return len;
}
+static ssize_t dp_debug_tpg_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char buf[SZ_8];
+ u32 len = 0;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state);
+
+ if (copy_to_user(user_buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
static const struct file_operations dp_debug_fops = {
.open = simple_open,
.read = dp_debug_read_info,
@@ -431,6 +634,16 @@ static const struct file_operations hpd_fops = {
.write = dp_debug_write_hpd,
};
+static const struct file_operations edid_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_edid,
+};
+
+static const struct file_operations dpcd_fops = {
+ .open = simple_open,
+ .write = dp_debug_write_dpcd,
+};
+
static const struct file_operations connected_fops = {
.open = simple_open,
.read = dp_debug_read_connected,
@@ -442,24 +655,32 @@ static const struct file_operations bw_code_fops = {
.write = dp_debug_bw_code_write,
};
+static const struct file_operations tpg_fops = {
+ .open = simple_open,
+ .read = dp_debug_tpg_read,
+ .write = dp_debug_tpg_write,
+};
+
static int dp_debug_init(struct dp_debug *dp_debug)
{
int rc = 0;
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
- struct dentry *dir, *file, *edid_modes;
- struct dentry *hpd, *connected;
- struct dentry *max_bw_code;
- struct dentry *root = debug->root;
+ struct dentry *dir, *file;
dir = debugfs_create_dir(DEBUG_NAME, NULL);
if (IS_ERR_OR_NULL(dir)) {
- rc = PTR_ERR(dir);
+ if (!dir)
+ rc = -EINVAL;
+ else
+ rc = PTR_ERR(dir);
pr_err("[%s] debugfs create dir failed, rc = %d\n",
DEBUG_NAME, rc);
goto error;
}
+ debug->root = dir;
+
file = debugfs_create_file("dp_debug", 0444, dir,
debug, &dp_debug_fops);
if (IS_ERR_OR_NULL(file)) {
@@ -469,46 +690,75 @@ static int dp_debug_init(struct dp_debug *dp_debug)
goto error_remove_dir;
}
- edid_modes = debugfs_create_file("edid_modes", 0644, dir,
+ file = debugfs_create_file("edid_modes", 0644, dir,
debug, &edid_modes_fops);
- if (IS_ERR_OR_NULL(edid_modes)) {
- rc = PTR_ERR(edid_modes);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
pr_err("[%s] debugfs create edid_modes failed, rc=%d\n",
DEBUG_NAME, rc);
goto error_remove_dir;
}
- hpd = debugfs_create_file("hpd", 0644, dir,
+ file = debugfs_create_file("hpd", 0644, dir,
debug, &hpd_fops);
- if (IS_ERR_OR_NULL(hpd)) {
- rc = PTR_ERR(hpd);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
pr_err("[%s] debugfs hpd failed, rc=%d\n",
DEBUG_NAME, rc);
goto error_remove_dir;
}
- connected = debugfs_create_file("connected", 0444, dir,
+ file = debugfs_create_file("connected", 0444, dir,
debug, &connected_fops);
- if (IS_ERR_OR_NULL(connected)) {
- rc = PTR_ERR(connected);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
pr_err("[%s] debugfs connected failed, rc=%d\n",
DEBUG_NAME, rc);
goto error_remove_dir;
}
- max_bw_code = debugfs_create_file("max_bw_code", 0644, dir,
+ file = debugfs_create_file("max_bw_code", 0644, dir,
debug, &bw_code_fops);
- if (IS_ERR_OR_NULL(max_bw_code)) {
- rc = PTR_ERR(max_bw_code);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
DEBUG_NAME, rc);
goto error_remove_dir;
}
- root = dir;
- return rc;
+ file = debugfs_create_file("edid", 0644, dir,
+ debug, &edid_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs edid failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
+ file = debugfs_create_file("dpcd", 0644, dir,
+ debug, &dpcd_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs dpcd failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
+ file = debugfs_create_file("tpg_ctrl", 0644, dir,
+ debug, &tpg_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs tpg failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ goto error_remove_dir;
+ }
+
+ return 0;
+
error_remove_dir:
- debugfs_remove(dir);
+ if (!file)
+ rc = -EINVAL;
+ debugfs_remove_recursive(dir);
error:
return rc;
}
@@ -533,6 +783,24 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
goto error;
}
+ debug->edid = devm_kzalloc(dev, SZ_256, GFP_KERNEL);
+ if (!debug->edid) {
+ rc = -ENOMEM;
+ kfree(debug);
+ goto error;
+ }
+
+ debug->edid_size = SZ_256;
+
+ debug->dpcd = devm_kzalloc(dev, SZ_16, GFP_KERNEL);
+ if (!debug->dpcd) {
+ rc = -ENOMEM;
+ kfree(debug);
+ goto error;
+ }
+
+ debug->dpcd_size = SZ_16;
+
debug->dp_debug.debug_en = false;
debug->usbpd = usbpd;
debug->link = link;
@@ -565,7 +833,7 @@ static int dp_debug_deinit(struct dp_debug *dp_debug)
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
- debugfs_remove(debug->root);
+ debugfs_remove_recursive(debug->root);
return 0;
}
@@ -581,5 +849,7 @@ void dp_debug_put(struct dp_debug *dp_debug)
dp_debug_deinit(dp_debug);
+ devm_kfree(debug->dev, debug->edid);
+ devm_kfree(debug->dev, debug->dpcd);
devm_kfree(debug->dev, debug);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index d5a9301..3b2d23e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -25,13 +25,16 @@
* @vdisplay: used to filter out vdisplay value
* @hdisplay: used to filter out hdisplay value
* @vrefresh: used to filter out vrefresh value
+ * @tpg_state: specifies whether tpg feature is enabled
*/
struct dp_debug {
bool debug_en;
+ bool psm_enabled;
int aspect_ratio;
int vdisplay;
int hdisplay;
int vrefresh;
+ bool tpg_state;
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 7fbc63a..01a2a9c 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -61,7 +61,6 @@ struct dp_display_private {
/* state variables */
bool core_initialized;
bool power_on;
- bool hpd_irq_on;
bool audio_supported;
struct platform_device *pdev;
@@ -85,8 +84,10 @@ struct dp_display_private {
struct dp_display_mode mode;
struct dp_display dp_display;
- struct workqueue_struct *hdcp_workqueue;
+ struct workqueue_struct *wq;
struct delayed_work hdcp_cb_work;
+ struct work_struct connect_work;
+ struct work_struct attention_work;
struct mutex hdcp_mutex;
struct mutex session_lock;
int hdcp_status;
@@ -191,26 +192,13 @@ static void dp_display_notify_hdcp_status_cb(void *ptr,
dp->hdcp_status = status;
if (dp->dp_display.is_connected)
- queue_delayed_work(dp->hdcp_workqueue, &dp->hdcp_cb_work, HZ/4);
-}
-
-static int dp_display_create_hdcp_workqueue(struct dp_display_private *dp)
-{
- dp->hdcp_workqueue = create_workqueue("sdm_dp_hdcp");
- if (IS_ERR_OR_NULL(dp->hdcp_workqueue)) {
- pr_err("Error creating hdcp_workqueue\n");
- return -EPERM;
- }
-
- INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
-
- return 0;
+ queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4);
}
static void dp_display_destroy_hdcp_workqueue(struct dp_display_private *dp)
{
- if (dp->hdcp_workqueue)
- destroy_workqueue(dp->hdcp_workqueue);
+ if (dp->wq)
+ destroy_workqueue(dp->wq);
}
static void dp_display_update_hdcp_info(struct dp_display_private *dp)
@@ -277,7 +265,6 @@ static void dp_display_deinitialize_hdcp(struct dp_display_private *dp)
static int dp_display_initialize_hdcp(struct dp_display_private *dp)
{
struct sde_hdcp_init_data hdcp_init_data;
- struct resource *res;
int rc = 0;
if (!dp) {
@@ -287,29 +274,18 @@ static int dp_display_initialize_hdcp(struct dp_display_private *dp)
mutex_init(&dp->hdcp_mutex);
- rc = dp_display_create_hdcp_workqueue(dp);
- if (rc) {
- pr_err("Failed to create HDCP workqueue\n");
- goto error;
- }
-
- res = platform_get_resource_byname(dp->pdev,
- IORESOURCE_MEM, "dp_ctrl");
- if (!res) {
- pr_err("Error getting dp ctrl resource\n");
- rc = -EINVAL;
- goto error;
- }
-
- hdcp_init_data.phy_addr = res->start;
hdcp_init_data.client_id = HDCP_CLIENT_DP;
hdcp_init_data.drm_aux = dp->aux->drm_aux;
hdcp_init_data.cb_data = (void *)dp;
- hdcp_init_data.workq = dp->hdcp_workqueue;
+ hdcp_init_data.workq = dp->wq;
hdcp_init_data.mutex = &dp->hdcp_mutex;
hdcp_init_data.sec_access = true;
hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
hdcp_init_data.core_io = &dp->parser->io.ctrl_io;
+ hdcp_init_data.dp_ahb = &dp->parser->io.dp_ahb;
+ hdcp_init_data.dp_aux = &dp->parser->io.dp_aux;
+ hdcp_init_data.dp_link = &dp->parser->io.dp_link;
+ hdcp_init_data.dp_p0 = &dp->parser->io.dp_p0;
hdcp_init_data.qfprom_io = &dp->parser->io.qfprom_io;
hdcp_init_data.hdcp_io = &dp->parser->io.hdcp_io;
hdcp_init_data.revision = &dp->panel->link_info.revision;
@@ -363,24 +339,12 @@ static int dp_display_bind(struct device *dev, struct device *master,
dp->dp_display.drm_dev = drm;
priv = drm->dev_private;
- rc = dp->parser->parse(dp->parser);
- if (rc) {
- pr_err("device tree parsing failed\n");
- goto end;
- }
-
rc = dp->aux->drm_aux_register(dp->aux);
if (rc) {
pr_err("DRM DP AUX register failed\n");
goto end;
}
- rc = dp->panel->sde_edid_register(dp->panel);
- if (rc) {
- pr_err("DRM DP EDID register failed\n");
- goto end;
- }
-
rc = dp->power->power_client_init(dp->power, &priv->phandle);
if (rc) {
pr_err("Power client create failed\n");
@@ -414,7 +378,6 @@ static void dp_display_unbind(struct device *dev, struct device *master,
}
(void)dp->power->power_client_deinit(dp->power);
- (void)dp->panel->sde_edid_deregister(dp->panel);
(void)dp->aux->drm_aux_deregister(dp->aux);
dp_display_deinitialize_hdcp(dp);
}
@@ -485,16 +448,6 @@ static void dp_display_send_hpd_event(struct dp_display *dp_display)
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.is_connected) ||
- (!hpd && !dp->dp_display.is_connected)) {
- pr_info("HPD already %s\n", (hpd ? "on" : "off"));
- return 0;
- }
-
- /* reset video pattern flag on disconnect */
- if (!hpd)
- dp->panel->video_test = false;
-
dp->dp_display.is_connected = hpd;
reinit_completion(&dp->notification_comp);
dp_display_send_hpd_event(&dp->dp_display);
@@ -503,6 +456,8 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
/* cancel any pending request */
dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
+
return -EINVAL;
}
@@ -512,17 +467,24 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
int rc = 0;
- u32 max_pclk_from_edid = 0;
struct edid *edid;
dp->aux->init(dp->aux, dp->parser->aux_cfg);
- if (dp->link->psm_enabled)
- goto notify;
+ if (dp->debug->psm_enabled) {
+ dp->link->psm_config(dp->link, &dp->panel->link_info, false);
+ dp->debug->psm_enabled = false;
+ }
rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
- if (rc)
- goto notify;
+ if (rc) {
+ if (rc == -ETIMEDOUT) {
+ pr_err("Sink cap read failed, skip notification\n");
+ goto end;
+ } else {
+ goto notify;
+ }
+ }
dp->link->process_request(dp->link);
@@ -538,11 +500,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp->panel->handle_sink_request(dp->panel);
- max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
-
- dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
- dp->parser->max_pclk_khz);
-
+ dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz;
notify:
dp_display_send_hpd_notification(dp, true);
@@ -563,7 +521,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
flip = true;
dp->power->init(dp->power, flip);
- dp->ctrl->init(dp->ctrl, flip);
+ dp->ctrl->init(dp->ctrl, flip, dp->usbpd->multi_func);
enable_irq(dp->irq);
dp->core_initialized = true;
}
@@ -581,22 +539,28 @@ static void dp_display_host_deinit(struct dp_display_private *dp)
dp->core_initialized = false;
}
-static void dp_display_process_hpd_low(struct dp_display_private *dp)
+static int dp_display_process_hpd_low(struct dp_display_private *dp)
{
- /* cancel any pending request */
- dp->ctrl->abort(dp->ctrl);
+ int rc = 0;
- if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off) {
- cancel_delayed_work_sync(&dp->hdcp_cb_work);
- dp->hdcp.ops->off(dp->hdcp.data);
+ if (!dp->dp_display.is_connected) {
+ pr_debug("HPD already off\n");
+ return 0;
}
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->off)
+ dp->hdcp.ops->off(dp->hdcp.data);
+
if (dp->audio_supported)
dp->audio->off(dp->audio);
- dp_display_send_hpd_notification(dp, false);
+ rc = dp_display_send_hpd_notification(dp, false);
dp->aux->deinit(dp->aux);
+
+ dp->panel->video_test = false;
+
+ return rc;
}
static int dp_display_usbpd_configure_cb(struct device *dev)
@@ -620,7 +584,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
dp_display_host_init(dp);
if (dp->usbpd->hpd_high)
- dp_display_process_hpd_high(dp);
+ queue_work(dp->wq, &dp->connect_work);
end:
return rc;
}
@@ -640,6 +604,24 @@ static void dp_display_clean(struct dp_display_private *dp)
dp->power_on = false;
}
+static int dp_display_handle_disconnect(struct dp_display_private *dp)
+{
+ int rc;
+
+ rc = dp_display_process_hpd_low(dp);
+
+ mutex_lock(&dp->session_lock);
+ if (rc && dp->power_on)
+ dp_display_clean(dp);
+
+ if (!dp->usbpd->alt_mode_cfg_done)
+ dp_display_host_deinit(dp);
+
+ mutex_unlock(&dp->session_lock);
+
+ return rc;
+}
+
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
int rc = 0;
@@ -658,64 +640,70 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
goto end;
}
+ if (dp->debug->psm_enabled)
+ dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+
/* cancel any pending request */
dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
- if (dp->audio_supported)
- dp->audio->off(dp->audio);
+ /* wait for idle state */
+ flush_workqueue(dp->wq);
- rc = dp_display_send_hpd_notification(dp, false);
-
- mutex_lock(&dp->session_lock);
-
- /* if cable is disconnected, reset psm_enabled flag */
- if (!dp->usbpd->alt_mode_cfg_done)
- dp->link->psm_enabled = false;
-
- if ((rc < 0) && dp->power_on)
- dp_display_clean(dp);
-
- dp_display_host_deinit(dp);
-
- mutex_unlock(&dp->session_lock);
+ dp_display_handle_disconnect(dp);
end:
return rc;
}
-static void dp_display_handle_video_request(struct dp_display_private *dp)
+static void dp_display_attention_work(struct work_struct *work)
{
- if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
- /* force disconnect followed by connect */
- dp->usbpd->connect(dp->usbpd, false);
- dp->panel->video_test = true;
- dp->usbpd->connect(dp->usbpd, true);
- dp->link->send_test_response(dp->link);
- }
-}
+ bool req_handled;
+ struct dp_display_private *dp = container_of(work,
+ struct dp_display_private, attention_work);
-static int dp_display_handle_hpd_irq(struct dp_display_private *dp)
-{
+ if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) {
+ if (!dp->hdcp.ops->cp_irq(dp->hdcp.data))
+ return;
+ }
+
if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
- dp_display_send_hpd_notification(dp, false);
+ dp_display_handle_disconnect(dp);
if (dp_display_is_sink_count_zero(dp)) {
pr_debug("sink count is zero, nothing to do\n");
- return 0;
+ return;
}
- return dp_display_process_hpd_high(dp);
+ queue_work(dp->wq, &dp->connect_work);
+ return;
}
- dp->ctrl->handle_sink_request(dp->ctrl);
+ if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+ dp_display_handle_disconnect(dp);
- dp_display_handle_video_request(dp);
+ dp->panel->video_test = true;
+ dp_display_send_hpd_notification(dp, true);
+ dp->link->send_test_response(dp->link);
- return 0;
+ return;
+ }
+
+ mutex_lock(&dp->audio->ops_lock);
+ req_handled = dp->ctrl->handle_sink_request(dp->ctrl);
+ mutex_unlock(&dp->audio->ops_lock);
+
+ /*
+ * reconfigure audio if test was executed
+ * which could have changed the contoller's state
+ */
+ if (req_handled && dp->audio_supported) {
+ dp->audio->off(dp->audio);
+ dp->audio->on(dp->audio);
+ }
}
static int dp_display_usbpd_attention_cb(struct device *dev)
{
- int rc = 0;
struct dp_display_private *dp;
if (!dev) {
@@ -729,32 +717,36 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
return -ENODEV;
}
- if (dp->usbpd->hpd_irq) {
- dp->hpd_irq_on = true;
+ if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high) {
+ dp->link->process_request(dp->link);
+ queue_work(dp->wq, &dp->attention_work);
+ } else if (dp->usbpd->hpd_high) {
+ queue_work(dp->wq, &dp->connect_work);
+ } else {
+ /* cancel any pending request */
+ dp->ctrl->abort(dp->ctrl);
+ dp->aux->abort(dp->aux);
- if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) {
- if (!dp->hdcp.ops->cp_irq(dp->hdcp.data))
- goto end;
- }
+ /* wait for idle state */
+ flush_workqueue(dp->wq);
- rc = dp->link->process_request(dp->link);
- /* check for any test request issued by sink */
- if (!rc)
- dp_display_handle_hpd_irq(dp);
-
- dp->hpd_irq_on = false;
- goto end;
+ dp_display_handle_disconnect(dp);
}
- if (!dp->usbpd->hpd_high) {
- dp_display_process_hpd_low(dp);
- goto end;
+ return 0;
+}
+
+static void dp_display_connect_work(struct work_struct *work)
+{
+ struct dp_display_private *dp = container_of(work,
+ struct dp_display_private, connect_work);
+
+ if (dp->dp_display.is_connected) {
+ pr_debug("HPD already on\n");
+ return;
}
- if (dp->usbpd->alt_mode_cfg_done)
- dp_display_process_hpd_high(dp);
-end:
- return rc;
+ dp_display_process_hpd_high(dp);
}
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
@@ -806,6 +798,12 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_parser;
}
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ pr_err("device tree parsing failed\n");
+ goto error_catalog;
+ }
+
dp->catalog = dp_catalog_get(dev, &dp->parser->io);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
@@ -909,6 +907,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
static int dp_display_set_mode(struct dp_display *dp_display,
struct dp_display_mode *mode)
{
+ const u32 num_components = 3, default_bpp = 24;
struct dp_display_private *dp;
if (!dp_display) {
@@ -918,8 +917,16 @@ static int dp_display_set_mode(struct dp_display *dp_display,
dp = container_of(dp_display, struct dp_display_private, dp_display);
mutex_lock(&dp->session_lock);
+ mode->timing.bpp =
+ dp_display->connector->display_info.bpc * num_components;
+ if (!mode->timing.bpp)
+ mode->timing.bpp = default_bpp;
+
+ mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel,
+ mode->timing.bpp, mode->timing.pixel_clk_khz);
+
dp->panel->pinfo = mode->timing;
- dp->panel->init_info(dp->panel);
+ dp->panel->init(dp->panel);
mutex_unlock(&dp->session_lock);
return 0;
@@ -949,7 +956,13 @@ static int dp_display_enable(struct dp_display *dp_display)
goto end;
}
+ dp->aux->init(dp->aux, dp->parser->aux_cfg);
+
rc = dp->ctrl->on(dp->ctrl);
+
+ if (dp->debug->tpg_state)
+ dp->panel->tpg_config(dp->panel, true);
+
if (!rc)
dp->power_on = true;
end:
@@ -975,25 +988,27 @@ static int dp_display_post_enable(struct dp_display *dp_display)
goto end;
}
+ dp->panel->spd_config(dp->panel);
+
if (dp->audio_supported) {
dp->audio->bw_code = dp->link->link_params.bw_code;
dp->audio->lane_count = dp->link->link_params.lane_count;
dp->audio->on(dp->audio);
}
- complete_all(&dp->notification_comp);
-
dp_display_update_hdcp_info(dp);
if (dp_display_is_hdcp_enabled(dp)) {
cancel_delayed_work_sync(&dp->hdcp_cb_work);
dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
- queue_delayed_work(dp->hdcp_workqueue,
- &dp->hdcp_cb_work, HZ / 2);
+ queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ / 2);
}
-
end:
+ /* clear framework event notifier */
+ dp_display->send_hpd_event = NULL;
+
+ complete_all(&dp->notification_comp);
mutex_unlock(&dp->session_lock);
return 0;
}
@@ -1024,12 +1039,7 @@ static int dp_display_pre_disable(struct dp_display *dp_display)
dp->hdcp.ops->off(dp->hdcp.data);
}
- if (dp->usbpd->alt_mode_cfg_done && (dp->usbpd->hpd_high ||
- dp->usbpd->forced_disconnect))
- dp->link->psm_config(dp->link, &dp->panel->link_info, true);
-
dp->ctrl->push_idle(dp->ctrl);
-
end:
mutex_unlock(&dp->session_lock);
return 0;
@@ -1054,6 +1064,7 @@ static int dp_display_disable(struct dp_display *dp_display)
}
dp->ctrl->off(dp->ctrl);
+ dp->panel->deinit(dp->panel);
dp->power_on = false;
@@ -1113,10 +1124,35 @@ static int dp_display_unprepare(struct dp_display *dp)
return 0;
}
-static int dp_display_validate_mode(struct dp_display *dp,
- struct dp_display_mode *mode)
+static int dp_display_validate_mode(struct dp_display *dp, u32 mode_pclk_khz)
{
- return 0;
+ const u32 num_components = 3, default_bpp = 24;
+ struct dp_display_private *dp_display;
+ struct drm_dp_link *link_info;
+ u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+
+ if (!dp || !mode_pclk_khz) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ link_info = &dp_display->panel->link_info;
+
+ mode_bpp = dp->connector->display_info.bpc * num_components;
+ if (!mode_bpp)
+ mode_bpp = default_bpp;
+
+ mode_bpp = dp_display->panel->get_mode_bpp(dp_display->panel,
+ mode_bpp, mode_pclk_khz);
+
+ mode_rate_khz = mode_pclk_khz * mode_bpp;
+ supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ if (mode_rate_khz > supported_rate_khz)
+ return MODE_BAD;
+
+ return MODE_OK;
}
static int dp_display_get_modes(struct dp_display *dp,
@@ -1139,36 +1175,35 @@ static int dp_display_get_modes(struct dp_display *dp,
return ret;
}
-static bool dp_display_check_video_test(struct dp_display *dp)
-{
- struct dp_display_private *dp_display;
- if (!dp) {
- pr_err("invalid params\n");
- return false;
+static int dp_display_pre_kickoff(struct dp_display *dp_display,
+ struct drm_msm_ext_hdr_metadata *hdr)
+{
+ struct dp_display_private *dp;
+
+ if (!dp_display) {
+ pr_err("invalid input\n");
+ return -EINVAL;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
- if (dp_display->panel->video_test)
- return true;
-
- return false;
+ return dp->panel->setup_hdr(dp->panel, hdr);
}
-static int dp_display_get_test_bpp(struct dp_display *dp)
+static int dp_display_create_workqueue(struct dp_display_private *dp)
{
- struct dp_display_private *dp_display;
-
- if (!dp) {
- pr_err("invalid params\n");
- return 0;
+ dp->wq = create_singlethread_workqueue("drm_dp");
+ if (IS_ERR_OR_NULL(dp->wq)) {
+ pr_err("Error creating wq\n");
+ return -EPERM;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
+ INIT_WORK(&dp->connect_work, dp_display_connect_work);
+ INIT_WORK(&dp->attention_work, dp_display_attention_work);
- return dp_link_bit_depth_to_bpp(
- dp_display->link->test_video.test_bit_depth);
+ return 0;
}
static int dp_display_probe(struct platform_device *pdev)
@@ -1178,12 +1213,15 @@ static int dp_display_probe(struct platform_device *pdev)
if (!pdev || !pdev->dev.of_node) {
pr_err("pdev not found\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto bail;
}
dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ if (!dp) {
+ rc = -ENOMEM;
+ goto bail;
+ }
init_completion(&dp->notification_comp);
@@ -1192,8 +1230,14 @@ static int dp_display_probe(struct platform_device *pdev)
rc = dp_init_sub_modules(dp);
if (rc) {
- devm_kfree(&pdev->dev, dp);
- return -EPROBE_DEFER;
+ rc = -EPROBE_DEFER;
+ goto err_dev;
+ }
+
+ rc = dp_display_create_workqueue(dp);
+ if (rc) {
+ pr_err("Failed to create workqueue\n");
+ goto err_sub_mod;
}
platform_set_drvdata(pdev, dp);
@@ -1212,16 +1256,21 @@ static int dp_display_probe(struct platform_device *pdev)
g_dp_display->request_irq = dp_request_irq;
g_dp_display->get_debug = dp_get_debug;
g_dp_display->send_hpd_event = dp_display_send_hpd_event;
- g_dp_display->is_video_test = dp_display_check_video_test;
- g_dp_display->get_test_bpp = dp_display_get_test_bpp;
+ g_dp_display->pre_kickoff = dp_display_pre_kickoff;
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
pr_err("component add failed, rc=%d\n", rc);
- dp_display_deinit_sub_modules(dp);
- devm_kfree(&pdev->dev, dp);
+ goto err_sub_mod;
}
+ return 0;
+
+err_sub_mod:
+ dp_display_deinit_sub_modules(dp);
+err_dev:
+ devm_kfree(&pdev->dev, dp);
+bail:
return rc;
}
@@ -1284,7 +1333,7 @@ static int __init dp_display_init(void)
return ret;
}
-module_init(dp_display_init);
+late_initcall(dp_display_init);
static void __exit dp_display_cleanup(void)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 5539d61..2d314c7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -16,6 +16,7 @@
#define _DP_DISPLAY_H_
#include <drm/drmP.h>
+#include <drm/msm_drm.h>
#include "dp_panel.h"
@@ -34,8 +35,7 @@ struct dp_display {
int (*set_mode)(struct dp_display *dp_display,
struct dp_display_mode *mode);
- int (*validate_mode)(struct dp_display *dp_display,
- struct dp_display_mode *mode);
+ int (*validate_mode)(struct dp_display *dp_display, u32 mode_pclk_khz);
int (*get_modes)(struct dp_display *dp_display,
struct dp_display_mode *dp_mode);
int (*prepare)(struct dp_display *dp_display);
@@ -43,8 +43,8 @@ struct dp_display {
int (*request_irq)(struct dp_display *dp_display);
struct dp_debug *(*get_debug)(struct dp_display *dp_display);
void (*send_hpd_event)(struct dp_display *dp_display);
- bool (*is_video_test)(struct dp_display *dp_display);
- int (*get_test_bpp)(struct dp_display *dp_display);
+ int (*pre_kickoff)(struct dp_display *dp_display,
+ struct drm_msm_ext_hdr_metadata *hdr_meta);
};
int dp_display_get_num_of_displays(void);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 170734f..2c29ad2 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -29,8 +29,6 @@
static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
struct dp_display_mode *dp_mode, struct dp_display *dp)
{
- const u32 num_components = 3;
-
memset(dp_mode, 0, sizeof(*dp_mode));
dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -49,15 +47,6 @@ static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
dp_mode->timing.v_front_porch = drm_mode->vsync_start -
drm_mode->vdisplay;
- if (dp->is_video_test(dp))
- dp_mode->timing.bpp = dp->get_test_bpp(dp);
- else
- dp_mode->timing.bpp = dp->connector->display_info.bpc *
- num_components;
-
- if (!dp_mode->timing.bpp)
- dp_mode->timing.bpp = 24;
-
dp_mode->timing.refresh_rate = drm_mode->vrefresh;
dp_mode->timing.pixel_clk_khz = drm_mode->clock;
@@ -254,7 +243,6 @@ static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- int rc = 0;
bool ret = true;
struct dp_display_mode dp_mode;
struct dp_bridge *bridge;
@@ -270,14 +258,7 @@ static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
dp = bridge->display;
convert_to_dp_mode(mode, &dp_mode, dp);
-
- rc = dp->validate_mode(dp, &dp_mode);
- if (rc) {
- pr_err("[%d] mode is not valid, rc=%d\n", bridge->id, rc);
- ret = false;
- } else {
- convert_to_drm_mode(&dp_mode, adjusted_mode);
- }
+ convert_to_drm_mode(&dp_mode, adjusted_mode);
end:
return ret;
}
@@ -292,13 +273,25 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
.mode_set = dp_bridge_mode_set,
};
-int dp_connector_post_init(struct drm_connector *connector,
- void *info,
- void *display)
+int dp_connector_pre_kickoff(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params)
+{
+ struct dp_display *dp = display;
+
+ if (!connector || !display || !params) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ return dp->pre_kickoff(dp, params->hdr_meta);
+}
+
+int dp_connector_post_init(struct drm_connector *connector, void *display)
{
struct dp_display *dp_display = display;
- if (!info || !dp_display)
+ if (!dp_display)
return -EINVAL;
dp_display->connector = connector;
@@ -516,9 +509,6 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
mode->vrefresh = drm_mode_vrefresh(mode);
- if (mode->vrefresh > 60)
- return MODE_BAD;
-
if (mode->clock > dp_disp->max_pclk_khz)
return MODE_BAD;
@@ -528,5 +518,5 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
mode->picture_aspect_ratio != debug->aspect_ratio))
return MODE_BAD;
- return MODE_OK;
+ return dp_disp->validate_mode(dp_disp, mode->clock);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index eb78e71..1673212 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -32,15 +32,23 @@ struct dp_bridge {
};
/**
+ * dp_connector_pre_kickoff - callback to perform pre kickoff initialization
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * @params: Pointer to kickoff parameters
+ * Returns: Zero on success
+ */
+int dp_connector_pre_kickoff(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params);
+
+/**
* dp_connector_post_init - callback to perform additional initialization steps
* @connector: Pointer to drm connector structure
- * @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* Returns: Zero on success
*/
-int dp_connector_post_init(struct drm_connector *connector,
- void *info,
- void *display);
+int dp_connector_post_init(struct drm_connector *connector, void *display);
/**
* dp_connector_detect - callback to determine if connector is connected
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 016e1b8..0e1490f 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -234,7 +234,7 @@ static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl)
static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable)
{
- void __iomem *base = ctrl->init_data.core_io->base;
+ void __iomem *base = ctrl->init_data.dp_ahb->base;
struct dp_hdcp2p2_interrupts *intr = ctrl->intr;
while (intr && intr->reg) {
@@ -740,13 +740,13 @@ static int dp_hdcp2p2_isr(void *input)
struct dp_hdcp2p2_interrupts *intr;
u32 hdcp_int_val = 0;
- if (!ctrl || !ctrl->init_data.core_io) {
+ if (!ctrl || !ctrl->init_data.dp_ahb) {
pr_err("invalid input\n");
rc = -EINVAL;
goto end;
}
- io = ctrl->init_data.core_io;
+ io = ctrl->init_data.dp_ahb;
intr = ctrl->intr;
while (intr && intr->reg) {
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 0cf488d..3ca247c 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -680,7 +680,8 @@ static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel)
case DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
case DP_TEST_PHY_PATTERN_PRBS7:
case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
- case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
return true;
default:
return false;
@@ -986,8 +987,6 @@ static int dp_link_psm_config(struct dp_link *dp_link,
if (ret)
pr_err("Failed to %s low power mode\n",
(enable ? "enter" : "exit"));
- else
- dp_link->psm_enabled = enable;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index b1d9249..6f79b6a 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -86,7 +86,6 @@ struct dp_link_params {
struct dp_link {
u32 sink_request;
u32 test_response;
- bool psm_enabled;
struct dp_link_sink_count sink_count;
struct dp_link_test_video test_video;
@@ -121,9 +120,12 @@ static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
case DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN:
return DP_LINK_ENUM_STR(
DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN);
- case DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN:
- return DP_LINK_ENUM_STR(
- DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN);
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_1:
+ return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_1);
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_2:
+ return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_2);
+ case DP_TEST_PHY_PATTERN_CP2520_PATTERN_3:
+ return DP_LINK_ENUM_STR(DP_TEST_PHY_PATTERN_CP2520_PATTERN_3);
default:
return "unknown";
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index fc3fb56..96f9d3a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -29,7 +29,11 @@ struct dp_panel_private {
struct dp_aux *aux;
struct dp_link *link;
struct dp_catalog_panel *catalog;
- bool aux_cfg_update_done;
+ bool custom_edid;
+ bool custom_dpcd;
+ bool panel_on;
+ u8 spd_vendor_name[8];
+ u8 spd_product_description[16];
};
static const struct dp_panel_info fail_safe = {
@@ -49,6 +53,13 @@ static const struct dp_panel_info fail_safe = {
.bpp = 24,
};
+/* OEM NAME */
+static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109};
+
+/* MODEL NAME */
+static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103,
+ 111, 110, 0, 0, 0, 0, 0, 0};
+
static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
{
int rlen, rc = 0;
@@ -69,12 +80,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
link_info = &dp_panel->link_info;
- rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
- dpcd, (DP_RECEIVER_CAP_SIZE + 1));
- if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
- pr_err("dpcd read failed, rlen=%d\n", rlen);
- rc = -EINVAL;
- goto end;
+ if (!panel->custom_dpcd) {
+ rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+ dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+ pr_err("dpcd read failed, rlen=%d\n", rlen);
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
+ goto end;
+ }
+
+ print_hex_dump(KERN_DEBUG, "[drm-dp] SINK DPCD: ",
+ DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
}
link_info->revision = dp_panel->dpcd[DP_DPCD_REV];
@@ -133,14 +153,12 @@ static int dp_panel_set_default_link_params(struct dp_panel *dp_panel)
link_info->num_lanes = default_num_lanes;
pr_debug("link_rate=%d num_lanes=%d\n",
link_info->rate, link_info->num_lanes);
+
return 0;
}
-static int dp_panel_read_edid(struct dp_panel *dp_panel,
- struct drm_connector *connector)
+static int dp_panel_set_edid(struct dp_panel *dp_panel, u8 *edid)
{
- int retry_cnt = 0;
- const int max_retry = 10;
struct dp_panel_private *panel;
if (!dp_panel) {
@@ -150,18 +168,70 @@ static int dp_panel_read_edid(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- do {
- sde_get_edid(connector, &panel->aux->drm_aux->ddc,
- (void **)&dp_panel->edid_ctrl);
- if (!dp_panel->edid_ctrl->edid) {
- pr_err("EDID read failed\n");
- retry_cnt++;
- panel->aux->reconfig(panel->aux);
- panel->aux_cfg_update_done = true;
- } else {
- return 0;
- }
- } while (retry_cnt < max_retry);
+ if (edid) {
+ dp_panel->edid_ctrl->edid = (struct edid *)edid;
+ panel->custom_edid = true;
+ } else {
+ panel->custom_edid = false;
+ }
+
+ return 0;
+}
+
+static int dp_panel_set_dpcd(struct dp_panel *dp_panel, u8 *dpcd)
+{
+ struct dp_panel_private *panel;
+ u8 *dp_dpcd;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp_dpcd = dp_panel->dpcd;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (dpcd) {
+ memcpy(dp_dpcd, dpcd, DP_RECEIVER_CAP_SIZE + 1);
+ panel->custom_dpcd = true;
+ } else {
+ panel->custom_dpcd = false;
+ }
+
+ return 0;
+}
+
+static int dp_panel_read_edid(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (panel->custom_edid) {
+ pr_debug("skip edid read in debug mode\n");
+ return 0;
+ }
+
+ sde_get_edid(connector, &panel->aux->drm_aux->ddc,
+ (void **)&dp_panel->edid_ctrl);
+ if (!dp_panel->edid_ctrl->edid) {
+ pr_err("EDID read failed\n");
+ } else {
+ u8 *buf = (u8 *)dp_panel->edid_ctrl->edid;
+ u32 size = buf[0x7E] ? 256 : 128;
+
+ print_hex_dump(KERN_DEBUG, "[drm-dp] SINK EDID: ",
+ DUMP_PREFIX_NONE, 16, 1, buf, size, false);
+
+ return 0;
+ }
return -EINVAL;
}
@@ -185,6 +255,10 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
dp_panel->link_info.num_lanes) ||
((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) >
dp_panel->max_bw_code)) {
+ if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) {
+ pr_err("DPCD read failed, return early\n");
+ return rc;
+ }
pr_err("panel dpcd read failed/incorrect, set default params\n");
dp_panel_set_default_link_params(dp_panel);
}
@@ -195,41 +269,51 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
return rc;
}
- if (panel->aux_cfg_update_done) {
- pr_debug("read DPCD with updated AUX config\n");
- dp_panel_read_dpcd(dp_panel);
- panel->aux_cfg_update_done = false;
- }
-
return 0;
}
-static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel)
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct drm_dp_link *link_info;
- const u8 num_components = 3;
- u32 bpc = 0, bpp = 0, max_data_rate_khz = 0, max_pclk_rate_khz = 0;
+ const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+ u32 bpp = 0, data_rate_khz = 0;
- if (!dp_panel) {
+ bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+ link_info = &dp_panel->link_info;
+ data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ while (bpp > min_supported_bpp) {
+ if (mode_pclk_khz * bpp <= data_rate_khz)
+ break;
+ bpp -= 6;
+ }
+
+ return bpp;
+}
+
+static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+ struct dp_panel_private *panel;
+ u32 bpp = mode_edid_bpp;
+
+ if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
pr_err("invalid input\n");
return 0;
}
- link_info = &dp_panel->link_info;
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- bpc = sde_get_sink_bpc(dp_panel->edid_ctrl);
- bpp = bpc * num_components;
- if (!bpp)
- bpp = DP_PANEL_DEFAULT_BPP;
+ if (dp_panel->video_test)
+ bpp = dp_link_bit_depth_to_bpp(
+ panel->link->test_video.test_bit_depth);
+ else
+ bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+ mode_pclk_khz);
- max_data_rate_khz = (link_info->num_lanes * link_info->rate * 8);
- max_pclk_rate_khz = max_data_rate_khz / bpp;
-
- pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, link_info->num_lanes);
- pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n",
- max_data_rate_khz, max_pclk_rate_khz);
-
- return max_pclk_rate_khz;
+ return bpp;
}
static void dp_panel_set_test_mode(struct dp_panel_private *panel,
@@ -320,6 +404,58 @@ static void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
}
}
+static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+ u32 hsync_start_x, hsync_end_x;
+ struct dp_catalog_panel *catalog;
+ struct dp_panel_private *panel;
+ struct dp_panel_info *pinfo;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+ pinfo = &panel->dp_panel.pinfo;
+
+ if (!panel->panel_on) {
+ pr_debug("DP panel not enabled, handle TPG on next panel on\n");
+ return;
+ }
+
+ if (!enable) {
+ panel->catalog->tpg_config(catalog, false);
+ return;
+ }
+
+ /* TPG config */
+ catalog->hsync_period = pinfo->h_sync_width + pinfo->h_back_porch +
+ pinfo->h_active + pinfo->h_front_porch;
+ catalog->vsync_period = pinfo->v_sync_width + pinfo->v_back_porch +
+ pinfo->v_active + pinfo->v_front_porch;
+
+ catalog->display_v_start = ((pinfo->v_sync_width +
+ pinfo->v_back_porch) * catalog->hsync_period);
+ catalog->display_v_end = ((catalog->vsync_period -
+ pinfo->v_front_porch) * catalog->hsync_period) - 1;
+
+ catalog->display_v_start += pinfo->h_sync_width + pinfo->h_back_porch;
+ catalog->display_v_end -= pinfo->h_front_porch;
+
+ hsync_start_x = pinfo->h_back_porch + pinfo->h_sync_width;
+ hsync_end_x = catalog->hsync_period - pinfo->h_front_porch - 1;
+
+ catalog->v_sync_width = pinfo->v_sync_width;
+
+ catalog->hsync_ctl = (catalog->hsync_period << 16) |
+ pinfo->h_sync_width;
+ catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ panel->catalog->tpg_config(catalog, true);
+}
+
static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
{
int rc = 0;
@@ -379,38 +515,27 @@ static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
catalog->dp_active = data;
panel->catalog->timing_cfg(catalog);
+ panel->panel_on = true;
end:
return rc;
}
-static int dp_panel_edid_register(struct dp_panel *dp_panel)
+static int dp_panel_edid_register(struct dp_panel_private *panel)
{
int rc = 0;
- if (!dp_panel) {
- pr_err("invalid input\n");
- rc = -EINVAL;
- goto end;
- }
-
- dp_panel->edid_ctrl = sde_edid_init();
- if (!dp_panel->edid_ctrl) {
+ panel->dp_panel.edid_ctrl = sde_edid_init();
+ if (!panel->dp_panel.edid_ctrl) {
pr_err("sde edid init for DP failed\n");
rc = -ENOMEM;
- goto end;
}
-end:
+
return rc;
}
-static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+static void dp_panel_edid_deregister(struct dp_panel_private *panel)
{
- if (!dp_panel) {
- pr_err("invalid input\n");
- return;
- }
-
- sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+ sde_edid_deinit((void **)&panel->dp_panel.edid_ctrl);
}
static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
@@ -445,13 +570,31 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
pr_info("bpp = %d\n", pinfo->bpp);
pr_info("active low (h|v)=(%d|%d)\n", pinfo->h_active_low,
pinfo->v_active_low);
-
- pinfo->bpp = max_t(u32, 18, min_t(u32, pinfo->bpp, 30));
- pr_info("updated bpp = %d\n", pinfo->bpp);
end:
return rc;
}
+static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (!panel->custom_edid)
+ sde_free_edid((void **)&dp_panel->edid_ctrl);
+
+ memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
+ panel->panel_on = false;
+
+ return rc;
+}
+
static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
{
const u32 encoding_factx10 = 8;
@@ -478,6 +621,120 @@ static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
return min_link_rate_khz;
}
+enum dp_panel_hdr_pixel_encoding {
+ RGB,
+ YCbCr444,
+ YCbCr422,
+ YCbCr420,
+ YONLY,
+ RAW,
+};
+
+enum dp_panel_hdr_rgb_colorimetry {
+ sRGB,
+ RGB_WIDE_GAMUT_FIXED_POINT,
+ RGB_WIDE_GAMUT_FLOATING_POINT,
+ ADOBERGB,
+ DCI_P3,
+ CUSTOM_COLOR_PROFILE,
+ ITU_R_BT_2020_RGB,
+};
+
+enum dp_panel_hdr_dynamic_range {
+ VESA,
+ CEA,
+};
+
+enum dp_panel_hdr_content_type {
+ NOT_DEFINED,
+ GRAPHICS,
+ PHOTO,
+ VIDEO,
+ GAME,
+};
+
+static int dp_panel_setup_hdr(struct dp_panel *dp_panel,
+ struct drm_msm_ext_hdr_metadata *hdr_meta)
+{
+ int rc = 0;
+ struct dp_panel_private *panel;
+ struct dp_catalog_hdr_data *hdr;
+
+ if (!hdr_meta || !hdr_meta->hdr_state)
+ goto end;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ hdr = &panel->catalog->hdr_data;
+
+ hdr->ext_header_byte0 = 0x00;
+ hdr->ext_header_byte1 = 0x04;
+ hdr->ext_header_byte2 = 0x1F;
+ hdr->ext_header_byte3 = 0x00;
+
+ hdr->vsc_header_byte0 = 0x00;
+ hdr->vsc_header_byte1 = 0x07;
+ hdr->vsc_header_byte2 = 0x05;
+ hdr->vsc_header_byte3 = 0x13;
+
+ /* VSC SDP Payload for DB16 */
+ hdr->pixel_encoding = RGB;
+ hdr->colorimetry = ITU_R_BT_2020_RGB;
+
+ /* VSC SDP Payload for DB17 */
+ hdr->dynamic_range = CEA;
+
+ /* VSC SDP Payload for DB18 */
+ hdr->content_type = GRAPHICS;
+
+ hdr->bpc = dp_panel->pinfo.bpp / 3;
+
+ hdr->vscext_header_byte0 = 0x00;
+ hdr->vscext_header_byte1 = 0x87;
+ hdr->vscext_header_byte2 = 0x1D;
+ hdr->vscext_header_byte3 = 0x13 << 2;
+
+ hdr->version = 0x01;
+ hdr->length = 0x1A;
+
+ memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
+
+ panel->catalog->config_hdr(panel->catalog);
+end:
+ return rc;
+}
+
+static int dp_panel_spd_config(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!dp_panel->spd_enabled) {
+ pr_debug("SPD Infoframe not enabled\n");
+ goto end;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ panel->catalog->spd_vendor_name = panel->spd_vendor_name;
+ panel->catalog->spd_product_description =
+ panel->spd_product_description;
+ panel->catalog->config_spd(panel->catalog);
+end:
+ return rc;
+}
+
struct dp_panel *dp_panel_get(struct dp_panel_in *in)
{
int rc = 0;
@@ -502,18 +759,26 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
panel->link = in->link;
dp_panel = &panel->dp_panel;
- panel->aux_cfg_update_done = false;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ dp_panel->spd_enabled = true;
+ memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
+ memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
- dp_panel->sde_edid_register = dp_panel_edid_register;
- dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
- dp_panel->init_info = dp_panel_init_panel_info;
+ dp_panel->init = dp_panel_init_panel_info;
+ dp_panel->deinit = dp_panel_deinit_panel_info;
dp_panel->timing_cfg = dp_panel_timing_cfg;
dp_panel->read_sink_caps = dp_panel_read_sink_caps;
dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
- dp_panel->get_max_pclk = dp_panel_get_max_pclk;
+ dp_panel->get_mode_bpp = dp_panel_get_mode_bpp;
dp_panel->get_modes = dp_panel_get_modes;
dp_panel->handle_sink_request = dp_panel_handle_sink_request;
+ dp_panel->set_edid = dp_panel_set_edid;
+ dp_panel->set_dpcd = dp_panel_set_dpcd;
+ dp_panel->tpg_config = dp_panel_tpg_config;
+ dp_panel->spd_config = dp_panel_spd_config;
+ dp_panel->setup_hdr = dp_panel_setup_hdr;
+
+ dp_panel_edid_register(panel);
return dp_panel;
error:
@@ -529,5 +794,6 @@ void dp_panel_put(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ dp_panel_edid_deregister(panel);
devm_kfree(panel->dev, panel);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 01a978a..128f694 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -15,6 +15,8 @@
#ifndef _DP_PANEL_H_
#define _DP_PANEL_H_
+#include <drm/msm_drm.h>
+
#include "dp_aux.h"
#include "dp_link.h"
#include "dp_usbpd.h"
@@ -59,14 +61,14 @@ struct dp_panel_in {
struct dp_panel {
/* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS];
struct drm_dp_link link_info;
struct sde_edid_ctrl *edid_ctrl;
- struct drm_connector *connector;
struct dp_panel_info pinfo;
bool video_test;
+ bool spd_enabled;
u32 vic;
u32 max_pclk_khz;
@@ -74,17 +76,23 @@ struct dp_panel {
/* debug */
u32 max_bw_code;
- int (*sde_edid_register)(struct dp_panel *dp_panel);
- void (*sde_edid_deregister)(struct dp_panel *dp_panel);
- int (*init_info)(struct dp_panel *dp_panel);
+ int (*init)(struct dp_panel *dp_panel);
+ int (*deinit)(struct dp_panel *dp_panel);
int (*timing_cfg)(struct dp_panel *dp_panel);
int (*read_sink_caps)(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
- u32 (*get_max_pclk)(struct dp_panel *dp_panel);
+ u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
+ u32 mode_pclk_khz);
int (*get_modes)(struct dp_panel *dp_panel,
struct drm_connector *connector, struct dp_display_mode *mode);
void (*handle_sink_request)(struct dp_panel *dp_panel);
+ int (*set_edid)(struct dp_panel *dp_panel, u8 *edid);
+ int (*set_dpcd)(struct dp_panel *dp_panel, u8 *dpcd);
+ int (*setup_hdr)(struct dp_panel *dp_panel,
+ struct drm_msm_ext_hdr_metadata *hdr_meta);
+ void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
+ int (*spd_config)(struct dp_panel *dp_panel);
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index c8da99a..c112cdc 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -22,7 +22,10 @@ static void dp_parser_unmap_io_resources(struct dp_parser *parser)
{
struct dp_io *io = &parser->io;
- msm_dss_iounmap(&io->ctrl_io);
+ msm_dss_iounmap(&io->dp_ahb);
+ msm_dss_iounmap(&io->dp_aux);
+ msm_dss_iounmap(&io->dp_link);
+ msm_dss_iounmap(&io->dp_p0);
msm_dss_iounmap(&io->phy_io);
msm_dss_iounmap(&io->ln_tx0_io);
msm_dss_iounmap(&io->ln_tx0_io);
@@ -47,7 +50,25 @@ static int dp_parser_ctrl_res(struct dp_parser *parser)
goto err;
}
- rc = msm_dss_ioremap_byname(pdev, &io->ctrl_io, "dp_ctrl");
+ rc = msm_dss_ioremap_byname(pdev, &io->dp_ahb, "dp_ahb");
+ if (rc) {
+ pr_err("unable to remap dp io resources\n");
+ goto err;
+ }
+
+ rc = msm_dss_ioremap_byname(pdev, &io->dp_aux, "dp_aux");
+ if (rc) {
+ pr_err("unable to remap dp io resources\n");
+ goto err;
+ }
+
+ rc = msm_dss_ioremap_byname(pdev, &io->dp_link, "dp_link");
+ if (rc) {
+ pr_err("unable to remap dp io resources\n");
+ goto err;
+ }
+
+ rc = msm_dss_ioremap_byname(pdev, &io->dp_p0, "dp_p0");
if (rc) {
pr_err("unable to remap dp io resources\n");
goto err;
@@ -240,6 +261,9 @@ static int dp_parser_gpio(struct dp_parser *parser)
mp->gpio_config = devm_kzalloc(dev,
sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL);
+ if (!mp->gpio_config)
+ return -ENOMEM;
+
mp->num_gpio = ARRAY_SIZE(dp_gpios);
for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) {
@@ -438,6 +462,22 @@ static void dp_parser_put_clk_data(struct device *dev,
mp->num_clk = 0;
}
+static void dp_parser_put_gpio_data(struct device *dev,
+ struct dss_module_power *mp)
+{
+ if (!mp) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (mp->gpio_config) {
+ devm_kfree(dev, mp->gpio_config);
+ mp->gpio_config = NULL;
+ }
+
+ mp->num_gpio = 0;
+}
+
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
int num_clk = 0, i = 0, rc = 0;
@@ -634,11 +674,9 @@ void dp_parser_put(struct dp_parser *parser)
power = parser->mp;
for (i = 0; i < DP_MAX_PM; i++) {
- struct dss_module_power *mp = &power[i];
-
- devm_kfree(&parser->pdev->dev, mp->clk_config);
- devm_kfree(&parser->pdev->dev, mp->vreg_config);
- devm_kfree(&parser->pdev->dev, mp->gpio_config);
+ dp_parser_put_clk_data(&parser->pdev->dev, &power[i]);
+ dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]);
+ dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]);
}
devm_kfree(&parser->pdev->dev, parser);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 76a72a2..72da381 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -58,7 +58,10 @@ struct dp_display_data {
/**
* struct dp_ctrl_resource - controller's IO related data
*
- * @ctrl_io: controller's mapped memory address
+ * @dp_ahb: controller's ahb mapped memory address
+ * @dp_aux: controller's aux mapped memory address
+ * @dp_link: controller's link mapped memory address
+ * @dp_p0: controller's p0 mapped memory address
* @phy_io: phy's mapped memory address
* @ln_tx0_io: USB-DP lane TX0's mapped memory address
* @ln_tx1_io: USB-DP lane TX1's mapped memory address
@@ -70,6 +73,10 @@ struct dp_display_data {
*/
struct dp_io {
struct dss_io_data ctrl_io;
+ struct dss_io_data dp_ahb;
+ struct dss_io_data dp_aux;
+ struct dss_io_data dp_link;
+ struct dss_io_data dp_p0;
struct dss_io_data phy_io;
struct dss_io_data ln_tx0_io;
struct dss_io_data ln_tx1_io;
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 25d035d..4e2194e 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -25,137 +25,158 @@
#define DP_INTR_STATUS2 (0x00000024)
#define DP_INTR_STATUS3 (0x00000028)
-#define DP_DP_HPD_CTRL (0x00000200)
-#define DP_DP_HPD_INT_STATUS (0x00000204)
-#define DP_DP_HPD_INT_ACK (0x00000208)
-#define DP_DP_HPD_INT_MASK (0x0000020C)
-#define DP_DP_HPD_REFTIMER (0x00000218)
-#define DP_DP_HPD_EVENT_TIME_0 (0x0000021C)
-#define DP_DP_HPD_EVENT_TIME_1 (0x00000220)
-#define DP_AUX_CTRL (0x00000230)
-#define DP_AUX_DATA (0x00000234)
-#define DP_AUX_TRANS_CTRL (0x00000238)
-#define DP_TIMEOUT_COUNT (0x0000023C)
-#define DP_AUX_LIMITS (0x00000240)
-#define DP_AUX_STATUS (0x00000244)
+#define DP_DP_HPD_CTRL (0x00000000)
+#define DP_DP_HPD_INT_STATUS (0x00000004)
+#define DP_DP_HPD_INT_ACK (0x00000008)
+#define DP_DP_HPD_INT_MASK (0x0000000C)
+#define DP_DP_HPD_REFTIMER (0x00000018)
+#define DP_DP_HPD_EVENT_TIME_0 (0x0000001C)
+#define DP_DP_HPD_EVENT_TIME_1 (0x00000020)
+#define DP_AUX_CTRL (0x00000030)
+#define DP_AUX_DATA (0x00000034)
+#define DP_AUX_TRANS_CTRL (0x00000038)
+#define DP_TIMEOUT_COUNT (0x0000003C)
+#define DP_AUX_LIMITS (0x00000040)
+#define DP_AUX_STATUS (0x00000044)
#define DP_DPCD_CP_IRQ (0x201)
#define DP_DPCD_RXSTATUS (0x69493)
-#define DP_INTERRUPT_TRANS_NUM (0x000002A0)
+#define DP_INTERRUPT_TRANS_NUM (0x000000A0)
-#define DP_MAINLINK_CTRL (0x00000400)
-#define DP_STATE_CTRL (0x00000404)
-#define DP_CONFIGURATION_CTRL (0x00000408)
-#define DP_SOFTWARE_MVID (0x00000410)
-#define DP_SOFTWARE_NVID (0x00000418)
-#define DP_TOTAL_HOR_VER (0x0000041C)
-#define DP_START_HOR_VER_FROM_SYNC (0x00000420)
-#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424)
-#define DP_ACTIVE_HOR_VER (0x00000428)
-#define DP_MISC1_MISC0 (0x0000042C)
-#define DP_VALID_BOUNDARY (0x00000430)
-#define DP_VALID_BOUNDARY_2 (0x00000434)
-#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING (0x00000438)
+#define DP_MAINLINK_CTRL (0x00000000)
+#define DP_STATE_CTRL (0x00000004)
+#define DP_CONFIGURATION_CTRL (0x00000008)
+#define DP_SOFTWARE_MVID (0x00000010)
+#define DP_SOFTWARE_NVID (0x00000018)
+#define DP_TOTAL_HOR_VER (0x0000001C)
+#define DP_START_HOR_VER_FROM_SYNC (0x00000020)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024)
+#define DP_ACTIVE_HOR_VER (0x00000028)
+#define DP_MISC1_MISC0 (0x0000002C)
+#define DP_VALID_BOUNDARY (0x00000030)
+#define DP_VALID_BOUNDARY_2 (0x00000034)
+#define DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038)
-#define DP_MAINLINK_READY (0x00000440)
-#define DP_MAINLINK_LEVELS (0x00000444)
-#define DP_TU (0x0000044C)
+#define DP_MAINLINK_READY (0x00000040)
+#define DP_MAINLINK_LEVELS (0x00000044)
+#define DP_TU (0x0000004C)
-#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000454)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000004C0)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000004C4)
-#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000004C8)
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8)
-#define MMSS_DP_MISC1_MISC0 (0x0000042C)
-#define MMSS_DP_AUDIO_TIMING_GEN (0x00000480)
-#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000484)
-#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000488)
-#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000048C)
-#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000490)
-#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000494)
-#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000498)
+#define MMSS_DP_MISC1_MISC0 (0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098)
-#define MMSS_DP_PSR_CRC_RG (0x00000554)
-#define MMSS_DP_PSR_CRC_B (0x00000558)
+#define MMSS_DP_PSR_CRC_RG (0x00000154)
+#define MMSS_DP_PSR_CRC_B (0x00000158)
-#define DP_COMPRESSION_MODE_CTRL (0x00000580)
+#define DP_COMPRESSION_MODE_CTRL (0x00000180)
-#define MMSS_DP_AUDIO_CFG (0x00000600)
-#define MMSS_DP_AUDIO_STATUS (0x00000604)
-#define MMSS_DP_AUDIO_PKT_CTRL (0x00000608)
-#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000060C)
-#define MMSS_DP_AUDIO_ACR_CTRL (0x00000610)
-#define MMSS_DP_AUDIO_CTRL_RESET (0x00000614)
+#define MMSS_DP_AUDIO_CFG (0x00000200)
+#define MMSS_DP_AUDIO_STATUS (0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
-#define MMSS_DP_SDP_CFG (0x00000628)
-#define MMSS_DP_SDP_CFG2 (0x0000062C)
-#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000630)
-#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000634)
+#define MMSS_DP_SDP_CFG (0x00000228)
+#define MMSS_DP_SDP_CFG2 (0x0000022C)
+#define MMSS_DP_SDP_CFG3 (0x0000024C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
-#define MMSS_DP_AUDIO_STREAM_0 (0x00000640)
-#define MMSS_DP_AUDIO_STREAM_1 (0x00000644)
+#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
-#define MMSS_DP_EXTENSION_0 (0x00000650)
-#define MMSS_DP_EXTENSION_1 (0x00000654)
-#define MMSS_DP_EXTENSION_2 (0x00000658)
-#define MMSS_DP_EXTENSION_3 (0x0000065C)
-#define MMSS_DP_EXTENSION_4 (0x00000660)
-#define MMSS_DP_EXTENSION_5 (0x00000664)
-#define MMSS_DP_EXTENSION_6 (0x00000668)
-#define MMSS_DP_EXTENSION_7 (0x0000066C)
-#define MMSS_DP_EXTENSION_8 (0x00000670)
-#define MMSS_DP_EXTENSION_9 (0x00000674)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000678)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000067C)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000680)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000684)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000688)
-#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000068C)
-#define MMSS_DP_AUDIO_ISRC_0 (0x00000690)
-#define MMSS_DP_AUDIO_ISRC_1 (0x00000694)
-#define MMSS_DP_AUDIO_ISRC_2 (0x00000698)
-#define MMSS_DP_AUDIO_ISRC_3 (0x0000069C)
-#define MMSS_DP_AUDIO_ISRC_4 (0x000006A0)
-#define MMSS_DP_AUDIO_ISRC_5 (0x000006A4)
-#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000006A8)
-#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000006AC)
-#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000006B0)
+#define MMSS_DP_EXTENSION_0 (0x00000250)
+#define MMSS_DP_EXTENSION_1 (0x00000254)
+#define MMSS_DP_EXTENSION_2 (0x00000258)
+#define MMSS_DP_EXTENSION_3 (0x0000025C)
+#define MMSS_DP_EXTENSION_4 (0x00000260)
+#define MMSS_DP_EXTENSION_5 (0x00000264)
+#define MMSS_DP_EXTENSION_6 (0x00000268)
+#define MMSS_DP_EXTENSION_7 (0x0000026C)
+#define MMSS_DP_EXTENSION_8 (0x00000270)
+#define MMSS_DP_EXTENSION_9 (0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0 (0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2 (0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0)
-#define MMSS_DP_GENERIC0_0 (0x00000700)
-#define MMSS_DP_GENERIC0_1 (0x00000704)
-#define MMSS_DP_GENERIC0_2 (0x00000708)
-#define MMSS_DP_GENERIC0_3 (0x0000070C)
-#define MMSS_DP_GENERIC0_4 (0x00000710)
-#define MMSS_DP_GENERIC0_5 (0x00000714)
-#define MMSS_DP_GENERIC0_6 (0x00000718)
-#define MMSS_DP_GENERIC0_7 (0x0000071C)
-#define MMSS_DP_GENERIC0_8 (0x00000720)
-#define MMSS_DP_GENERIC0_9 (0x00000724)
-#define MMSS_DP_GENERIC1_0 (0x00000728)
-#define MMSS_DP_GENERIC1_1 (0x0000072C)
-#define MMSS_DP_GENERIC1_2 (0x00000730)
-#define MMSS_DP_GENERIC1_3 (0x00000734)
-#define MMSS_DP_GENERIC1_4 (0x00000738)
-#define MMSS_DP_GENERIC1_5 (0x0000073C)
-#define MMSS_DP_GENERIC1_6 (0x00000740)
-#define MMSS_DP_GENERIC1_7 (0x00000744)
-#define MMSS_DP_GENERIC1_8 (0x00000748)
-#define MMSS_DP_GENERIC1_9 (0x0000074C)
+#define MMSS_DP_GENERIC0_0 (0x00000300)
+#define MMSS_DP_GENERIC0_1 (0x00000304)
+#define MMSS_DP_GENERIC0_2 (0x00000308)
+#define MMSS_DP_GENERIC0_3 (0x0000030C)
+#define MMSS_DP_GENERIC0_4 (0x00000310)
+#define MMSS_DP_GENERIC0_5 (0x00000314)
+#define MMSS_DP_GENERIC0_6 (0x00000318)
+#define MMSS_DP_GENERIC0_7 (0x0000031C)
+#define MMSS_DP_GENERIC0_8 (0x00000320)
+#define MMSS_DP_GENERIC0_9 (0x00000324)
+#define MMSS_DP_GENERIC1_0 (0x00000328)
+#define MMSS_DP_GENERIC1_1 (0x0000032C)
+#define MMSS_DP_GENERIC1_2 (0x00000330)
+#define MMSS_DP_GENERIC1_3 (0x00000334)
+#define MMSS_DP_GENERIC1_4 (0x00000338)
+#define MMSS_DP_GENERIC1_5 (0x0000033C)
+#define MMSS_DP_GENERIC1_6 (0x00000340)
+#define MMSS_DP_GENERIC1_7 (0x00000344)
+#define MMSS_DP_GENERIC1_8 (0x00000348)
+#define MMSS_DP_GENERIC1_9 (0x0000034C)
-#define MMSS_DP_VSCEXT_0 (0x000006D0)
-#define MMSS_DP_VSCEXT_1 (0x000006D4)
-#define MMSS_DP_VSCEXT_2 (0x000006D8)
-#define MMSS_DP_VSCEXT_3 (0x000006DC)
-#define MMSS_DP_VSCEXT_4 (0x000006E0)
-#define MMSS_DP_VSCEXT_5 (0x000006E4)
-#define MMSS_DP_VSCEXT_6 (0x000006E8)
-#define MMSS_DP_VSCEXT_7 (0x000006EC)
-#define MMSS_DP_VSCEXT_8 (0x000006F0)
-#define MMSS_DP_VSCEXT_9 (0x000006F4)
+#define MMSS_DP_VSCEXT_0 (0x000002D0)
+#define MMSS_DP_VSCEXT_1 (0x000002D4)
+#define MMSS_DP_VSCEXT_2 (0x000002D8)
+#define MMSS_DP_VSCEXT_3 (0x000002DC)
+#define MMSS_DP_VSCEXT_4 (0x000002E0)
+#define MMSS_DP_VSCEXT_5 (0x000002E4)
+#define MMSS_DP_VSCEXT_6 (0x000002E8)
+#define MMSS_DP_VSCEXT_7 (0x000002EC)
+#define MMSS_DP_VSCEXT_8 (0x000002F0)
+#define MMSS_DP_VSCEXT_9 (0x000002F4)
-#define MMSS_DP_TIMING_ENGINE_EN (0x00000A10)
-#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000A88)
+#define MMSS_DP_BIST_ENABLE (0x00000000)
+#define MMSS_DP_TIMING_ENGINE_EN (0x00000010)
+#define MMSS_DP_INTF_CONFIG (0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL (0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL (0x00000058)
+#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060)
+#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064)
+#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088)
/*DP PHY Register offsets */
#define DP_PHY_REVISION_ID0 (0x00000000)
@@ -197,14 +218,14 @@
/* DP HDCP 1.3 registers */
#define DP_HDCP_CTRL (0x0A0)
#define DP_HDCP_STATUS (0x0A4)
-#define DP_HDCP_SW_UPPER_AKSV (0x298)
-#define DP_HDCP_SW_LOWER_AKSV (0x29C)
-#define DP_HDCP_ENTROPY_CTRL0 (0x750)
-#define DP_HDCP_ENTROPY_CTRL1 (0x75C)
+#define DP_HDCP_SW_UPPER_AKSV (0x098)
+#define DP_HDCP_SW_LOWER_AKSV (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0 (0x350)
+#define DP_HDCP_ENTROPY_CTRL1 (0x35C)
#define DP_HDCP_SHA_STATUS (0x0C8)
#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
-#define DP_HDCP_RCVPORT_DATA3 (0x2A4)
-#define DP_HDCP_RCVPORT_DATA4 (0x2A8)
+#define DP_HDCP_RCVPORT_DATA3 (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4 (0x0A8)
#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 98781abb..3ddc499 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -64,6 +64,7 @@ struct dp_usbpd_capabilities {
};
struct dp_usbpd_private {
+ bool forced_disconnect;
u32 vdo;
struct device *dev;
struct usbpd *pd;
@@ -345,7 +346,7 @@ static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
break;
case USBPD_SVDM_ATTENTION:
- if (pd->dp_usbpd.forced_disconnect)
+ if (pd->forced_disconnect)
break;
pd->vdo = *vdos;
@@ -396,7 +397,7 @@ static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd,
}
}
-static int dp_usbpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+static int dp_usbpd_simulate_connect(struct dp_usbpd *dp_usbpd, bool hpd)
{
int rc = 0;
struct dp_usbpd_private *pd;
@@ -410,7 +411,7 @@ static int dp_usbpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
dp_usbpd->hpd_high = hpd;
- dp_usbpd->forced_disconnect = !hpd;
+ pd->forced_disconnect = !hpd;
if (hpd)
pd->dp_cb->configure(pd->dev);
@@ -469,7 +470,7 @@ struct dp_usbpd *dp_usbpd_get(struct device *dev, struct dp_usbpd_cb *cb)
}
dp_usbpd = &usbpd->dp_usbpd;
- dp_usbpd->connect = dp_usbpd_connect;
+ dp_usbpd->simulate_connect = dp_usbpd_simulate_connect;
return dp_usbpd;
error:
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index 5b392f5..e70ad7d 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -49,7 +49,7 @@ enum dp_usbpd_port {
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
- * @connect: simulate disconnect or connect for debug mode
+ * @simulate_connect: simulate disconnect or connect for debug mode
*/
struct dp_usbpd {
enum dp_usbpd_port port;
@@ -63,9 +63,8 @@ struct dp_usbpd {
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
- bool forced_disconnect;
- int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+ int (*simulate_connect)(struct dp_usbpd *dp_usbpd, bool hpd);
};
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 5d9d21f..0ddb47f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -62,6 +62,11 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
+ ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset;
+ ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr;
+ ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl;
+ ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask;
+ ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version;
switch (version) {
case DSI_CTRL_VERSION_1_4:
@@ -76,6 +81,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
ctrl->ops.reg_dump_to_buffer =
dsi_ctrl_hw_14_reg_dump_to_buffer;
+ ctrl->ops.schedule_dma_cmd = NULL;
+ ctrl->ops.get_cont_splash_status = NULL;
break;
case DSI_CTRL_VERSION_2_0:
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
@@ -88,9 +95,13 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
+ ctrl->ops.schedule_dma_cmd = NULL;
+ ctrl->ops.get_cont_splash_status = NULL;
break;
case DSI_CTRL_VERSION_2_2:
ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config;
+ ctrl->ops.get_cont_splash_status =
+ dsi_ctrl_hw_22_get_cont_splash_status;
ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
ctrl->ops.wait_for_lane_idle =
dsi_ctrl_hw_20_wait_for_lane_idle;
@@ -101,6 +112,7 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.ulps_ops.get_lanes_in_ulps = NULL;
ctrl->ops.clamp_enable = NULL;
ctrl->ops.clamp_disable = NULL;
+ ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
break;
default:
break;
@@ -113,6 +125,7 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
* @version: DSI controller version.
* @index: DSI controller instance ID.
* @phy_isolation_enabled: DSI controller works isolated from phy.
+ * @null_insertion_enabled: DSI controller inserts null packet.
*
* This function setups the catalog information in the dsi_ctrl_hw object.
*
@@ -120,7 +133,7 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
*/
int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_ctrl_version version, u32 index,
- bool phy_isolation_enabled)
+ bool phy_isolation_enabled, bool null_insertion_enabled)
{
int rc = 0;
@@ -131,6 +144,7 @@ int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
}
ctrl->index = index;
+ ctrl->null_insertion_enabled = null_insertion_enabled;
set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
@@ -194,6 +208,7 @@ static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
phy->ops.ulps_ops.is_lanes_in_ulps =
dsi_phy_hw_v3_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
+ phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
}
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 186a5b5..735f61f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -24,6 +24,7 @@
* @version: DSI controller version.
* @index: DSI controller instance ID.
* @phy_isolation_enabled: DSI controller works isolated from phy.
+ * @null_insertion_enabled: DSI controller inserts null packet.
*
* This function setups the catalog information in the dsi_ctrl_hw object.
*
@@ -31,7 +32,7 @@
*/
int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
enum dsi_ctrl_version version, u32 index,
- bool phy_isolation_enabled);
+ bool phy_isolation_enabled, bool null_insertion_enabled);
/**
* dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
@@ -101,6 +102,7 @@ u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
+int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
/* DSI controller common ops */
u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
@@ -176,6 +178,14 @@ u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
u32 rx_byte,
u32 pkt_size, u32 *hw_read_cnt);
void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_on);
+int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
+ int mask);
+void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx,
+ bool en);
+void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en);
+u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl);
+u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl);
/* Definitions specific to 1.4 DSI controller hardware */
int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
@@ -204,4 +214,7 @@ ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
char *buf,
u32 size);
+/* Definitions specific to 2.2 DSI controller hardware */
+bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl);
+
#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index 2a84a2d..1fd10d9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -198,6 +198,13 @@ struct dsi_clk_link_set {
};
/**
+ * dsi_display_clk_mngr_update_splash_status() - Update splash status
+ * @clk_mgr: Structure containing DSI clock information
+ * @status: Splash status
+ */
+void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status);
+
+/**
* dsi_display_clk_mgr_register() - Register DSI clock manager
* @info: Structure containing DSI clock information
*/
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 560964e..61406fe 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -46,6 +46,7 @@ struct dsi_clk_mngr {
post_clockon_cb post_clkon_cb;
pre_clockon_cb pre_clkon_cb;
+ bool is_cont_splash_enabled;
void *priv_data;
};
@@ -287,7 +288,18 @@ int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks)
{
int rc = 0;
+ struct dsi_clk_mngr *mngr;
+ mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+
+ if (mngr->is_cont_splash_enabled)
+ return 0;
+ /*
+ * In an ideal world, cont_splash_enabled should not be required inside
+ * the clock manager. But, in the current driver cont_splash_enabled
+ * flag is set inside mdp driver and there is no interface event
+ * associated with this flag setting.
+ */
rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->freq.esc_clk_rate);
if (rc) {
pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
@@ -1143,6 +1155,19 @@ int dsi_deregister_clk_handle(void *client)
return rc;
}
+void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status)
+{
+ struct dsi_clk_mngr *mngr;
+
+ if (!clk_mgr) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ mngr = (struct dsi_clk_mngr *)clk_mgr;
+ mngr->is_cont_splash_enabled = status;
+}
+
void *dsi_display_clk_mngr_register(struct dsi_clk_info *info)
{
struct dsi_clk_mngr *mngr;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 21a23e2..1f10e3c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -889,7 +889,6 @@ static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl,
if (packet->payload_length > 0)
buf[3] |= BIT(6);
- buf[3] |= BIT(7);
/* send embedded BTA for read commands */
if ((buf[2] & 0x3f) == MIPI_DSI_DCS_READ)
@@ -906,7 +905,15 @@ static void dsi_ctrl_wait_for_video_done(struct dsi_ctrl *dsi_ctrl)
u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
struct dsi_mode_info *timing;
- if (dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE)
+ /**
+ * No need to wait if the panel is not video mode or
+ * if DSI controller supports command DMA scheduling or
+ * if we are sending init commands.
+ */
+ if ((dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE) ||
+ (dsi_ctrl->version >= DSI_CTRL_VERSION_2_2) ||
+ (dsi_ctrl->current_state.vid_engine_state !=
+ DSI_CTRL_ENGINE_ON))
return;
dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
@@ -944,8 +951,17 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
u32 hw_flags = 0;
u32 length = 0;
u8 *buffer = NULL;
- u32 cnt = 0;
+ u32 cnt = 0, line_no = 0x1;
u8 *cmdbuf;
+ struct dsi_mode_info *timing;
+
+ /* override cmd fetch mode during secure session */
+ if (dsi_ctrl->secure_mode) {
+ flags &= ~DSI_CTRL_CMD_FETCH_MEMORY;
+ flags |= DSI_CTRL_CMD_FIFO_STORE;
+ pr_debug("[%s] override to TPG during secure session\n",
+ dsi_ctrl->name);
+ }
rc = mipi_dsi_create_packet(&packet, msg);
if (rc) {
@@ -953,20 +969,31 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
goto error;
}
+ /* fail cmds more than the supported size in TPG mode */
+ if ((flags & DSI_CTRL_CMD_FIFO_STORE) &&
+ (msg->tx_len > DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE)) {
+ pr_err("[%s] TPG cmd size:%zd not supported, secure:%d\n",
+ dsi_ctrl->name, msg->tx_len,
+ dsi_ctrl->secure_mode);
+ rc = -ENOTSUPP;
+ goto error;
+ }
+
+ rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+ &packet,
+ &buffer,
+ &length);
+ if (rc) {
+ pr_err("[%s] failed to copy message, rc=%d\n",
+ dsi_ctrl->name, rc);
+ goto error;
+ }
+
+ if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND))
+ buffer[3] |= BIT(7);//set the last cmd bit in header.
+
if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
- rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
- &packet,
- &buffer,
- &length);
-
- if (rc) {
- pr_err("[%s] failed to copy message, rc=%d\n",
- dsi_ctrl->name, rc);
- goto error;
- }
-
cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
- cmd_mem.length = length;
cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
true : false;
cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
@@ -975,19 +1002,20 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
true : false;
cmdbuf = (u8 *)(dsi_ctrl->vaddr);
+
for (cnt = 0; cnt < length; cnt++)
- cmdbuf[cnt] = buffer[cnt];
+ cmdbuf[dsi_ctrl->cmd_len + cnt] = buffer[cnt];
+
+ dsi_ctrl->cmd_len += length;
+
+ if (!(msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
+ goto error;
+ } else {
+ cmd_mem.length = dsi_ctrl->cmd_len;
+ dsi_ctrl->cmd_len = 0;
+ }
} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
- rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
- &packet,
- &buffer,
- &length);
- if (rc) {
- pr_err("[%s] failed to copy message, rc=%d\n",
- dsi_ctrl->name, rc);
- goto error;
- }
cmd.command = (u32 *)buffer;
cmd.size = length;
cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
@@ -998,9 +1026,23 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
true : false;
}
+ timing = &(dsi_ctrl->host_config.video_timing);
+ if (timing)
+ line_no += timing->v_back_porch + timing->v_sync_width +
+ timing->v_active;
+ if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+ dsi_ctrl->hw.ops.schedule_dma_cmd &&
+ (dsi_ctrl->current_state.vid_engine_state ==
+ DSI_CTRL_ENGINE_ON))
+ dsi_ctrl->hw.ops.schedule_dma_cmd(&dsi_ctrl->hw,
+ line_no);
+
hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
+ if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND))
+ hw_flags |= DSI_CTRL_CMD_LAST_COMMAND;
+
if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) {
if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
@@ -1017,6 +1059,9 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
dsi_ctrl_wait_for_video_done(dsi_ctrl);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ if (dsi_ctrl->hw.ops.mask_error_intr)
+ dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), true);
reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
@@ -1034,7 +1079,8 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
if (ret == 0) {
- u32 status = 0;
+ u32 status = dsi_ctrl->hw.ops.get_interrupt_status(
+ &dsi_ctrl->hw);
u32 mask = DSI_CMD_MODE_DMA_DONE;
if (status & mask) {
@@ -1056,6 +1102,9 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
}
}
+ if (dsi_ctrl->hw.ops.mask_error_intr)
+ dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), false);
dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
}
error:
@@ -1444,6 +1493,9 @@ static int dsi_ctrl_dts_parse(struct dsi_ctrl *dsi_ctrl,
dsi_ctrl->phy_isolation_enabled = of_property_read_bool(of_node,
"qcom,dsi-phy-isolation-enabled");
+ dsi_ctrl->null_insertion_enabled = of_property_read_bool(of_node,
+ "qcom,null-insertion-enabled");
+
return 0;
}
@@ -1501,7 +1553,8 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
}
rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
- dsi_ctrl->cell_index, dsi_ctrl->phy_isolation_enabled);
+ dsi_ctrl->cell_index, dsi_ctrl->phy_isolation_enabled,
+ dsi_ctrl->null_insertion_enabled);
if (rc) {
pr_err("Catalog does not support version (%d)\n",
dsi_ctrl->version);
@@ -1519,6 +1572,7 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
mutex_unlock(&dsi_ctrl_list_lock);
mutex_init(&dsi_ctrl->ctrl_lock);
+ dsi_ctrl->secure_mode = false;
dsi_ctrl->pdev = pdev;
platform_set_drvdata(pdev, dsi_ctrl);
@@ -1641,7 +1695,9 @@ struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node)
mutex_lock(&ctrl->ctrl_lock);
if (ctrl->refcount == 1) {
pr_err("[%s] Device in use\n", ctrl->name);
+ mutex_unlock(&ctrl->ctrl_lock);
ctrl = ERR_PTR(-EBUSY);
+ return ctrl;
} else {
ctrl->refcount++;
}
@@ -1914,7 +1970,7 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
}
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
- dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+ dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -1965,33 +2021,77 @@ int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable)
static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl,
unsigned long int error)
{
- pr_err("%s: %lu\n", __func__, error);
+ struct dsi_event_cb_info cb_info;
+
+ cb_info = dsi_ctrl->irq_info.irq_err_cb;
+
+ /* disable error interrupts */
+ if (dsi_ctrl->hw.ops.error_intr_ctrl)
+ dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, false);
+
+ /* clear error interrupts first */
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ error);
/* DTLN PHY error */
- if (error & 0x3000e00)
- if (dsi_ctrl->hw.ops.clear_error_status)
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- 0x3000e00);
+ if (error & 0x3000E00)
+ pr_err("dsi PHY contention error: 0x%lx\n", error);
+
+ /* TX timeout error */
+ if (error & 0xE0) {
+ if (error & 0xA0) {
+ if (cb_info.event_cb) {
+ cb_info.event_idx = DSI_LP_Rx_TIMEOUT;
+ (void)cb_info.event_cb(cb_info.event_usr_ptr,
+ cb_info.event_idx,
+ dsi_ctrl->cell_index,
+ 0, 0, 0, 0);
+ }
+ }
+ pr_err("tx timeout error: 0x%lx\n", error);
+ }
/* DSI FIFO OVERFLOW error */
- if (error & 0xf0000) {
- if (dsi_ctrl->hw.ops.clear_error_status)
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- 0xf0000);
+ if (error & 0xF0000) {
+ u32 mask = 0;
+
+ if (dsi_ctrl->hw.ops.get_error_mask)
+ mask = dsi_ctrl->hw.ops.get_error_mask(&dsi_ctrl->hw);
+ /* no need to report FIFO overflow if already masked */
+ if (cb_info.event_cb && !(mask & 0xf0000)) {
+ cb_info.event_idx = DSI_FIFO_OVERFLOW;
+ (void)cb_info.event_cb(cb_info.event_usr_ptr,
+ cb_info.event_idx,
+ dsi_ctrl->cell_index,
+ 0, 0, 0, 0);
+ pr_err("dsi FIFO OVERFLOW error: 0x%lx\n", error);
+ }
}
/* DSI FIFO UNDERFLOW error */
- if (error & 0xf00000) {
- if (dsi_ctrl->hw.ops.clear_error_status)
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- 0xf00000);
+ if (error & 0xF00000) {
+ if (cb_info.event_cb) {
+ cb_info.event_idx = DSI_FIFO_UNDERFLOW;
+ (void)cb_info.event_cb(cb_info.event_usr_ptr,
+ cb_info.event_idx,
+ dsi_ctrl->cell_index,
+ 0, 0, 0, 0);
+ }
+ pr_err("dsi FIFO UNDERFLOW error: 0x%lx\n", error);
}
/* DSI PLL UNLOCK error */
if (error & BIT(8))
- if (dsi_ctrl->hw.ops.clear_error_status)
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- BIT(8));
+ pr_err("dsi PLL unlock error: 0x%lx\n", error);
+
+ /* ACK error */
+ if (error & 0xF)
+ pr_err("ack error: 0x%lx\n", error);
+
+ /* enable back DSI interrupts */
+ if (dsi_ctrl->hw.ops.error_intr_ctrl)
+ dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, true);
}
/**
@@ -2005,39 +2105,28 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
struct dsi_ctrl *dsi_ctrl;
struct dsi_event_cb_info cb_info;
unsigned long flags;
- uint32_t cell_index, status, i;
- uint64_t errors;
+ uint32_t status = 0x0, i;
+ uint64_t errors = 0x0;
if (!ptr)
return IRQ_NONE;
dsi_ctrl = ptr;
- /* clear status interrupts */
+ /* check status interrupts */
if (dsi_ctrl->hw.ops.get_interrupt_status)
status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw);
- else
- status = 0x0;
- if (dsi_ctrl->hw.ops.clear_interrupt_status)
- dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status);
-
- spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
- cell_index = dsi_ctrl->cell_index;
- spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags);
-
- /* clear error interrupts */
+ /* check error interrupts */
if (dsi_ctrl->hw.ops.get_error_status)
errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw);
- else
- errors = 0x0;
- if (errors) {
- /* handle DSI error recovery */
+ /* clear interrupts */
+ if (dsi_ctrl->hw.ops.clear_interrupt_status)
+ dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, 0x0);
+
+ /* handle DSI error recovery */
+ if (status & DSI_ERROR)
dsi_ctrl_handle_error_status(dsi_ctrl, errors);
- if (dsi_ctrl->hw.ops.clear_error_status)
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
- errors);
- }
if (status & DSI_CMD_MODE_DMA_DONE) {
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
@@ -2058,9 +2147,16 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
}
if (status & DSI_BTA_DONE) {
+ u32 fifo_overflow_mask = (DSI_DLN0_HS_FIFO_OVERFLOW |
+ DSI_DLN1_HS_FIFO_OVERFLOW |
+ DSI_DLN2_HS_FIFO_OVERFLOW |
+ DSI_DLN3_HS_FIFO_OVERFLOW);
dsi_ctrl_disable_status_interrupt(dsi_ctrl,
DSI_SINT_BTA_DONE);
complete_all(&dsi_ctrl->irq_info.bta_done);
+ if (dsi_ctrl->hw.ops.clear_error_status)
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ fifo_overflow_mask);
}
for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) {
@@ -2073,7 +2169,8 @@ static irqreturn_t dsi_ctrl_isr(int irq, void *ptr)
if (cb_info.event_cb)
(void)cb_info.event_cb(cb_info.event_usr_ptr,
cb_info.event_idx,
- cell_index, irq, 0, 0, 0);
+ dsi_ctrl->cell_index,
+ irq, 0, 0, 0);
}
status >>= 1;
}
@@ -2225,6 +2322,7 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
+ * @is_splash_enabled: boolean signifying splash status.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -2233,7 +2331,7 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled)
{
int rc = 0;
@@ -2250,37 +2348,42 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
goto error;
}
- dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+ /* For Splash usecases we omit hw operations as bootloader
+ * already takes care of them
+ */
+ if (!is_splash_enabled) {
+ dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
&dsi_ctrl->host_config.lane_map);
- dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config);
- if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
- dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+ if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+ dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.cmd_engine);
- dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
&dsi_ctrl->host_config.video_timing,
dsi_ctrl->host_config.video_timing.h_active * 3,
0x0,
NULL);
- } else {
- dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+ } else {
+ dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
&dsi_ctrl->host_config.common_config,
&dsi_ctrl->host_config.u.video_engine);
- dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+ dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
&dsi_ctrl->host_config.video_timing);
+ }
}
dsi_ctrl_setup_isr(dsi_ctrl);
dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
- dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+ dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0xFF00E0);
- pr_debug("[DSI_%d]Host initialization complete\n",
- dsi_ctrl->cell_index);
+ pr_debug("[DSI_%d]Host initialization complete, continuous splash status:%d\n",
+ dsi_ctrl->cell_index, is_splash_enabled);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
error:
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2300,6 +2403,48 @@ int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl)
return 0;
}
+int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl)
+ return -EINVAL;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ rc = dsi_ctrl->hw.ops.ctrl_reset(&dsi_ctrl->hw, mask);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ return rc;
+}
+
+int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl)
+ return -EINVAL;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ rc = dsi_ctrl->hw.ops.get_hw_version(&dsi_ctrl->hw);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ return rc;
+}
+
+int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl)
+ return -EINVAL;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ return rc;
+}
+
/**
* dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
@@ -2487,6 +2632,10 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
return -EINVAL;
}
+ /* Don't trigger the command if this is not the last command */
+ if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+ return rc;
+
mutex_lock(&dsi_ctrl->ctrl_lock);
if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
@@ -2494,8 +2643,12 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
if ((flags & DSI_CTRL_CMD_BROADCAST) &&
(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+ dsi_ctrl_wait_for_video_done(dsi_ctrl);
dsi_ctrl_enable_status_interrupt(dsi_ctrl,
DSI_SINT_CMD_MODE_DMA_DONE, NULL);
+ if (dsi_ctrl->hw.ops.mask_error_intr)
+ dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), true);
reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
/* trigger command */
@@ -2526,6 +2679,9 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
dsi_ctrl->cell_index);
}
}
+ if (dsi_ctrl->hw.ops.mask_error_intr)
+ dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
+ BIT(DSI_FIFO_OVERFLOW), false);
}
mutex_unlock(&dsi_ctrl->ctrl_lock);
@@ -2555,6 +2711,43 @@ static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
}
/**
+ * dsi_ctrl_update_host_engine_state_for_cont_splash() -
+ * set engine state for dsi controller during continuous splash
+ * @dsi_ctrl: DSI controller handle.
+ * @state: Engine state.
+ *
+ * Set host engine state for DSI controller during continuous splash.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->cell_index, rc);
+ goto error;
+ }
+
+ pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->cell_index,
+ state);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
* dsi_ctrl_set_power_state() - set power state for dsi controller
* @dsi_ctrl: DSI controller handle.
* @state: Power state.
@@ -2724,8 +2917,6 @@ int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
return -EINVAL;
}
- mutex_lock(&dsi_ctrl->ctrl_lock);
-
rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
if (rc) {
pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
@@ -2742,7 +2933,6 @@ int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
state);
dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
error:
- mutex_unlock(&dsi_ctrl->ctrl_lock);
return rc;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 4781299..f5b08a0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -35,6 +35,8 @@
* reading data from memory.
* @DSI_CTRL_CMD_FETCH_MEMORY: Fetch command from memory through AXI bus
* and transfer it.
+ * @DSI_CTRL_CMD_LAST_COMMAND: Trigger the DMA cmd transfer if this is last
+ * command in the batch.
*/
#define DSI_CTRL_CMD_READ 0x1
#define DSI_CTRL_CMD_BROADCAST 0x2
@@ -42,6 +44,10 @@
#define DSI_CTRL_CMD_DEFER_TRIGGER 0x8
#define DSI_CTRL_CMD_FIFO_STORE 0x10
#define DSI_CTRL_CMD_FETCH_MEMORY 0x20
+#define DSI_CTRL_CMD_LAST_COMMAND 0x40
+
+/* max size supported for dsi cmd transfer using TPG */
+#define DSI_CTRL_MAX_CMD_FIFO_STORE_SIZE 64
/**
* enum dsi_power_state - defines power states for dsi controller.
@@ -143,6 +149,7 @@ struct dsi_ctrl_state_info {
* @irq_stat_mask: Hardware mask of currently enabled interrupts.
* @irq_stat_refcount: Number of times each interrupt has been requested.
* @irq_stat_cb: Status IRQ callback definitions.
+ * @irq_err_cb: IRQ callback definition to handle DSI ERRORs.
* @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
* @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
* @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
@@ -153,6 +160,7 @@ struct dsi_ctrl_interrupts {
uint32_t irq_stat_mask;
int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT];
struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT];
+ struct dsi_event_cb_info irq_err_cb;
struct completion cmd_dma_done;
struct completion vid_frame_done;
@@ -174,6 +182,7 @@ struct dsi_ctrl_interrupts {
* @current_state: Current driver and hardware state.
* @clk_cb: Callback for DSI clock control.
* @irq_info: Interrupt information.
+ * @recovery_cb: Recovery call back to SDE.
* @clk_info: Clock information.
* @clk_freq: DSi Link clock frequency information.
* @pwr_info: Power information.
@@ -185,13 +194,16 @@ struct dsi_ctrl_interrupts {
* Origin is top left of this CTRL.
* @tx_cmd_buf: Tx command buffer.
* @cmd_buffer_iova: cmd buffer mapped address.
- * @vaddr: CPU virtual address of cmd buffer.
* @cmd_buffer_size: Size of command buffer.
+ * @vaddr: CPU virtual address of cmd buffer.
+ * @secure_mode: Indicates if secure-session is in progress
* @debugfs_root: Root for debugfs entries.
* @misr_enable: Frame MISR enable/disable
* @misr_cache: Cached Frame MISR value
* @phy_isolation_enabled: A boolean property allows to isolate the phy from
* dsi controller and run only dsi controller.
+ * @null_insertion_enabled: A boolean property to allow dsi controller to
+ * insert null packet.
*/
struct dsi_ctrl {
struct platform_device *pdev;
@@ -210,6 +222,7 @@ struct dsi_ctrl {
struct clk_ctrl_cb clk_cb;
struct dsi_ctrl_interrupts irq_info;
+ struct dsi_event_cb_info recovery_cb;
/* Clock and power states */
struct dsi_ctrl_clk_info clk_info;
@@ -225,7 +238,9 @@ struct dsi_ctrl {
struct drm_gem_object *tx_cmd_buf;
u32 cmd_buffer_size;
u32 cmd_buffer_iova;
+ u32 cmd_len;
void *vaddr;
+ u32 secure_mode;
/* Debug Information */
struct dentry *debugfs_root;
@@ -235,6 +250,7 @@ struct dsi_ctrl {
u32 misr_cache;
bool phy_isolation_enabled;
+ bool null_insertion_enabled;
};
/**
@@ -388,6 +404,7 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl);
/**
* dsi_ctrl_host_init() - Initialize DSI host hardware.
* @dsi_ctrl: DSI controller handle.
+ * @is_splash_enabled: boolean signifying splash status.
*
* Initializes DSI controller hardware with host configuration provided by
* dsi_ctrl_update_host_config(). Initialization can be performed only during
@@ -396,7 +413,7 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl);
*
* Return: error code.
*/
-int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool is_splash_enabled);
/**
* dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
@@ -486,6 +503,17 @@ int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
/**
+ * dsi_ctrl_update_host_engine_state_for_cont_splash() - update engine
+ * states for cont splash usecase
+ * @dsi_ctrl: DSI controller handle.
+ * @state: DSI engine state
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_engine_state_for_cont_splash(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
* dsi_ctrl_set_power_state() - set power state for dsi controller
* @dsi_ctrl: DSI controller handle.
* @state: Power state.
@@ -632,4 +660,24 @@ void dsi_ctrl_drv_register(void);
*/
void dsi_ctrl_drv_unregister(void);
+/**
+ * dsi_ctrl_reset() - Reset DSI PHY CLK/DATA lane
+ * @dsi_ctrl: DSI controller handle.
+ * @mask: Mask to indicate if CLK and/or DATA lane needs reset.
+ */
+int dsi_ctrl_reset(struct dsi_ctrl *dsi_ctrl, int mask);
+
+/**
+ * dsi_ctrl_get_hw_version() - read dsi controller hw revision
+ * @dsi_ctrl: DSI controller handle.
+ */
+int dsi_ctrl_get_hw_version(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_vid_engine_en() - Control DSI video engine HW state
+ * @dsi_ctrl: DSI controller handle.
+ * @on: variable to control video engine ON/OFF.
+ */
+int dsi_ctrl_vid_engine_en(struct dsi_ctrl *dsi_ctrl, bool on);
+
#endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 714a450..c77065c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -96,6 +96,7 @@ enum dsi_test_pattern {
* @DSI_SINT_DESKEW_DONE: The deskew calibration operation done.
* @DSI_SINT_DYN_BLANK_DMA_DONE: The dynamic blankin DMA operation has
* completed.
+ * @DSI_SINT_ERROR: DSI error has happened.
*/
enum dsi_status_int_index {
DSI_SINT_CMD_MODE_DMA_DONE = 0,
@@ -108,6 +109,7 @@ enum dsi_status_int_index {
DSI_SINT_DYN_REFRESH_DONE = 7,
DSI_SINT_DESKEW_DONE = 8,
DSI_SINT_DYN_BLANK_DMA_DONE = 9,
+ DSI_SINT_ERROR = 10,
DSI_STATUS_INTERRUPT_COUNT
};
@@ -126,6 +128,7 @@ enum dsi_status_int_index {
* @DSI_DESKEW_DONE: The deskew calibration operation has completed
* @DSI_DYN_BLANK_DMA_DONE: The dynamic blankin DMA operation has
* completed.
+ * @DSI_ERROR: DSI error has happened.
*/
enum dsi_status_int_type {
DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE),
@@ -137,7 +140,8 @@ enum dsi_status_int_type {
DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE),
DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE),
DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE),
- DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE)
+ DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE),
+ DSI_ERROR = BIT(DSI_SINT_ERROR)
};
/**
@@ -175,6 +179,7 @@ enum dsi_status_int_type {
* @DSI_EINT_DLN1_LP1_CONTENTION: PHY level contention while lane 1 high.
* @DSI_EINT_DLN2_LP1_CONTENTION: PHY level contention while lane 2 high.
* @DSI_EINT_DLN3_LP1_CONTENTION: PHY level contention while lane 3 high.
+ * @DSI_EINT_PANEL_SPECIFIC_ERR: DSI Protocol violation error.
*/
enum dsi_error_int_index {
DSI_EINT_RDBK_SINGLE_ECC_ERR = 0,
@@ -209,6 +214,7 @@ enum dsi_error_int_index {
DSI_EINT_DLN1_LP1_CONTENTION = 29,
DSI_EINT_DLN2_LP1_CONTENTION = 30,
DSI_EINT_DLN3_LP1_CONTENTION = 31,
+ DSI_EINT_PANEL_SPECIFIC_ERR = 32,
DSI_ERROR_INTERRUPT_COUNT
};
@@ -248,6 +254,7 @@ enum dsi_error_int_index {
* @DSI_DLN1_LP1_CONTENTION: PHY level contention while lane 1 is high.
* @DSI_DLN2_LP1_CONTENTION: PHY level contention while lane 2 is high.
* @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
+ * @DSI_PANEL_SPECIFIC_ERR: DSI Protocol violation.
*/
enum dsi_error_int_type {
DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR),
@@ -282,6 +289,7 @@ enum dsi_error_int_type {
DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION),
DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION),
DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION),
+ DSI_PANEL_SPECIFIC_ERR = BIT(DSI_EINT_PANEL_SPECIFIC_ERR),
};
/**
@@ -533,6 +541,12 @@ struct dsi_ctrl_hw_ops {
u32 *hw_read_cnt);
/**
+ * get_cont_splash_status() - get continuous splash status
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ bool (*get_cont_splash_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
* wait_for_lane_idle() - wait for DSI lanes to go to idle state
* @ctrl: Pointer to the controller host hardware.
* @lanes: ORed list of lanes (enum dsi_data_lanes) which need
@@ -721,6 +735,48 @@ struct dsi_ctrl_hw_ops {
* @ctrl: Pointer to the controller host hardware.
*/
void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
+
+ /** schedule_dma_cmd() - Schedule DMA command transfer on a
+ * particular blanking line.
+ * @ctrl: Pointer to the controller host hardware.
+ * @line_no: Blanking line number on which DMA command
+ * needs to be sent.
+ */
+ void (*schedule_dma_cmd)(struct dsi_ctrl_hw *ctrl, int line_no);
+
+ /**
+ * ctrl_reset() - Reset DSI lanes to recover from DSI errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @mask: Indicates the error type.
+ */
+ int (*ctrl_reset)(struct dsi_ctrl_hw *ctrl, int mask);
+
+ /**
+ * mask_error_int() - Mask/Unmask particular DSI error interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @idx: Indicates the errors to be masked.
+ * @en: Bool for mask or unmask of the error
+ */
+ void (*mask_error_intr)(struct dsi_ctrl_hw *ctrl, u32 idx, bool en);
+
+ /**
+ * error_intr_ctrl() - Mask/Unmask master DSI error interrupt
+ * @ctrl: Pointer to the controller host hardware.
+ * @en: Bool for mask or unmask of DSI error
+ */
+ void (*error_intr_ctrl)(struct dsi_ctrl_hw *ctrl, bool en);
+
+ /**
+ * get_error_mask() - get DSI error interrupt mask status
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ u32 (*get_error_mask)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * get_hw_version() - get DSI controller hw version
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ u32 (*get_hw_version)(struct dsi_ctrl_hw *ctrl);
};
/*
@@ -739,6 +795,8 @@ struct dsi_ctrl_hw_ops {
* @supported_errors: Number of supported errors.
* @phy_isolation_enabled: A boolean property allows to isolate the phy from
* dsi controller and run only dsi controller.
+ * @null_insertion_enabled: A boolean property to allow dsi controller to
+ * insert null packet.
*/
struct dsi_ctrl_hw {
void __iomem *base;
@@ -758,6 +816,7 @@ struct dsi_ctrl_hw {
u64 supported_errors;
bool phy_isolation_enabled;
+ bool null_insertion_enabled;
};
#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
index 1b1e811..650c2e0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_2.c
@@ -17,10 +17,14 @@
#include "dsi_ctrl_hw.h"
#include "dsi_ctrl_reg.h"
#include "dsi_hw.h"
+#include "dsi_catalog.h"
/* Equivalent to register DISP_CC_MISC_CMD */
#define DISP_CC_CLAMP_REG_OFF 0x00
+/* register to configure DMA scheduling */
+#define DSI_DMA_SCHEDULE_CTRL 0x100
+
/**
* dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
* @ctrl: Pointer to the controller host hardware.
@@ -40,3 +44,38 @@ void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
reg |= BIT(ctrl->index);
DSI_DISP_CC_W32(ctrl, DISP_CC_CLAMP_REG_OFF, reg);
}
+
+/**
+ * dsi_ctrl_hw_22_schedule_dma_cmd() - to schedule DMA command transfer
+ * @ctrl: Pointer to the controller host hardware.
+ * @line_no: Line number at which command needs to be sent.
+ */
+void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, int line_no)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, DSI_DMA_SCHEDULE_CTRL);
+ reg |= BIT(28);
+ reg |= (line_no & 0xffff);
+
+ DSI_W32(ctrl, DSI_DMA_SCHEDULE_CTRL, reg);
+}
+
+/*
+ * dsi_ctrl_hw_22_get_cont_splash_status() - to verify whether continuous
+ * splash is enabled or not
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Return: Return Continuous splash status
+ */
+bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+
+ /**
+ * DSI scratch register 1 is used to notify whether continuous
+ * splash is enabled or not by bootloader
+ */
+ reg = DSI_R32(ctrl, DSI_SCRATCH_REGISTER_1);
+ return reg == 0x1 ? true : false;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 2959e94..c2c8f57 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -334,7 +334,7 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
u32 width_final, stride_final;
u32 height_final;
u32 stream_total = 0, stream_ctrl = 0;
- u32 reg_ctrl = 0, reg_ctrl2 = 0;
+ u32 reg_ctrl = 0, reg_ctrl2 = 0, data = 0;
if (roi && (!roi->w || !roi->h))
return;
@@ -391,6 +391,9 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
height_final = mode->v_active;
}
+ /* HS Timer value */
+ DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+
stream_ctrl = (stride_final + 1) << 16;
stream_ctrl |= (vc_id & 0x3) << 8;
stream_ctrl |= 0x39; /* packet data type */
@@ -405,6 +408,14 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
+ if (ctrl->null_insertion_enabled) {
+ /* enable null packet insertion */
+ data = (vc_id << 1);
+ data |= 0 << 16;
+ data |= 0x1;
+ DSI_W32(ctrl, DSI_COMMAND_MODE_NULL_INSERTION_CTRL, data);
+ }
+
pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
stream_ctrl, stream_total);
}
@@ -834,6 +845,8 @@ u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
ints |= DSI_DYN_REFRESH_DONE;
if (reg & BIT(30))
ints |= DSI_DESKEW_DONE;
+ if (reg & BIT(24))
+ ints |= DSI_ERROR;
pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
ctrl->index, ints, reg);
@@ -870,6 +883,12 @@ void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
if (ints & DSI_DESKEW_DONE)
reg |= BIT(30);
+ /*
+ * Do not clear error status.
+ * It will be cleared as part of
+ * error handler function.
+ */
+ reg &= ~BIT(24);
DSI_W32(ctrl, DSI_INT_CTRL, reg);
pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
@@ -936,7 +955,7 @@ u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl)
u32 timeout_errors;
u32 clk_error;
u32 dsi_status;
- u64 errors = 0;
+ u64 errors = 0, shift = 0x1;
dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
if (dln0_phy_err & BIT(0))
@@ -983,6 +1002,8 @@ u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl)
errors |= DSI_RDBK_INCOMPLETE_PKT;
if (ack_error & BIT(24))
errors |= DSI_PERIPH_ERROR_PKT;
+ if (ack_error & BIT(15))
+ errors |= (shift << DSI_EINT_PANEL_SPECIFIC_ERR);
timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
if (timeout_errors & BIT(0))
@@ -1020,7 +1041,6 @@ void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
u32 timeout_error = 0;
u32 clk_error = 0;
u32 dsi_status = 0;
- u32 int_ctrl = 0;
if (errors & DSI_RDBK_SINGLE_ECC_ERR)
ack_error |= BIT(16);
@@ -1032,6 +1052,8 @@ void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
ack_error |= BIT(23);
if (errors & DSI_PERIPH_ERROR_PKT)
ack_error |= BIT(24);
+ if (errors & DSI_PANEL_SPECIFIC_ERR)
+ ack_error |= BIT(15);
if (errors & DSI_LP_RX_TIMEOUT)
timeout_error |= BIT(4);
@@ -1080,14 +1102,14 @@ void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+ /* Writing of an extra 0 is needed to clear ack error bits */
DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+ wmb(); /* make sure register is committed */
+ DSI_W32(ctrl, DSI_ACK_ERR_STATUS, 0x0);
DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
DSI_W32(ctrl, DSI_STATUS, dsi_status);
- int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
- int_ctrl |= BIT(24);
- DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
ctrl->index, errors, dln0_phy_err, fifo_status);
pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
@@ -1348,3 +1370,102 @@ void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl,
DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
}
+int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl,
+ int mask)
+{
+ int rc = 0;
+ u32 data;
+
+ pr_debug("DSI CTRL and PHY reset. ctrl-num = %d %d\n",
+ ctrl->index, mask);
+
+ data = DSI_R32(ctrl, 0x0004);
+ /* Disable DSI video mode */
+ DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
+ wmb(); /* ensure register committed */
+ /* Disable DSI controller */
+ DSI_W32(ctrl, 0x004, (data & ~(BIT(0) | BIT(1))));
+ wmb(); /* ensure register committed */
+ /* "Force On" all dynamic clocks */
+ DSI_W32(ctrl, 0x11c, 0x100a00);
+
+ /* DSI_SW_RESET */
+ DSI_W32(ctrl, 0x118, 0x1);
+ wmb(); /* ensure register is committed */
+ DSI_W32(ctrl, 0x118, 0x0);
+ wmb(); /* ensure register is committed */
+
+ /* Remove "Force On" all dynamic clocks */
+ DSI_W32(ctrl, 0x11c, 0x00);
+ /* Enable DSI controller */
+ DSI_W32(ctrl, 0x004, (data & ~BIT(1)));
+ wmb(); /* ensure register committed */
+
+ return rc;
+}
+
+void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, 0x10c);
+
+ if (idx & BIT(DSI_FIFO_OVERFLOW)) {
+ if (en)
+ reg |= (0xf << 16);
+ else
+ reg &= ~(0xf << 16);
+ }
+
+ if (idx & BIT(DSI_FIFO_UNDERFLOW)) {
+ if (en)
+ reg |= (0xf << 26);
+ else
+ reg &= ~(0xf << 26);
+ }
+
+ if (idx & BIT(DSI_LP_Rx_TIMEOUT)) {
+ if (en)
+ reg |= (0x7 << 23);
+ else
+ reg &= ~(0x7 << 23);
+ }
+
+ DSI_W32(ctrl, 0x10c, reg);
+ wmb(); /* ensure error is masked */
+}
+
+void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en)
+{
+ u32 reg = 0;
+ u32 dsi_total_mask = 0x2222AA02;
+
+ reg = DSI_R32(ctrl, 0x110);
+ reg &= dsi_total_mask;
+
+ if (en)
+ reg |= (BIT(24) | BIT(25));
+ else
+ reg &= ~BIT(25);
+
+ DSI_W32(ctrl, 0x110, reg);
+ wmb(); /* ensure error is masked */
+}
+
+u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, 0x10c);
+
+ return reg;
+}
+
+u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, 0x0);
+
+ return reg;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index 2f0d25f..d45f849 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -358,6 +358,7 @@ struct dsi_panel_cmd_set {
* @clk_rate_hz: DSI bit clock rate per lane in Hz.
* @dsc_enabled: DSC compression enabled.
* @dsc: DSC compression configuration.
+ * @roi_caps: Panel ROI capabilities.
*/
struct dsi_mode_info {
u32 h_active;
@@ -377,6 +378,7 @@ struct dsi_mode_info {
u64 clk_rate_hz;
bool dsc_enabled;
struct msm_display_dsc_info *dsc;
+ struct msm_roi_caps roi_caps;
};
/**
@@ -505,6 +507,7 @@ struct dsi_host_config {
* @topology: Topology selected for the panel
* @dsc: DSC compression info
* @dsc_enabled: DSC compression enabled
+ * @roi_caps: Panel ROI capabilities
*/
struct dsi_display_mode_priv_info {
struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
@@ -520,6 +523,7 @@ struct dsi_display_mode_priv_info {
struct msm_display_topology topology;
struct msm_display_dsc_info dsc;
bool dsc_enabled;
+ struct msm_roi_caps roi_caps;
};
/**
@@ -581,4 +585,16 @@ struct dsi_event_cb_info {
uint32_t data2, uint32_t data3);
};
+/**
+ * enum dsi_error_status - various dsi errors
+ * @DSI_FIFO_OVERFLOW: DSI FIFO Overflow error
+ * @DSI_FIFO_UNDERFLOW: DSI FIFO Underflow error
+ * @DSI_LP_Rx_TIMEOUT: DSI LP/RX Timeout error
+ */
+enum dsi_error_status {
+ DSI_FIFO_OVERFLOW = 1,
+ DSI_FIFO_UNDERFLOW,
+ DSI_LP_Rx_TIMEOUT,
+};
+
#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index d71a5f21..982d16e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -36,6 +36,8 @@
#define MISR_BUFF_SIZE 256
+#define MAX_NAME_SIZE 64
+
static DEFINE_MUTEX(dsi_display_list_lock);
static LIST_HEAD(dsi_display_list);
static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
@@ -131,18 +133,19 @@ static int dsi_display_cmd_engine_enable(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ mutex_lock(&m_ctrl->ctrl->ctrl_lock);
+
if (display->cmd_engine_refcount > 0) {
display->cmd_engine_refcount++;
- return 0;
+ goto done;
}
- m_ctrl = &display->ctrl[display->cmd_master_idx];
-
rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
if (rc) {
pr_err("[%s] failed to enable cmd engine, rc=%d\n",
display->name, rc);
- goto error;
+ goto done;
}
for (i = 0; i < display->ctrl_count; i++) {
@@ -160,10 +163,11 @@ static int dsi_display_cmd_engine_enable(struct dsi_display *display)
}
display->cmd_engine_refcount++;
- return rc;
+ goto done;
error_disable_master:
(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
+done:
+ mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
return rc;
}
@@ -173,15 +177,17 @@ static int dsi_display_cmd_engine_disable(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ mutex_lock(&m_ctrl->ctrl->ctrl_lock);
+
if (display->cmd_engine_refcount == 0) {
pr_err("[%s] Invalid refcount\n", display->name);
- return 0;
+ goto done;
} else if (display->cmd_engine_refcount > 1) {
display->cmd_engine_refcount--;
- return 0;
+ goto done;
}
- m_ctrl = &display->ctrl[display->cmd_master_idx];
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
if (!ctrl->ctrl || (ctrl == m_ctrl))
@@ -203,6 +209,8 @@ static int dsi_display_cmd_engine_disable(struct dsi_display *display)
error:
display->cmd_engine_refcount = 0;
+done:
+ mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
return rc;
}
@@ -255,11 +263,18 @@ static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
if (!panel)
return -EINVAL;
+ /* acquire panel_lock to make sure no commands are in progress */
+ dsi_panel_acquire_panel_lock(panel);
+
config = &(panel->esd_config);
lenp = config->status_valid_params ?: config->status_cmds_rlen;
count = config->status_cmd.count;
cmds = config->status_cmd.cmds;
- flags = (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
+ if (cmds->last_command) {
+ cmds->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
+ flags |= DSI_CTRL_CMD_LAST_COMMAND;
+ }
+ flags |= (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
for (i = 0; i < count; ++i) {
memset(config->status_buf, 0x0, SZ_4K);
@@ -277,6 +292,8 @@ static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
}
error:
+ /* release panel_lock */
+ dsi_panel_release_panel_lock(panel);
return rc;
}
@@ -372,7 +389,7 @@ int dsi_display_check_status(void *display)
struct dsi_display *dsi_display = display;
struct dsi_panel *panel;
u32 status_mode;
- int rc = 0;
+ int rc = 0x1;
if (dsi_display == NULL)
return -EINVAL;
@@ -381,6 +398,14 @@ int dsi_display_check_status(void *display)
status_mode = panel->esd_config.status_mode;
+ mutex_lock(&dsi_display->display_lock);
+
+ if (!panel->panel_initialized) {
+ pr_debug("Panel not initialized\n");
+ mutex_unlock(&dsi_display->display_lock);
+ return rc;
+ }
+
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
DSI_ALL_CLKS, DSI_CLK_ON);
@@ -397,6 +422,7 @@ int dsi_display_check_status(void *display)
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
DSI_ALL_CLKS, DSI_CLK_OFF);
+ mutex_unlock(&dsi_display->display_lock);
return rc;
}
@@ -453,6 +479,32 @@ static void _dsi_display_setup_misr(struct dsi_display *display)
}
}
+/**
+ * dsi_display_get_cont_splash_status - Get continuous splash status.
+ * @dsi_display: DSI display handle.
+ *
+ * Return: boolean to signify whether continuous splash is enabled.
+ */
+static bool dsi_display_get_cont_splash_status(struct dsi_display *display)
+{
+ u32 val = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+ struct dsi_ctrl_hw *hw;
+
+ for (i = 0; i < display->ctrl_count ; i++) {
+ ctrl = &(display->ctrl[i]);
+ if (!ctrl || !ctrl->ctrl)
+ continue;
+
+ hw = &(ctrl->ctrl->hw);
+ val = hw->ops.get_cont_splash_status(hw);
+ if (!val)
+ return false;
+ }
+ return true;
+}
+
int dsi_display_set_power(struct drm_connector *connector,
int power_mode, void *disp)
{
@@ -674,6 +726,8 @@ static int dsi_display_debugfs_init(struct dsi_display *display)
{
int rc = 0;
struct dentry *dir, *dump_file, *misr_data;
+ char name[MAX_NAME_SIZE];
+ int i;
dir = debugfs_create_dir(display->name, NULL);
if (IS_ERR_OR_NULL(dir)) {
@@ -707,6 +761,35 @@ static int dsi_display_debugfs_init(struct dsi_display *display)
goto error_remove_dir;
}
+ for (i = 0; i < display->ctrl_count; i++) {
+ struct msm_dsi_phy *phy = display->ctrl[i].phy;
+
+ if (!phy || !phy->name)
+ continue;
+
+ snprintf(name, ARRAY_SIZE(name),
+ "%s_allow_phy_power_off", phy->name);
+ dump_file = debugfs_create_bool(name, 0600, dir,
+ &phy->allow_phy_power_off);
+ if (IS_ERR_OR_NULL(dump_file)) {
+ rc = PTR_ERR(dump_file);
+ pr_err("[%s] debugfs create %s failed, rc=%d\n",
+ display->name, name, rc);
+ goto error_remove_dir;
+ }
+
+ snprintf(name, ARRAY_SIZE(name),
+ "%s_regulator_min_datarate_bps", phy->name);
+ dump_file = debugfs_create_u32(name, 0600, dir,
+ &phy->regulator_min_datarate_bps);
+ if (IS_ERR_OR_NULL(dump_file)) {
+ rc = PTR_ERR(dump_file);
+ pr_err("[%s] debugfs create %s failed, rc=%d\n",
+ display->name, name, rc);
+ goto error_remove_dir;
+ }
+ }
+
display->root = dir;
return rc;
error_remove_dir:
@@ -924,7 +1007,7 @@ static int dsi_display_phy_enable(struct dsi_display *display);
/**
* dsi_display_phy_idle_on() - enable DSI PHY while coming out of idle screen.
* @dsi_display: DSI display handle.
- * @enable: enable/disable DSI PHY.
+ * @mmss_clamp: True if clamp is enabled.
*
* Return: error code.
*/
@@ -971,7 +1054,6 @@ static int dsi_display_phy_idle_on(struct dsi_display *display,
/**
* dsi_display_phy_idle_off() - disable DSI PHY while going to idle screen.
* @dsi_display: DSI display handle.
- * @enable: enable/disable DSI PHY.
*
* Return: error code.
*/
@@ -986,9 +1068,16 @@ static int dsi_display_phy_idle_off(struct dsi_display *display)
return -EINVAL;
}
- if (!display->panel->allow_phy_power_off) {
- pr_debug("panel doesn't support this feature\n");
- return 0;
+ for (i = 0; i < display->ctrl_count; i++) {
+ struct msm_dsi_phy *phy = display->ctrl[i].phy;
+
+ if (!phy)
+ continue;
+
+ if (!phy->allow_phy_power_off) {
+ pr_debug("phy doesn't support this feature\n");
+ return 0;
+ }
}
m_ctrl = &display->ctrl[display->cmd_master_idx];
@@ -1037,6 +1126,13 @@ void dsi_display_enable_event(struct dsi_display *display,
case SDE_CONN_EVENT_CMD_DONE:
irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
break;
+ case SDE_CONN_EVENT_VID_FIFO_OVERFLOW:
+ case SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW:
+ if (event_info) {
+ for (i = 0; i < display->ctrl_count; i++)
+ display->ctrl[i].ctrl->recovery_cb =
+ *event_info;
+ }
default:
/* nothing to do */
pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
@@ -1055,6 +1151,30 @@ void dsi_display_enable_event(struct dsi_display *display,
}
}
+/**
+ * dsi_config_host_engine_state_for_cont_splash()- update host engine state
+ * during continuous splash.
+ * @display: Handle to dsi display
+ *
+ */
+static void dsi_config_host_engine_state_for_cont_splash
+ (struct dsi_display *display)
+{
+ int i;
+ struct dsi_display_ctrl *ctrl;
+ enum dsi_engine_state host_state = DSI_CTRL_ENGINE_ON;
+
+ /* Sequence does not matter for split dsi usecases */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+
+ dsi_ctrl_update_host_engine_state_for_cont_splash(ctrl->ctrl,
+ host_state);
+ }
+}
+
static int dsi_display_ctrl_power_on(struct dsi_display *display)
{
int rc = 0;
@@ -1452,7 +1572,8 @@ static int dsi_display_ctrl_init(struct dsi_display *display)
for (i = 0 ; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
- rc = dsi_ctrl_host_init(ctrl->ctrl);
+ rc = dsi_ctrl_host_init(ctrl->ctrl,
+ display->is_cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to init host_%d, rc=%d\n",
display->name, i, rc);
@@ -1493,6 +1614,14 @@ static int dsi_display_ctrl_host_enable(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ /* Host engine states are already taken care for
+ * continuous splash case
+ */
+ if (display->is_cont_splash_enabled) {
+ pr_debug("cont splash enabled, host enable not required\n");
+ return 0;
+ }
+
m_ctrl = &display->ctrl[display->cmd_master_idx];
rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
@@ -1631,7 +1760,8 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(m_ctrl->phy,
&display->config,
m_src,
- true);
+ true,
+ display->is_cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -1646,7 +1776,8 @@ static int dsi_display_phy_enable(struct dsi_display *display)
rc = dsi_phy_enable(ctrl->phy,
&display->config,
DSI_PLL_SOURCE_NON_NATIVE,
- true);
+ true,
+ display->is_cont_splash_enabled);
if (rc) {
pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
display->name, rc);
@@ -1707,6 +1838,10 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
DSI_CTRL_CMD_FETCH_MEMORY);
+ if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
+ flags |= DSI_CTRL_CMD_LAST_COMMAND;
+ m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
+ }
/*
* 1. Setup commands in FIFO
* 2. Trigger commands
@@ -1756,6 +1891,14 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display)
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ /* For continuous splash use case ctrl states are updated
+ * separately and hence we do an early return
+ */
+ if (display->is_cont_splash_enabled) {
+ pr_debug("cont splash enabled, phy sw reset not required\n");
+ return 0;
+ }
+
m_ctrl = &display->ctrl[display->cmd_master_idx];
rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
@@ -1781,6 +1924,62 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display)
return rc;
}
+static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach)
+{
+ struct dsi_display *display;
+ struct dsi_display_ctrl *display_ctrl;
+ int rc, cnt;
+
+ if (!cb_data) {
+ pr_err("aspace cb called with invalid cb_data\n");
+ return;
+ }
+ display = (struct dsi_display *)cb_data;
+
+ /*
+ * acquire panel_lock to make sure no commands are in-progress
+ * while detaching the non-secure context banks
+ */
+ dsi_panel_acquire_panel_lock(display->panel);
+
+ if (is_detach) {
+ /* invalidate the stored iova */
+ display->cmd_buffer_iova = 0;
+
+ /* return the virtual address mapping */
+ msm_gem_put_vaddr_locked(display->tx_cmd_buf);
+ msm_gem_vunmap(display->tx_cmd_buf);
+
+ } else {
+ rc = msm_gem_get_iova_locked(display->tx_cmd_buf,
+ display->aspace, &(display->cmd_buffer_iova));
+ if (rc) {
+ pr_err("failed to get the iova rc %d\n", rc);
+ goto end;
+ }
+
+ display->vaddr =
+ (void *) msm_gem_get_vaddr_locked(display->tx_cmd_buf);
+
+ if (IS_ERR_OR_NULL(display->vaddr)) {
+ pr_err("failed to get va rc %d\n", rc);
+ goto end;
+ }
+ }
+
+ for (cnt = 0; cnt < display->ctrl_count; cnt++) {
+ display_ctrl = &display->ctrl[cnt];
+ display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
+ display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
+ display_ctrl->ctrl->vaddr = display->vaddr;
+ display_ctrl->ctrl->secure_mode = is_detach ? true : false;
+ }
+
+end:
+ /* release panel_lock */
+ dsi_panel_release_panel_lock(display->panel);
+}
+
static int dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
@@ -1798,7 +1997,6 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
{
struct dsi_display *display = to_dsi_display(host);
struct dsi_display_ctrl *display_ctrl;
- struct msm_gem_address_space *aspace = NULL;
int rc = 0, cnt = 0;
if (!host || !msg) {
@@ -1842,19 +2040,27 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
goto error_disable_cmd_engine;
}
- aspace = msm_gem_smmu_address_space_get(display->drm_dev,
- MSM_SMMU_DOMAIN_UNSECURE);
- if (!aspace) {
+ display->aspace = msm_gem_smmu_address_space_get(
+ display->drm_dev, MSM_SMMU_DOMAIN_UNSECURE);
+ if (!display->aspace) {
pr_err("failed to get aspace\n");
rc = -EINVAL;
goto free_gem;
}
- rc = msm_gem_get_iova(display->tx_cmd_buf, aspace,
+ /* register to aspace */
+ rc = msm_gem_address_space_register_cb(display->aspace,
+ dsi_display_aspace_cb_locked, (void *)display);
+ if (rc) {
+ pr_err("failed to register callback %d", rc);
+ goto free_gem;
+ }
+
+ rc = msm_gem_get_iova(display->tx_cmd_buf, display->aspace,
&(display->cmd_buffer_iova));
if (rc) {
pr_err("failed to get the iova rc %d\n", rc);
- goto free_gem;
+ goto free_aspace_cb;
}
display->vaddr =
@@ -1906,7 +2112,10 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
}
return rc;
put_iova:
- msm_gem_put_iova(display->tx_cmd_buf, aspace);
+ msm_gem_put_iova(display->tx_cmd_buf, display->aspace);
+free_aspace_cb:
+ msm_gem_address_space_unregister_cb(display->aspace,
+ dsi_display_aspace_cb_locked, display);
free_gem:
mutex_lock(&display->drm_dev->struct_mutex);
msm_gem_free_object(display->tx_cmd_buf);
@@ -2295,6 +2504,7 @@ int dsi_pre_clkon_cb(void *priv,
* not be changed during static screen.
*/
+ pr_debug("updating power states for ctrl and phy\n");
rc = dsi_display_ctrl_power_on(display);
if (rc) {
pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
@@ -2961,6 +3171,20 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
}
}
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+
+ if (!ctrl->phy || !ctrl->ctrl)
+ continue;
+
+ rc = dsi_phy_set_clk_freq(ctrl->phy, &ctrl->ctrl->clk_freq);
+ if (rc) {
+ pr_err("[%s] failed to set phy clk freq, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
if (priv_info->phy_timing_len) {
for (i = 0; i < display->ctrl_count; i++) {
ctrl = &display->ctrl[i];
@@ -3038,6 +3262,110 @@ static int _dsi_display_dev_deinit(struct dsi_display *display)
}
/**
+ * dsi_display_splash_res_init() - Initialize resources for continuous splash
+ * @display: Pointer to dsi display
+ * Returns: Zero on success
+ */
+static int dsi_display_splash_res_init(struct dsi_display *display)
+{
+ int rc = 0;
+
+ /* Vote for gdsc required to read register address space */
+
+ display->cont_splash_client = sde_power_client_create(display->phandle,
+ "cont_splash_client");
+ rc = sde_power_resource_enable(display->phandle,
+ display->cont_splash_client, true);
+ if (rc) {
+ pr_err("failed to vote gdsc for continuous splash, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ /* Verify whether continuous splash is enabled or not */
+ display->is_cont_splash_enabled =
+ dsi_display_get_cont_splash_status(display);
+ if (!display->is_cont_splash_enabled) {
+ pr_err("Continuous splash is not enabled\n");
+ goto splash_disabled;
+ }
+
+ /* Update splash status for clock manager */
+ dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+ display->is_cont_splash_enabled);
+
+ /* Vote for Core clk and link clk. Votes on ctrl and phy
+ * regulator are implicit from pre clk on callback
+ */
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+ display->name, rc);
+ goto clk_manager_update;
+ }
+
+ /* Vote on panel regulator will be removed during suspend path */
+ rc = dsi_pwr_enable_regulator(&display->panel->power_info, true);
+ if (rc) {
+ pr_err("[%s] failed to enable vregs, rc=%d\n",
+ display->panel->name, rc);
+ goto clks_disabled;
+ }
+
+ dsi_config_host_engine_state_for_cont_splash(display);
+
+ return rc;
+
+clks_disabled:
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_OFF);
+
+clk_manager_update:
+ /* Update splash status for clock manager */
+ dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+ false);
+
+splash_disabled:
+ (void)sde_power_resource_enable(display->phandle,
+ display->cont_splash_client, false);
+ display->is_cont_splash_enabled = false;
+ return rc;
+}
+
+/**
+ * dsi_display_splash_res_cleanup() - cleanup for continuous splash
+ * @display: Pointer to dsi display
+ * Returns: Zero on success
+ */
+int dsi_display_splash_res_cleanup(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display->is_cont_splash_enabled)
+ return 0;
+
+ rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_OFF);
+ if (rc)
+ pr_err("[%s] failed to disable DSI link clocks, rc=%d\n",
+ display->name, rc);
+
+ rc = sde_power_resource_enable(display->phandle,
+ display->cont_splash_client, false);
+ if (rc)
+ pr_err("failed to remove vote on gdsc for continuous splash, rc=%d\n",
+ rc);
+
+ display->is_cont_splash_enabled = false;
+ /* Update splash status for clock manager */
+ dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
+ display->is_cont_splash_enabled);
+
+ return rc;
+}
+
+/**
* dsi_display_bind - bind dsi device with controlling device
* @dev: Pointer to base of platform device
* @master: Pointer to container of drm device
@@ -3123,6 +3451,7 @@ static int dsi_display_bind(struct device *dev,
}
}
+ display->phandle = &priv->phandle;
info.pre_clkoff_cb = dsi_pre_clkoff_cb;
info.pre_clkon_cb = dsi_pre_clkon_cb;
info.post_clkoff_cb = dsi_post_clkoff_cb;
@@ -3199,6 +3528,12 @@ static int dsi_display_bind(struct device *dev,
pr_info("Successfully bind display panel '%s'\n", display->name);
display->drm_dev = drm;
+
+ /* Initialize resources for continuous splash */
+ rc = dsi_display_splash_res_init(display);
+ if (rc)
+ pr_err("Continuous splash resource init failed, rc=%d\n", rc);
+
goto error;
error_host_deinit:
@@ -3307,6 +3642,8 @@ int dsi_display_dev_probe(struct platform_device *pdev)
return -ENOMEM;
display->name = of_get_property(pdev->dev.of_node, "label", NULL);
+ if (!display->name)
+ display->name = "unknown";
if (!boot_displays_parsed) {
boot_displays[DSI_PRIMARY].boot_disp_en = false;
@@ -3616,9 +3953,6 @@ int dsi_display_get_info(struct msm_display_info *info, void *disp)
if (display->panel->esd_config.esd_enabled)
info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
- memcpy(&info->roi_caps, &display->panel->roi_caps,
- sizeof(info->roi_caps));
-
error:
mutex_unlock(&display->display_lock);
return rc;
@@ -4024,6 +4358,232 @@ static int dsi_display_pre_switch(struct dsi_display *display)
return rc;
}
+static void dsi_display_handle_fifo_underflow(struct work_struct *work)
+{
+ struct dsi_display *display = NULL;
+
+ display = container_of(work, struct dsi_display, fifo_underflow_work);
+ if (!display)
+ return;
+ pr_debug("handle DSI FIFO underflow error\n");
+
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_ON);
+ dsi_display_soft_reset(display);
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static void dsi_display_handle_fifo_overflow(struct work_struct *work)
+{
+ struct dsi_display *display = NULL;
+ struct dsi_display_ctrl *ctrl;
+ int i, rc;
+ int mask = BIT(20); /* clock lane */
+ int (*cb_func)(void *event_usr_ptr,
+ uint32_t event_idx, uint32_t instance_idx,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3);
+ void *data;
+ u32 version = 0;
+
+ display = container_of(work, struct dsi_display, fifo_overflow_work);
+ if (!display || !display->panel ||
+ (display->panel->panel_mode != DSI_OP_VIDEO_MODE))
+ return;
+
+ pr_debug("handle DSI FIFO overflow error\n");
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_ON);
+
+ /*
+ * below recovery sequence is not applicable to
+ * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
+ */
+ ctrl = &display->ctrl[display->clk_master_idx];
+ version = dsi_ctrl_get_hw_version(ctrl->ctrl);
+ if (!version || (version < 0x20020001))
+ goto end;
+
+ /* reset ctrl and lanes */
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_reset(ctrl->ctrl, mask);
+ rc = dsi_phy_lane_reset(ctrl->phy);
+ }
+
+ /* wait for display line count to be in active area */
+ ctrl = &display->ctrl[display->clk_master_idx];
+ if (ctrl->ctrl->recovery_cb.event_cb) {
+ cb_func = ctrl->ctrl->recovery_cb.event_cb;
+ data = ctrl->ctrl->recovery_cb.event_usr_ptr;
+ rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+ display->clk_master_idx, 0, 0, 0, 0);
+ if (rc < 0) {
+ pr_debug("sde callback failed\n");
+ goto end;
+ }
+ }
+
+ /* Enable Video mode for DSI controller */
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
+ }
+ /*
+ * Add sufficient delay to make sure
+ * pixel transmission has started
+ */
+ udelay(200);
+end:
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static void dsi_display_handle_lp_rx_timeout(struct work_struct *work)
+{
+ struct dsi_display *display = NULL;
+ struct dsi_display_ctrl *ctrl;
+ int i, rc;
+ int mask = (BIT(20) | (0xF << 16)); /* clock lane and 4 data lane */
+ int (*cb_func)(void *event_usr_ptr,
+ uint32_t event_idx, uint32_t instance_idx,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3);
+ void *data;
+ u32 version = 0;
+
+ display = container_of(work, struct dsi_display, fifo_overflow_work);
+ if (!display || (display->panel->panel_mode != DSI_OP_VIDEO_MODE))
+ return;
+ pr_debug("handle DSI LP RX Timeout error\n");
+
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_ON);
+
+ /*
+ * below recovery sequence is not applicable to
+ * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
+ */
+ ctrl = &display->ctrl[display->clk_master_idx];
+ version = dsi_ctrl_get_hw_version(ctrl->ctrl);
+ if (!version || (version < 0x20020001))
+ goto end;
+
+ /* reset ctrl and lanes */
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_reset(ctrl->ctrl, mask);
+ rc = dsi_phy_lane_reset(ctrl->phy);
+ }
+
+ ctrl = &display->ctrl[display->clk_master_idx];
+ if (ctrl->ctrl->recovery_cb.event_cb) {
+ cb_func = ctrl->ctrl->recovery_cb.event_cb;
+ data = ctrl->ctrl->recovery_cb.event_usr_ptr;
+ rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+ display->clk_master_idx, 0, 0, 0, 0);
+ if (rc < 0) {
+ pr_debug("Target is in suspend/shutdown\n");
+ goto end;
+ }
+ }
+
+ /* Enable Video mode for DSI controller */
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
+ }
+
+ /*
+ * Add sufficient delay to make sure
+ * pixel transmission has started
+ */
+ udelay(200);
+end:
+ dsi_display_clk_ctrl(display->dsi_clk_handle,
+ DSI_ALL_CLKS, DSI_CLK_OFF);
+}
+
+static int dsi_display_cb_error_handler(void *data,
+ uint32_t event_idx, uint32_t instance_idx,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3)
+{
+ struct dsi_display *display = data;
+
+ if (!display)
+ return -EINVAL;
+
+ switch (event_idx) {
+ case DSI_FIFO_UNDERFLOW:
+ queue_work(display->err_workq, &display->fifo_underflow_work);
+ break;
+ case DSI_FIFO_OVERFLOW:
+ queue_work(display->err_workq, &display->fifo_overflow_work);
+ break;
+ case DSI_LP_Rx_TIMEOUT:
+ queue_work(display->err_workq, &display->lp_rx_timeout_work);
+ break;
+ default:
+ pr_warn("unhandled error interrupt: %d\n", event_idx);
+ break;
+ }
+
+ return 0;
+}
+
+static void dsi_display_register_error_handler(struct dsi_display *display)
+{
+ int i = 0;
+ struct dsi_display_ctrl *ctrl;
+ struct dsi_event_cb_info event_info;
+
+ if (!display)
+ return;
+
+ display->err_workq = create_singlethread_workqueue("dsi_err_workq");
+ if (!display->err_workq) {
+ pr_err("failed to create dsi workq!\n");
+ return;
+ }
+
+ INIT_WORK(&display->fifo_underflow_work,
+ dsi_display_handle_fifo_underflow);
+ INIT_WORK(&display->fifo_overflow_work,
+ dsi_display_handle_fifo_overflow);
+ INIT_WORK(&display->lp_rx_timeout_work,
+ dsi_display_handle_lp_rx_timeout);
+
+ memset(&event_info, 0, sizeof(event_info));
+
+ event_info.event_cb = dsi_display_cb_error_handler;
+ event_info.event_usr_ptr = display;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ ctrl->ctrl->irq_info.irq_err_cb = event_info;
+ }
+}
+
+static void dsi_display_unregister_error_handler(struct dsi_display *display)
+{
+ int i = 0;
+ struct dsi_display_ctrl *ctrl;
+
+ if (!display)
+ return;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ memset(&ctrl->ctrl->irq_info.irq_err_cb,
+ 0, sizeof(struct dsi_event_cb_info));
+ }
+
+ if (display->err_workq)
+ destroy_workqueue(display->err_workq);
+}
+
int dsi_display_prepare(struct dsi_display *display)
{
int rc = 0;
@@ -4053,11 +4613,18 @@ int dsi_display_prepare(struct dsi_display *display)
goto error;
}
- rc = dsi_panel_pre_prepare(display->panel);
- if (rc) {
- pr_err("[%s] panel pre-prepare failed, rc=%d\n",
- display->name, rc);
- goto error;
+ if (!display->is_cont_splash_enabled) {
+ /*
+ * For continuous splash usecase we skip panel
+ * pre prepare since the regulator vote is already
+ * taken care in splash resource init
+ */
+ rc = dsi_panel_pre_prepare(display->panel);
+ if (rc) {
+ pr_err("[%s] panel pre-prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
}
rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
@@ -4094,6 +4661,8 @@ int dsi_display_prepare(struct dsi_display *display)
display->name, rc);
goto error_phy_disable;
}
+ /* Set up DSI ERROR event callback */
+ dsi_display_register_error_handler(display);
rc = dsi_display_ctrl_host_enable(display);
if (rc) {
@@ -4116,12 +4685,19 @@ int dsi_display_prepare(struct dsi_display *display)
goto error_ctrl_link_off;
}
- rc = dsi_panel_prepare(display->panel);
- if (rc) {
- pr_err("[%s] panel prepare failed, rc=%d\n", display->name, rc);
- goto error_ctrl_link_off;
+ if (!display->is_cont_splash_enabled) {
+ /*
+ * For continuous splash usecase we skip panel
+	 * prepare since the panel is already in
+ * active state and panel on commands are not needed
+ */
+ rc = dsi_panel_prepare(display->panel);
+ if (rc) {
+ pr_err("[%s] panel prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_link_off;
+ }
}
-
goto error;
error_ctrl_link_off:
@@ -4149,13 +4725,20 @@ static int dsi_display_calc_ctrl_roi(const struct dsi_display *display,
struct dsi_rect *out_roi)
{
const struct dsi_rect *bounds = &ctrl->ctrl->mode_bounds;
+ struct dsi_display_mode *cur_mode;
+ struct msm_roi_caps *roi_caps;
struct dsi_rect req_roi = { 0 };
int rc = 0;
- if (req_rois->num_rects > display->panel->roi_caps.num_roi) {
+ cur_mode = display->panel->cur_mode;
+ if (!cur_mode)
+ return 0;
+
+ roi_caps = &cur_mode->priv_info->roi_caps;
+ if (req_rois->num_rects > roi_caps->num_roi) {
pr_err("request for %d rois greater than max %d\n",
req_rois->num_rects,
- display->panel->roi_caps.num_roi);
+ roi_caps->num_roi);
rc = -EINVAL;
goto exit;
}
@@ -4192,13 +4775,20 @@ static int dsi_display_calc_ctrl_roi(const struct dsi_display *display,
static int dsi_display_set_roi(struct dsi_display *display,
struct msm_roi_list *rois)
{
+ struct dsi_display_mode *cur_mode;
+ struct msm_roi_caps *roi_caps;
int rc = 0;
int i;
if (!display || !rois || !display->panel)
return -EINVAL;
- if (!display->panel->roi_caps.enabled)
+ cur_mode = display->panel->cur_mode;
+ if (!cur_mode)
+ return 0;
+
+ roi_caps = &cur_mode->priv_info->roi_caps;
+ if (!roi_caps->enabled)
return 0;
for (i = 0; i < display->ctrl_count; i++) {
@@ -4253,6 +4843,46 @@ int dsi_display_pre_kickoff(struct dsi_display *display,
return rc;
}
+int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display || !display->panel) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!display->panel->cur_mode) {
+ pr_err("no valid mode set for the display");
+ return -EINVAL;
+ }
+
+ if (!display->is_cont_splash_enabled)
+ return 0;
+
+ if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_display_vid_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+ rc = dsi_display_cmd_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error_out;
+ }
+ } else {
+ pr_err("[%s] Invalid configuration\n", display->name);
+ rc = -EINVAL;
+ }
+
+error_out:
+ return rc;
+}
+
int dsi_display_enable(struct dsi_display *display)
{
int rc = 0;
@@ -4268,6 +4898,25 @@ int dsi_display_enable(struct dsi_display *display)
return -EINVAL;
}
+ /* Engine states and panel states are populated during splash
+ * resource init and hence we return early
+ */
+ if (display->is_cont_splash_enabled) {
+
+ dsi_display_config_ctrl_for_cont_splash(display);
+
+ rc = dsi_display_splash_res_cleanup(display);
+ if (rc) {
+ pr_err("Continuous splash res cleanup failed, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ display->panel->panel_initialized = true;
+ pr_debug("cont splash enabled, display enable not required\n");
+ return 0;
+ }
+
mutex_lock(&display->display_lock);
mode = display->panel->cur_mode;
@@ -4475,6 +5124,9 @@ int dsi_display_unprepare(struct dsi_display *display)
pr_err("[%s] failed to disable Link clocks, rc=%d\n",
display->name, rc);
+ /* Free up DSI ERROR event callback */
+ dsi_display_unregister_error_handler(display);
+
rc = dsi_display_ctrl_deinit(display);
if (rc)
pr_err("[%s] failed to deinit controller, rc=%d\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index da4f5eb..886641b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation.All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -128,6 +128,7 @@ struct dsi_display_clk_info {
* @display_type: Display type as defined in device tree.
* @list: List pointer.
* @is_active: Is display active.
+ * @is_cont_splash_enabled: Is continuous splash enabled
* @display_lock: Mutex for dsi_display interface.
* @ctrl_count: Number of DSI interfaces required by panel.
* @ctrl: Controller information for DSI display.
@@ -165,6 +166,7 @@ struct dsi_display {
const char *display_type;
struct list_head list;
bool is_active;
+ bool is_cont_splash_enabled;
struct mutex display_lock;
u32 ctrl_count;
@@ -192,11 +194,15 @@ struct dsi_display {
u32 cmd_buffer_size;
u32 cmd_buffer_iova;
void *vaddr;
+ struct msm_gem_address_space *aspace;
struct mipi_dsi_host host;
struct dsi_bridge *bridge;
u32 cmd_engine_refcount;
+ struct sde_power_handle *phandle;
+ struct sde_power_client *cont_splash_client;
+
void *clk_mngr;
void *dsi_clk_handle;
void *mdp_clk_handle;
@@ -206,6 +212,11 @@ struct dsi_display {
bool misr_enable;
u32 misr_frame_count;
+ /* multiple dsi error handlers */
+ struct workqueue_struct *err_workq;
+ struct work_struct fifo_underflow_work;
+ struct work_struct fifo_overflow_work;
+ struct work_struct lp_rx_timeout_work;
};
int dsi_display_dev_probe(struct platform_device *pdev);
@@ -359,6 +370,22 @@ int dsi_display_set_mode(struct dsi_display *display,
int dsi_display_prepare(struct dsi_display *display);
/**
+ * dsi_display_splash_res_cleanup() - cleanup for continuous splash
+ * @display: Pointer to dsi display
+ * Returns: Zero on success
+ */
+int dsi_display_splash_res_cleanup(struct dsi_display *display);
+
+/**
+ * dsi_display_config_ctrl_for_cont_splash()- Enable engine modes for DSI
+ * controller during continuous splash
+ * @display: Handle to DSI display
+ *
+ * Return: returns error code
+ */
+int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display);
+
+/**
* dsi_display_enable() - enable display
* @display: Handle to display.
*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 280c754..b0a06e1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -65,7 +65,7 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
}
-static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
struct drm_display_mode *drm_mode)
{
memset(drm_mode, 0, sizeof(*drm_mode));
@@ -129,6 +129,9 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
+ if (!c_bridge || !c_bridge->display)
+ pr_err("Incorrect bridge details\n");
+
/* By this point mode should have been validated through mode_fixup */
rc = dsi_display_set_mode(c_bridge->display,
&(c_bridge->dsi_mode), 0x0);
@@ -157,11 +160,16 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
rc = dsi_display_enable(c_bridge->display);
if (rc) {
pr_err("[%d] DSI display enable failed, rc=%d\n",
- c_bridge->id, rc);
+ c_bridge->id, rc);
(void)dsi_display_unprepare(c_bridge->display);
}
SDE_ATRACE_END("dsi_display_enable");
SDE_ATRACE_END("dsi_bridge_pre_enable");
+
+ rc = dsi_display_splash_res_cleanup(c_bridge->display);
+ if (rc)
+ pr_err("Continuous splash pipeline cleanup failed, rc=%d\n",
+ rc);
}
static void dsi_bridge_enable(struct drm_bridge *bridge)
@@ -274,7 +282,7 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
if (bridge->encoder && bridge->encoder->crtc) {
- convert_to_dsi_mode(&bridge->encoder->crtc->mode,
+ convert_to_dsi_mode(&bridge->encoder->crtc->state->mode,
&cur_dsi_mode);
rc = dsi_display_validate_mode_vrr(c_bridge->display,
&cur_dsi_mode, &dsi_mode);
@@ -290,7 +298,7 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
}
- convert_to_drm_mode(&dsi_mode, adjusted_mode);
+ dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
return true;
}
@@ -330,6 +338,11 @@ int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
sizeof(dsi_mode.priv_info->dsc));
}
+ if (dsi_mode.priv_info->roi_caps.enabled) {
+ memcpy(&mode_info->roi_caps, &dsi_mode.priv_info->roi_caps,
+ sizeof(dsi_mode.priv_info->roi_caps));
+ }
+
return 0;
}
@@ -343,9 +356,8 @@ static const struct drm_bridge_funcs dsi_bridge_ops = {
.mode_set = dsi_bridge_mode_set,
};
-int dsi_conn_post_init(struct drm_connector *connector,
- void *info,
- void *display)
+int dsi_conn_set_info_blob(struct drm_connector *connector,
+ void *info, void *display, struct msm_mode_info *mode_info)
{
struct dsi_display *dsi_display = display;
struct dsi_panel *panel;
@@ -444,23 +456,23 @@ int dsi_conn_post_init(struct drm_connector *connector,
break;
}
- if (panel->roi_caps.enabled) {
+ if (mode_info && mode_info->roi_caps.enabled) {
sde_kms_info_add_keyint(info, "partial_update_num_roi",
- panel->roi_caps.num_roi);
+ mode_info->roi_caps.num_roi);
sde_kms_info_add_keyint(info, "partial_update_xstart",
- panel->roi_caps.align.xstart_pix_align);
+ mode_info->roi_caps.align.xstart_pix_align);
sde_kms_info_add_keyint(info, "partial_update_walign",
- panel->roi_caps.align.width_pix_align);
+ mode_info->roi_caps.align.width_pix_align);
sde_kms_info_add_keyint(info, "partial_update_wmin",
- panel->roi_caps.align.min_width);
+ mode_info->roi_caps.align.min_width);
sde_kms_info_add_keyint(info, "partial_update_ystart",
- panel->roi_caps.align.ystart_pix_align);
+ mode_info->roi_caps.align.ystart_pix_align);
sde_kms_info_add_keyint(info, "partial_update_halign",
- panel->roi_caps.align.height_pix_align);
+ mode_info->roi_caps.align.height_pix_align);
sde_kms_info_add_keyint(info, "partial_update_hmin",
- panel->roi_caps.align.min_height);
+ mode_info->roi_caps.align.min_height);
sde_kms_info_add_keyint(info, "partial_update_roimerge",
- panel->roi_caps.merge_rois);
+ mode_info->roi_caps.merge_rois);
}
end:
@@ -554,7 +566,7 @@ int dsi_connector_get_modes(struct drm_connector *connector,
struct drm_display_mode *m;
memset(&drm_mode, 0x0, sizeof(drm_mode));
- convert_to_drm_mode(&modes[i], &drm_mode);
+ dsi_convert_to_drm_mode(&modes[i], &drm_mode);
m = drm_mode_duplicate(connector->dev, &drm_mode);
if (!m) {
pr_err("failed to add mode %ux%u\n",
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 828e65d..ec58479 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -33,15 +33,17 @@ struct dsi_bridge {
};
/**
- * dsi_conn_post_init - callback to perform additional initialization steps
+ * dsi_conn_set_info_blob - callback to perform info blob initialization
* @connector: Pointer to drm connector structure
* @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
+ * @mode_info: Pointer to mode info structure
* Returns: Zero on success
*/
-int dsi_conn_post_init(struct drm_connector *connector,
+int dsi_conn_set_info_blob(struct drm_connector *connector,
void *info,
- void *display);
+ void *display,
+ struct msm_mode_info *mode_info);
/**
* dsi_conn_detect - callback to determine if connector is connected
@@ -128,4 +130,12 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
*/
int dsi_conn_post_kickoff(struct drm_connector *connector);
+/**
+ * dsi_convert_to_drm_mode - Update drm mode with dsi mode information
+ * @dsi_mode: input parameter. structure having dsi mode information.
+ * @drm_mode: output parameter. DRM mode set for the display
+ */
+void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+ struct drm_display_mode *drm_mode);
+
#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 4688741..d0cb51b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -493,10 +493,12 @@ static int dsi_panel_tx_cmd_set(struct dsi_panel *panel,
}
for (i = 0; i < count; i++) {
- /* TODO: handle last command */
if (state == DSI_CMD_SET_STATE_LP)
cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
+ if (cmds->last_command)
+ cmds->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
+
len = ops->transfer(panel->host, &cmds->msg);
if (len < 0) {
rc = len;
@@ -743,6 +745,10 @@ static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
if (rc)
pr_err("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n", rc);
+ pr_debug("panel horz active:%d front_portch:%d back_porch:%d sync_skew:%d\n",
+ mode->h_active, mode->h_front_porch, mode->h_back_porch,
+ mode->h_sync_width);
+
rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-height",
&mode->v_active);
if (rc) {
@@ -774,6 +780,9 @@ static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
rc);
goto error;
}
+ pr_debug("panel vert active:%d front_portch:%d back_porch:%d pulse_width:%d\n",
+ mode->v_active, mode->v_front_porch, mode->v_back_porch,
+ mode->v_sync_width);
error:
return rc;
@@ -2173,7 +2182,9 @@ static int dsi_panel_parse_dsc_params(struct dsi_display_mode *mode,
intf_width = mode->timing.h_active;
if (intf_width % priv_info->dsc.slice_width) {
- pr_err("invalid slice width for the panel\n");
+ pr_err("invalid slice width for the intf width:%d slice width:%d\n",
+ intf_width, priv_info->dsc.slice_width);
+ rc = -EINVAL;
goto error;
}
@@ -2392,21 +2403,37 @@ static int dsi_panel_parse_roi_alignment(struct device_node *of_node,
return rc;
}
-static int dsi_panel_parse_partial_update_caps(struct dsi_panel *panel,
- struct device_node *of_node)
+static int dsi_panel_parse_partial_update_caps(struct dsi_display_mode *mode,
+ struct device_node *of_node)
{
- struct msm_roi_caps *roi_caps = &panel->roi_caps;
+ struct msm_roi_caps *roi_caps = NULL;
const char *data;
int rc = 0;
+ if (!mode || !mode->priv_info) {
+ pr_err("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ roi_caps = &mode->priv_info->roi_caps;
+
memset(roi_caps, 0, sizeof(*roi_caps));
data = of_get_property(of_node, "qcom,partial-update-enabled", NULL);
if (data) {
if (!strcmp(data, "dual_roi"))
roi_caps->num_roi = 2;
- else
+ else if (!strcmp(data, "single_roi"))
roi_caps->num_roi = 1;
+ else {
+ pr_info(
+ "invalid value for qcom,partial-update-enabled: %s\n",
+ data);
+ return 0;
+ }
+ } else {
+ pr_info("partial update disabled as the property is not set\n");
+ return 0;
}
roi_caps->merge_rois = of_property_read_bool(of_node,
@@ -2419,7 +2446,7 @@ static int dsi_panel_parse_partial_update_caps(struct dsi_panel *panel,
if (roi_caps->enabled)
rc = dsi_panel_parse_roi_alignment(of_node,
- &panel->roi_caps.align);
+ &roi_caps->align);
if (rc)
memset(roi_caps, 0, sizeof(*roi_caps));
@@ -2736,10 +2763,6 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
if (rc)
pr_err("failed to parse hdr config, rc=%d\n", rc);
- rc = dsi_panel_parse_partial_update_caps(panel, of_node);
- if (rc)
- pr_debug("failed to partial update caps, rc=%d\n", rc);
-
rc = dsi_panel_get_mode_count(panel, of_node);
if (rc) {
pr_err("failed to get mode count, rc=%d\n", rc);
@@ -3048,6 +3071,10 @@ int dsi_panel_get_mode(struct dsi_panel *panel,
"failed to parse panel phy timings, rc=%d\n", rc);
goto parse_fail;
}
+
+ rc = dsi_panel_parse_partial_update_caps(mode, child_np);
+ if (rc)
+ pr_err("failed to partial update caps, rc=%d\n", rc);
}
goto done;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f63fd27..ea67f45 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -156,8 +156,6 @@ struct dsi_panel {
enum dsi_op_mode panel_mode;
struct dsi_dfps_capabilities dfps_caps;
- struct msm_roi_caps roi_caps;
-
struct dsi_panel_phy_props phy_props;
struct dsi_display_mode *cur_mode;
@@ -193,6 +191,16 @@ static inline bool dsi_panel_initialized(struct dsi_panel *panel)
return panel->panel_initialized;
}
+static inline void dsi_panel_acquire_panel_lock(struct dsi_panel *panel)
+{
+ mutex_lock(&panel->panel_lock);
+}
+
+static inline void dsi_panel_release_panel_lock(struct dsi_panel *panel)
+{
+ mutex_unlock(&panel->panel_lock);
+}
+
struct dsi_panel *dsi_panel_get(struct device *parent,
struct device_node *of_node,
int topology_override);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index a91dba8..197d448 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -33,6 +33,8 @@
#define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
+#define BITS_PER_BYTE 8
+
struct dsi_phy_list_item {
struct msm_dsi_phy *phy;
struct list_head list;
@@ -290,6 +292,14 @@ static int dsi_phy_settings_init(struct platform_device *pdev,
/* Actual timing values are dependent on panel */
timing->count_per_lane = phy->ver_info->timing_cfg_count;
+
+ phy->allow_phy_power_off = of_property_read_bool(pdev->dev.of_node,
+ "qcom,panel-allow-phy-poweroff");
+
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,dsi-phy-regulator-min-datarate-bps",
+ &phy->regulator_min_datarate_bps);
+
return 0;
err:
lane->count_per_lane = 0;
@@ -641,7 +651,8 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
goto error;
}
- if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF) {
+ if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
+ dsi_phy->regulator_required) {
rc = dsi_pwr_enable_regulator(
&dsi_phy->pwr_info.phy_pwr, true);
if (rc) {
@@ -652,7 +663,8 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
}
}
} else {
- if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF) {
+ if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
+ dsi_phy->regulator_required) {
rc = dsi_pwr_enable_regulator(
&dsi_phy->pwr_info.phy_pwr, false);
if (rc) {
@@ -787,6 +799,7 @@ int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
* @config: DSI host configuration.
* @pll_source: Source PLL for PHY clock.
* @skip_validation: Validation will not be performed on parameters.
+ * @is_cont_splash_enabled: check whether continuous splash enabled.
*
* Validates and enables DSI PHY.
*
@@ -795,7 +808,8 @@ int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config,
int dsi_phy_enable(struct msm_dsi_phy *phy,
struct dsi_host_config *config,
enum dsi_phy_pll_source pll_source,
- bool skip_validation)
+ bool skip_validation,
+ bool is_cont_splash_enabled)
{
int rc = 0;
@@ -829,7 +843,10 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
goto error;
}
- dsi_phy_enable_hw(phy);
+ if (!is_cont_splash_enabled) {
+ dsi_phy_enable_hw(phy);
+ pr_debug("cont splash not enabled, phy enable required\n");
+ }
phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
error:
@@ -838,6 +855,21 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
return rc;
}
+int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
+{
+ int ret = 0;
+
+ if (!phy)
+ return ret;
+
+ mutex_lock(&phy->phy_lock);
+ if (phy->hw.ops.phy_lane_reset)
+ ret = phy->hw.ops.phy_lane_reset(&phy->hw);
+ mutex_unlock(&phy->phy_lock);
+
+ return ret;
+}
+
/**
* dsi_phy_disable() - disable DSI PHY hardware.
* @phy: DSI PHY handle.
@@ -876,6 +908,8 @@ int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable)
return -EINVAL;
}
+ pr_debug("[%s] enable=%d\n", phy->name, enable);
+
mutex_lock(&phy->phy_lock);
if (enable) {
if (phy->hw.ops.phy_idle_on)
@@ -884,7 +918,17 @@ int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable)
if (phy->hw.ops.regulator_enable)
phy->hw.ops.regulator_enable(&phy->hw,
&phy->cfg.regulators);
+
+ if (phy->hw.ops.enable)
+ phy->hw.ops.enable(&phy->hw, &phy->cfg);
+
+ phy->dsi_phy_state = DSI_PHY_ENGINE_ON;
} else {
+ phy->dsi_phy_state = DSI_PHY_ENGINE_OFF;
+
+ if (phy->hw.ops.disable)
+ phy->hw.ops.disable(&phy->hw, &phy->cfg);
+
if (phy->hw.ops.phy_idle_off)
phy->hw.ops.phy_idle_off(&phy->hw);
}
@@ -894,6 +938,33 @@ int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable)
}
/**
+ * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
+ * @phy: DSI PHY handle
+ * @clk_freq: link clock frequency
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
+ struct link_clk_freq *clk_freq)
+{
+ if (!phy || !clk_freq) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ phy->regulator_required = clk_freq->byte_clk_rate >
+ (phy->regulator_min_datarate_bps / BITS_PER_BYTE);
+
+ pr_debug("[%s] lane_datarate=%u min_datarate=%u required=%d\n",
+ phy->name,
+ clk_freq->byte_clk_rate * BITS_PER_BYTE,
+ phy->regulator_min_datarate_bps,
+ phy->regulator_required);
+
+ return 0;
+}
+
+/**
* dsi_phy_set_timing_params() - timing parameters for the panel
* @phy: DSI PHY handle
* @timing: array holding timing params.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index e721486..a158812 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -67,6 +67,9 @@ enum phy_engine_state {
* @mode: Current mode.
* @data_lanes: Number of data lanes used.
* @dst_format: Destination format.
+ * @allow_phy_power_off: True if PHY is allowed to power off when idle
+ * @regulator_min_datarate_bps: Minimum per lane data rate to turn on regulator
+ * @regulator_required: True if phy regulator is required
*/
struct msm_dsi_phy {
struct platform_device *pdev;
@@ -88,6 +91,10 @@ struct msm_dsi_phy {
struct dsi_mode_info mode;
enum dsi_data_lanes data_lanes;
enum dsi_pixel_format dst_format;
+
+ bool allow_phy_power_off;
+ u32 regulator_min_datarate_bps;
+ bool regulator_required;
};
/**
@@ -159,6 +166,7 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
* @config: DSI host configuration.
* @pll_source: Source PLL for PHY clock.
* @skip_validation: Validation will not be performed on parameters.
+ * @is_cont_splash_enabled: check whether continuous splash enabled.
*
* Validates and enables DSI PHY.
*
@@ -167,7 +175,8 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
struct dsi_host_config *config,
enum dsi_phy_pll_source pll_source,
- bool skip_validation);
+ bool skip_validation,
+ bool is_cont_splash_enabled);
/**
* dsi_phy_disable() - disable DSI PHY hardware.
@@ -209,6 +218,16 @@ int dsi_phy_clk_cb_register(struct msm_dsi_phy *phy,
int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
/**
+ * dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
+ * @phy: DSI PHY handle
+ * @clk_freq: link clock frequency
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clk_freq(struct msm_dsi_phy *phy,
+ struct link_clk_freq *clk_freq);
+
+/**
* dsi_phy_set_timing_params() - timing parameters for the panel
* @phy: DSI PHY handle
* @timing: array holding timing params.
@@ -223,6 +242,14 @@ int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
u32 *timing, u32 size);
/**
+ * dsi_phy_lane_reset() - Reset DSI PHY lanes in case of error
+ * @phy: DSI PHY handle
+ *
+ * Return: error code.
+ */
+int dsi_phy_lane_reset(struct msm_dsi_phy *phy);
+
+/**
* dsi_phy_drv_register() - register platform driver for dsi phy
*/
void dsi_phy_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index 51c2f46..efebd99 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -233,6 +233,13 @@ struct dsi_phy_hw_ops {
int (*phy_timing_val)(struct dsi_phy_per_lane_cfgs *timing_val,
u32 *timing, u32 size);
+ /**
+ * phy_lane_reset() - Reset dsi phy lanes in case of error.
+ * @phy: Pointer to DSI PHY hardware object.
+ * Return: error code.
+ */
+ int (*phy_lane_reset)(struct dsi_phy_hw *phy);
+
void *timing_ops;
struct phy_ulps_config_ops ulps_ops;
};
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index 371239d..8d91141 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -17,6 +17,7 @@
#include <linux/iopoll.h>
#include "dsi_hw.h"
#include "dsi_phy_hw.h"
+#include "dsi_catalog.h"
#define DSIPHY_CMN_CLK_CFG0 0x010
#define DSIPHY_CMN_CLK_CFG1 0x014
@@ -373,6 +374,29 @@ void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy,
lanes);
}
+int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy)
+{
+ int ret = 0, loop = 10, u_dly = 200;
+ u32 ln_status = 0;
+
+ while ((ln_status != 0x1f) && loop) {
+ DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
+ wmb(); /* ensure register is committed */
+ loop--;
+ udelay(u_dly);
+ ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
+ pr_debug("trial no: %d\n", loop);
+ }
+
+ if (!loop)
+ pr_debug("could not reset phy lanes\n");
+
+ DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
+ wmb(); /* ensure register is committed */
+
+ return ret;
+}
+
void dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy,
struct dsi_phy_cfg *cfg, u32 lanes)
{
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 28f2e7c..95bdc36 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -16,6 +16,9 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/msm_drm_notify.h>
+#include <linux/notifier.h>
+
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
@@ -30,6 +33,47 @@ struct msm_commit {
struct kthread_work commit_work;
};
+static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
+
+/**
+ * msm_drm_register_client - register a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function registers a notifier callback function
+ * to msm_drm_notifier_list, which would be called when
+ * received unblank/power down event.
+ */
+int msm_drm_register_client(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&msm_drm_notifier_list,
+ nb);
+}
+
+/**
+ * msm_drm_unregister_client - unregister a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function unregisters the callback function from
+ * msm_drm_notifier_list.
+ */
+int msm_drm_unregister_client(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
+ nb);
+}
+
+/**
+ * msm_drm_notifier_call_chain - notify clients of drm_events
+ * @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
+ * @v: notifier data, include display id and display blank
+ * event(unblank or power down).
+ */
+static int msm_drm_notifier_call_chain(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&msm_drm_notifier_list, val,
+ v);
+}
+
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
*/
@@ -97,7 +141,8 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector_state *old_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int i;
+ struct msm_drm_notifier notifier_data;
+ int i, blank;
SDE_ATRACE_BEGIN("msm_disable");
for_each_connector_in_state(old_state, connector, old_conn_state, i) {
@@ -144,6 +189,11 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
+ blank = MSM_DRM_BLANK_POWERDOWN;
+		notifier_data.data = &blank;
+ notifier_data.id = crtc_idx;
+ msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+					    &notifier_data);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
@@ -159,6 +209,8 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
drm_bridge_post_disable(encoder->bridge);
+ msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+					    &notifier_data);
}
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -296,10 +348,11 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_crtc_state *old_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
+ struct msm_drm_notifier notifier_data;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
int bridge_enable_count = 0;
- int i;
+ int i, blank;
SDE_ATRACE_BEGIN("msm_enable");
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -350,6 +403,12 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
+ blank = MSM_DRM_BLANK_UNBLANK;
+		notifier_data.data = &blank;
+ notifier_data.id =
+ connector->state->crtc->index;
+ msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+					    &notifier_data);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
@@ -391,6 +450,8 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
encoder->base.id, encoder->name);
drm_bridge_enable(encoder->bridge);
+ msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+					    &notifier_data);
}
SDE_ATRACE_END("msm_enable");
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a75a126..33778f8e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -215,12 +215,16 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ LIST_HEAD(tmp_head);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &tmp_head);
+ }
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
@@ -229,11 +233,7 @@ static void vblank_ctrl_worker(struct kthread_work *work)
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
}
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -691,6 +691,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_reset(ddev);
+ if (kms && kms->funcs && kms->funcs->cont_splash_config) {
+ ret = kms->funcs->cont_splash_config(kms);
+ if (ret) {
+ dev_err(dev, "kms cont_splash config failed.\n");
+ goto fail;
+ }
+ }
+
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fbdev)
priv->fbdev = msm_fbdev_init(ddev);
@@ -937,9 +945,9 @@ static void msm_lastclose(struct drm_device *dev)
} else {
drm_modeset_lock_all(dev);
msm_disable_all_modes(dev);
- drm_modeset_unlock_all(dev);
if (kms && kms->funcs && kms->funcs->lastclose)
kms->funcs->lastclose(kms);
+ drm_modeset_unlock_all(dev);
}
}
@@ -1365,7 +1373,7 @@ void msm_mode_object_event_notify(struct drm_mode_object *obj,
if (node->event.type != event->type ||
obj->id != node->info.object_id)
continue;
- len = event->length + sizeof(struct drm_msm_event_resp);
+ len = event->length + sizeof(struct msm_drm_event);
if (node->base.file_priv->event_space < len) {
DRM_ERROR("Insufficient space %d for event %x len %d\n",
node->base.file_priv->event_space, event->type,
@@ -1379,7 +1387,8 @@ void msm_mode_object_event_notify(struct drm_mode_object *obj,
notify->base.event = ¬ify->event;
notify->base.pid = node->base.pid;
notify->event.type = node->event.type;
- notify->event.length = len;
+ notify->event.length = event->length +
+ sizeof(struct drm_msm_event_resp);
memcpy(¬ify->info, &node->info, sizeof(notify->info));
memcpy(notify->data, payload, event->length);
ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
@@ -1766,6 +1775,14 @@ static int add_display_components(struct device *dev,
struct device_node *np = dev->of_node;
unsigned int i;
+ for (i = 0; ; i++) {
+ node = of_parse_phandle(np, "connectors", i);
+ if (!node)
+ break;
+
+ component_match_add(dev, matchptr, compare_of, node);
+ }
+
for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
node = dsi_display_get_boot_display(i);
@@ -1777,13 +1794,6 @@ static int add_display_components(struct device *dev,
}
}
- for (i = 0; ; i++) {
- node = of_parse_phandle(np, "connectors", i);
- if (!node)
- break;
-
- component_match_add(dev, matchptr, compare_of, node);
- }
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index cf61fac..e5c3082 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -83,8 +83,6 @@ struct msm_file_private {
enum msm_mdp_plane_property {
/* blob properties, always put these first */
- PLANE_PROP_SCALER_V1,
- PLANE_PROP_SCALER_V2,
PLANE_PROP_CSC_V1,
PLANE_PROP_INFO,
PLANE_PROP_SCALER_LUT_ED,
@@ -116,12 +114,15 @@ enum msm_mdp_plane_property {
PLANE_PROP_ROT_DST_H,
PLANE_PROP_PREFILL_SIZE,
PLANE_PROP_PREFILL_TIME,
+ PLANE_PROP_SCALER_V1,
+ PLANE_PROP_SCALER_V2,
/* enum/bitmask properties */
PLANE_PROP_ROTATION,
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
PLANE_PROP_FB_TRANSLATION_MODE,
+ PLANE_PROP_MULTIRECT_MODE,
/* total # of properties */
PLANE_PROP_COUNT
@@ -162,8 +163,11 @@ enum msm_mdp_crtc_property {
enum msm_mdp_conn_property {
/* blob properties, always put these first */
CONNECTOR_PROP_SDE_INFO,
+ CONNECTOR_PROP_MODE_INFO,
CONNECTOR_PROP_HDR_INFO,
+ CONNECTOR_PROP_EXT_HDR_INFO,
CONNECTOR_PROP_PP_DITHER,
+ CONNECTOR_PROP_HDR_METADATA,
/* # of blob properties */
CONNECTOR_PROP_BLOBCOUNT,
@@ -229,11 +233,13 @@ enum msm_display_caps {
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
* @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
+ * @MSM_ENC_ACTIVE_REGION - wait for the TG to be in active pixel region
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
MSM_ENC_VBLANK,
+ MSM_ENC_ACTIVE_REGION,
};
/**
@@ -409,6 +415,7 @@ struct msm_display_topology {
* @clk_rate: DSI bit clock per lane in HZ.
* @topology: supported topology for the mode
* @comp_info: compression info supported
+ * @roi_caps: panel roi capabilities
*/
struct msm_mode_info {
uint32_t frame_rate;
@@ -419,6 +426,7 @@ struct msm_mode_info {
uint64_t clk_rate;
struct msm_display_topology topology;
struct msm_compression_info comp_info;
+ struct msm_roi_caps roi_caps;
};
/**
@@ -480,6 +488,7 @@ struct msm_roi_list {
*/
struct msm_display_kickoff_params {
struct msm_roi_list *rois;
+ struct drm_msm_ext_hdr_metadata *hdr_meta;
};
/**
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index f5cdf64..e8bf244 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -42,15 +42,31 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+
return drm_gem_handle_create(file_priv,
msm_fb->planes[0], handle);
}
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
@@ -73,9 +89,16 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->pixel_format,
drm_framebuffer_read_refcount(fb), fb->base.id);
@@ -90,8 +113,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
if (enable)
msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP;
else
@@ -100,10 +129,17 @@ void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
struct drm_gem_object *bo;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
if (atomic_inc_return(&msm_fb->kmap_count) > 1)
return 0;
@@ -124,10 +160,17 @@ static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
struct drm_gem_object *bo;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
if (atomic_dec_return(&msm_fb->kmap_count) > 0)
return;
@@ -151,10 +194,17 @@ static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int ret, i, n;
uint32_t iova;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
@@ -171,8 +221,16 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (fb == NULL) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
msm_framebuffer_kunmap(fb);
@@ -184,7 +242,14 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
@@ -193,9 +258,15 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
dma_addr_t phys_addr;
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
@@ -208,7 +279,14 @@ uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return ERR_PTR(-EINVAL);
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
return msm_fb->planes[plane];
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index cc75fb5..2581caf 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 304faa6..db9e7ee 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -104,6 +104,8 @@ struct msm_kms_funcs {
struct msm_gem_address_space *(*get_address_space)(
struct msm_kms *kms,
unsigned int domain);
+ /* handle continuous splash */
+ int (*cont_splash_config)(struct msm_kms *kms);
};
struct msm_kms {
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 033be6e..ce84b7a 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -393,20 +393,25 @@ int msm_property_atomic_set(struct msm_property_info *info,
struct drm_property_blob *blob;
int property_idx, rc = -EINVAL;
+ if (!info || !property_state) {
+ DRM_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
property_idx = msm_property_index(info, property);
- if (!info || !property_state ||
- (property_idx == -EINVAL) || !property_state->values) {
- DRM_DEBUG("invalid argument(s)\n");
+ if ((property_idx == -EINVAL) || !property_state->values) {
+ DRM_ERROR("invalid argument(s)\n");
} else {
/* extra handling for incoming properties */
mutex_lock(&info->property_lock);
- if ((property->flags & DRM_MODE_PROP_BLOB) &&
+ if (val && (property->flags & DRM_MODE_PROP_BLOB) &&
(property_idx < info->blob_count)) {
/* DRM lookup also takes a reference */
blob = drm_property_lookup_blob(info->dev,
(uint32_t)val);
if (!blob) {
- DRM_ERROR("blob not found\n");
+ DRM_ERROR("prop %d blob id 0x%llx not found\n",
+ property_idx, val);
val = 0;
} else {
DBG("Blob %u saved", blob->base.id);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 92d1865..7c879651 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -33,6 +33,10 @@
#define SZ_4G (((size_t) SZ_1G) * 4)
#endif
+#ifndef SZ_2G
+#define SZ_2G (((size_t) SZ_1G) * 2)
+#endif
+
struct msm_smmu_client {
struct device *dev;
struct dma_iommu_mapping *mmu_mapping;
@@ -300,26 +304,26 @@ static const struct msm_mmu_funcs funcs = {
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
[MSM_SMMU_DOMAIN_UNSECURE] = {
.label = "mdp_ns",
- .va_start = SZ_128K,
- .va_size = SZ_4G - SZ_128K,
+ .va_start = SZ_2G,
+ .va_size = SZ_4G - SZ_2G,
.secure = false,
},
[MSM_SMMU_DOMAIN_SECURE] = {
.label = "mdp_s",
- .va_start = SZ_128K,
- .va_size = SZ_4G - SZ_128K,
+ .va_start = SZ_2G,
+ .va_size = SZ_4G - SZ_2G,
.secure = true,
},
[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
.label = "rot_ns",
- .va_start = SZ_128K,
- .va_size = SZ_4G - SZ_128K,
+ .va_start = SZ_2G,
+ .va_size = SZ_4G - SZ_2G,
.secure = false,
},
[MSM_SMMU_DOMAIN_NRT_SECURE] = {
.label = "rot_s",
- .va_start = SZ_128K,
- .va_size = SZ_4G - SZ_128K,
+ .va_start = SZ_2G,
+ .va_size = SZ_4G - SZ_2G,
.secure = true,
},
};
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 2c5b7ea..c2419dc 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1016,8 +1016,10 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
}
}
- if (!found)
+ if (!found) {
+ ret = -ENOENT;
goto exit;
+ }
/**
* sde_crtc is virtual ensure that hardware has been attached to the
@@ -1028,7 +1030,7 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
- ret = -EINVAL;
+ ret = -EPERM;
goto exit;
}
@@ -1896,7 +1898,7 @@ int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
if (!hw_dspp) {
DRM_ERROR("invalid dspp\n");
- ret = -EINVAL;
+ ret = -EPERM;
goto exit;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index d83f476..fb1f578 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -16,10 +16,12 @@
#include "sde_kms.h"
#include "sde_connector.h"
+#include "sde_encoder.h"
#include <linux/backlight.h>
#include "dsi_drm.h"
#include "dsi_display.h"
#include "sde_crtc.h"
+#include "sde_rm.h"
#define BL_NODE_NAME_SIZE 32
@@ -152,7 +154,7 @@ int sde_connector_trigger_event(void *drm_connector,
{
struct sde_connector *c_conn;
unsigned long irq_flags;
- void (*cb_func)(uint32_t event_idx,
+ int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3);
@@ -175,7 +177,7 @@ int sde_connector_trigger_event(void *drm_connector,
spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
if (cb_func)
- cb_func(event_idx, instance_idx, usr,
+ rc = cb_func(event_idx, instance_idx, usr,
data0, data1, data2, data3);
else
rc = -EAGAIN;
@@ -185,7 +187,7 @@ int sde_connector_trigger_event(void *drm_connector,
int sde_connector_register_event(struct drm_connector *connector,
uint32_t event_idx,
- void (*cb_func)(uint32_t event_idx,
+ int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3),
@@ -345,6 +347,39 @@ int sde_connector_get_dither_cfg(struct drm_connector *conn,
return 0;
}
+int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
+ struct msm_mode_info *mode_info)
+{
+ struct sde_connector_state *sde_conn_state = NULL;
+
+ if (!conn_state || !mode_info) {
+ SDE_ERROR("Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ sde_conn_state = to_sde_connector_state(conn_state);
+ memcpy(mode_info, &sde_conn_state->mode_info,
+ sizeof(sde_conn_state->mode_info));
+
+ return 0;
+}
+
+static int sde_connector_handle_disp_recovery(uint32_t event_idx,
+ uint32_t instance_idx, void *usr,
+ uint32_t data0, uint32_t data1,
+ uint32_t data2, uint32_t data3)
+{
+ struct sde_connector *c_conn = usr;
+ int rc = 0;
+
+ if (!c_conn)
+ return -EINVAL;
+
+ rc = sde_kms_handle_recovery(c_conn->encoder);
+
+ return rc;
+}
+
int sde_connector_get_info(struct drm_connector *connector,
struct msm_display_info *info)
{
@@ -441,6 +476,54 @@ static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
return rc;
}
+static int _sde_connector_update_bl_scale(struct sde_connector *c_conn, int idx)
+{
+ struct drm_connector conn;
+ struct dsi_display *dsi_display;
+ struct dsi_backlight_config *bl_config;
+ uint64_t value;
+ int rc = 0;
+
+ if (!c_conn) {
+ SDE_ERROR("Invalid params sde_connector null\n");
+ return -EINVAL;
+ }
+
+ conn = c_conn->base;
+ dsi_display = c_conn->display;
+ if (!dsi_display || !dsi_display->panel) {
+ SDE_ERROR("Invalid params(s) dsi_display %pK, panel %pK\n",
+ dsi_display,
+ ((dsi_display) ? dsi_display->panel : NULL));
+ return -EINVAL;
+ }
+
+ bl_config = &dsi_display->panel->bl_config;
+ value = sde_connector_get_property(conn.state, idx);
+
+ if (idx == CONNECTOR_PROP_BL_SCALE) {
+ if (value > MAX_BL_SCALE_LEVEL)
+ bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
+ else
+ bl_config->bl_scale = (u32)value;
+ } else if (idx == CONNECTOR_PROP_AD_BL_SCALE) {
+ if (value > MAX_AD_BL_SCALE_LEVEL)
+ bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+ else
+ bl_config->bl_scale_ad = (u32)value;
+ } else {
+ SDE_DEBUG("invalid idx %d\n", idx);
+ return 0;
+ }
+
+ SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
+ bl_config->bl_scale, bl_config->bl_scale_ad,
+ bl_config->bl_level);
+ rc = c_conn->ops.set_backlight(dsi_display, bl_config->bl_level);
+
+ return rc;
+}
+
int sde_connector_pre_kickoff(struct drm_connector *connector)
{
struct sde_connector *c_conn;
@@ -471,6 +554,10 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
_sde_connector_update_power_locked(c_conn);
mutex_unlock(&c_conn->lock);
break;
+ case CONNECTOR_PROP_BL_SCALE:
+ case CONNECTOR_PROP_AD_BL_SCALE:
+ _sde_connector_update_bl_scale(c_conn, idx);
+ break;
default:
/* nothing to do for most properties */
break;
@@ -481,6 +568,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
return 0;
params.rois = &c_state->rois;
+ params.hdr_meta = &c_state->hdr_meta;
SDE_EVT32_VERBOSE(connector->base.id);
@@ -489,23 +577,26 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
return rc;
}
-void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
+int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
{
struct sde_connector *c_conn;
struct dsi_display *display;
u32 state = enable ? DSI_CLK_ON : DSI_CLK_OFF;
+ int rc = 0;
if (!connector) {
SDE_ERROR("invalid connector\n");
- return;
+ return -EINVAL;
}
c_conn = to_sde_connector(connector);
display = (struct dsi_display *) c_conn->display;
if (display && c_conn->ops.clk_ctrl)
- c_conn->ops.clk_ctrl(display->mdp_clk_handle,
+ rc = c_conn->ops.clk_ctrl(display->mdp_clk_handle,
DSI_ALL_CLKS, state);
+
+ return rc;
}
static void sde_connector_destroy(struct drm_connector *connector)
@@ -531,6 +622,10 @@ static void sde_connector_destroy(struct drm_connector *connector)
drm_property_unreference_blob(c_conn->blob_hdr);
if (c_conn->blob_dither)
drm_property_unreference_blob(c_conn->blob_dither);
+ if (c_conn->blob_mode_info)
+ drm_property_unreference_blob(c_conn->blob_mode_info);
+ if (c_conn->blob_ext_hdr)
+ drm_property_unreference_blob(c_conn->blob_ext_hdr);
msm_property_destroy(&c_conn->property_info);
if (c_conn->bl_device)
@@ -671,44 +766,81 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
return &c_state->base;
}
-static int _sde_connector_roi_v1_check_roi(
- struct sde_connector *c_conn,
- struct drm_clip_rect *roi_conn,
- const struct msm_roi_caps *caps)
+int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state)
{
- const struct msm_roi_alignment *align = &caps->align;
- int w = roi_conn->x2 - roi_conn->x1;
- int h = roi_conn->y2 - roi_conn->y1;
+ const struct msm_roi_alignment *align = NULL;
+ struct sde_connector *c_conn = NULL;
+ struct msm_mode_info mode_info;
+ struct sde_connector_state *c_state;
+ int i, w, h;
- if (w <= 0 || h <= 0) {
- SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n", w, h);
+ if (!conn_state)
return -EINVAL;
- }
- if (w < align->min_width || w % align->width_pix_align) {
- SDE_ERROR_CONN(c_conn,
- "invalid conn roi width %d min %d align %d\n",
- w, align->min_width, align->width_pix_align);
- return -EINVAL;
- }
+ memset(&mode_info, 0, sizeof(mode_info));
- if (h < align->min_height || h % align->height_pix_align) {
- SDE_ERROR_CONN(c_conn,
- "invalid conn roi height %d min %d align %d\n",
- h, align->min_height, align->height_pix_align);
- return -EINVAL;
- }
+ c_state = to_sde_connector_state(conn_state);
+ c_conn = to_sde_connector(conn_state->connector);
- if (roi_conn->x1 % align->xstart_pix_align) {
- SDE_ERROR_CONN(c_conn, "invalid conn roi x1 %d align %d\n",
- roi_conn->x1, align->xstart_pix_align);
- return -EINVAL;
- }
+ memcpy(&mode_info, &c_state->mode_info, sizeof(c_state->mode_info));
- if (roi_conn->y1 % align->ystart_pix_align) {
- SDE_ERROR_CONN(c_conn, "invalid conn roi y1 %d align %d\n",
- roi_conn->y1, align->ystart_pix_align);
- return -EINVAL;
+ if (!mode_info.roi_caps.enabled)
+ return 0;
+
+ if (c_state->rois.num_rects > mode_info.roi_caps.num_roi) {
+ SDE_ERROR_CONN(c_conn, "too many rects specified: %d > %d\n",
+ c_state->rois.num_rects,
+ mode_info.roi_caps.num_roi);
+ return -E2BIG;
+ };
+
+ align = &mode_info.roi_caps.align;
+ for (i = 0; i < c_state->rois.num_rects; ++i) {
+ struct drm_clip_rect *roi_conn;
+
+ roi_conn = &c_state->rois.roi[i];
+ w = roi_conn->x2 - roi_conn->x1;
+ h = roi_conn->y2 - roi_conn->y1;
+
+ SDE_EVT32_VERBOSE(DRMID(&c_conn->base),
+ roi_conn->x1, roi_conn->y1,
+ roi_conn->x2, roi_conn->y2);
+
+ if (w <= 0 || h <= 0) {
+ SDE_ERROR_CONN(c_conn, "invalid conn roi w %d h %d\n",
+ w, h);
+ return -EINVAL;
+ }
+
+ if (w < align->min_width || w % align->width_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi width %d min %d align %d\n",
+ w, align->min_width,
+ align->width_pix_align);
+ return -EINVAL;
+ }
+
+ if (h < align->min_height || h % align->height_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi height %d min %d align %d\n",
+ h, align->min_height,
+ align->height_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->x1 % align->xstart_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi x1 %d align %d\n",
+ roi_conn->x1, align->xstart_pix_align);
+ return -EINVAL;
+ }
+
+ if (roi_conn->y1 % align->ystart_pix_align) {
+ SDE_ERROR_CONN(c_conn,
+ "invalid conn roi y1 %d align %d\n",
+ roi_conn->y1, align->ystart_pix_align);
+ return -EINVAL;
+ }
}
return 0;
@@ -720,27 +852,13 @@ static int _sde_connector_set_roi_v1(
void *usr_ptr)
{
struct sde_drm_roi_v1 roi_v1;
- struct msm_display_info display_info;
- struct msm_roi_caps *caps;
- int i, rc;
+ int i;
if (!c_conn || !c_state) {
SDE_ERROR("invalid args\n");
return -EINVAL;
}
- rc = sde_connector_get_info(&c_conn->base, &display_info);
- if (rc) {
- SDE_ERROR_CONN(c_conn, "display get info error: %d\n", rc);
- return rc;
- }
-
- caps = &display_info.roi_caps;
- if (!caps->enabled) {
- SDE_ERROR_CONN(c_conn, "display roi capability is disabled\n");
- return -ENOTSUPP;
- }
-
memset(&c_state->rois, 0, sizeof(c_state->rois));
if (!usr_ptr) {
@@ -760,22 +878,14 @@ static int _sde_connector_set_roi_v1(
return 0;
}
- if (roi_v1.num_rects > SDE_MAX_ROI_V1 ||
- roi_v1.num_rects > caps->num_roi) {
- SDE_ERROR_CONN(c_conn, "too many rects specified: %d\n",
+ if (roi_v1.num_rects > SDE_MAX_ROI_V1) {
+ SDE_ERROR_CONN(c_conn, "num roi rects more than supported: %d",
roi_v1.num_rects);
return -EINVAL;
}
c_state->rois.num_rects = roi_v1.num_rects;
for (i = 0; i < roi_v1.num_rects; ++i) {
- int rc;
-
- rc = _sde_connector_roi_v1_check_roi(c_conn, &roi_v1.roi[i],
- caps);
- if (rc)
- return rc;
-
c_state->rois.roi[i] = roi_v1.roi[i];
SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i,
c_state->rois.roi[i].x1,
@@ -787,40 +897,62 @@ static int _sde_connector_set_roi_v1(
return 0;
}
-static int _sde_connector_update_bl_scale(struct sde_connector *c_conn,
- int idx,
- uint64_t value)
+static int _sde_connector_set_ext_hdr_info(
+ struct sde_connector *c_conn,
+ struct sde_connector_state *c_state,
+ void *usr_ptr)
{
- struct dsi_display *dsi_display = c_conn->display;
- struct dsi_backlight_config *bl_config;
- int rc = 0;
+ struct drm_connector *connector;
+ struct drm_msm_ext_hdr_metadata *hdr_meta;
+ int i;
- if (!dsi_display || !dsi_display->panel) {
- pr_err("Invalid params(s) dsi_display %pK, panel %pK\n",
- dsi_display,
- ((dsi_display) ? dsi_display->panel : NULL));
+ if (!c_conn || !c_state) {
+ SDE_ERROR_CONN(c_conn, "invalid args\n");
return -EINVAL;
}
- bl_config = &dsi_display->panel->bl_config;
- if (idx == CONNECTOR_PROP_BL_SCALE) {
- bl_config->bl_scale = value;
- if (value > MAX_BL_SCALE_LEVEL)
- bl_config->bl_scale = MAX_BL_SCALE_LEVEL;
- SDE_DEBUG("set to panel: bl_scale = %u, bl_level = %u\n",
- bl_config->bl_scale, bl_config->bl_level);
- rc = c_conn->ops.set_backlight(dsi_display,
- bl_config->bl_level);
- } else if (idx == CONNECTOR_PROP_AD_BL_SCALE) {
- bl_config->bl_scale_ad = value;
- if (value > MAX_AD_BL_SCALE_LEVEL)
- bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
- SDE_DEBUG("set to panel: bl_scale_ad = %u, bl_level = %u\n",
- bl_config->bl_scale_ad, bl_config->bl_level);
- rc = c_conn->ops.set_backlight(dsi_display,
- bl_config->bl_level);
+ connector = &c_conn->base;
+
+ if (!connector->hdr_supported) {
+ SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+ return -ENOTSUPP;
}
- return rc;
+
+ memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
+
+ if (!usr_ptr) {
+ SDE_DEBUG_CONN(c_conn, "hdr metadata cleared\n");
+ return 0;
+ }
+
+ if (copy_from_user(&c_state->hdr_meta,
+ (void __user *)usr_ptr,
+ sizeof(*hdr_meta))) {
+ SDE_ERROR_CONN(c_conn, "failed to copy hdr metadata\n");
+ return -EFAULT;
+ }
+
+ hdr_meta = &c_state->hdr_meta;
+
+ SDE_DEBUG_CONN(c_conn, "hdr_state %d\n", hdr_meta->hdr_state);
+ SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n", hdr_meta->hdr_supported);
+ SDE_DEBUG_CONN(c_conn, "eotf %d\n", hdr_meta->eotf);
+ SDE_DEBUG_CONN(c_conn, "white_point_x %d\n", hdr_meta->white_point_x);
+ SDE_DEBUG_CONN(c_conn, "white_point_y %d\n", hdr_meta->white_point_y);
+ SDE_DEBUG_CONN(c_conn, "max_luminance %d\n", hdr_meta->max_luminance);
+ SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n",
+ hdr_meta->max_content_light_level);
+ SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n",
+ hdr_meta->max_average_light_level);
+
+ for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
+ SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n",
+ hdr_meta->display_primaries_x[i]);
+ SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n",
+ hdr_meta->display_primaries_y[i]);
+ }
+
+ return 0;
}
static int sde_connector_atomic_set_property(struct drm_connector *connector,
@@ -831,6 +963,7 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
struct sde_connector *c_conn;
struct sde_connector_state *c_state;
int idx, rc;
+ uint64_t fence_fd;
if (!connector || !state || !property) {
SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
@@ -869,18 +1002,40 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
c_conn->fb_kmap);
}
break;
- case CONNECTOR_PROP_BL_SCALE:
- case CONNECTOR_PROP_AD_BL_SCALE:
- rc = _sde_connector_update_bl_scale(c_conn, idx, val);
+ case CONNECTOR_PROP_RETIRE_FENCE:
+ if (!val)
+ goto end;
+
+ rc = sde_fence_create(&c_conn->retire_fence, &fence_fd, 0);
+ if (rc) {
+ SDE_ERROR("fence create failed rc:%d\n", rc);
+ goto end;
+ }
+
+ rc = copy_to_user((uint64_t __user *)val, &fence_fd,
+ sizeof(uint64_t));
+ if (rc) {
+ SDE_ERROR("copy to user failed rc:%d\n", rc);
+ /* fence will be released with timeline update */
+ put_unused_fd(fence_fd);
+ rc = -EFAULT;
+ goto end;
+ }
+ break;
+ case CONNECTOR_PROP_ROI_V1:
+ rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+ if (rc)
+ SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
break;
default:
break;
}
- if (idx == CONNECTOR_PROP_ROI_V1) {
- rc = _sde_connector_set_roi_v1(c_conn, c_state, (void *)val);
+ if (idx == CONNECTOR_PROP_HDR_METADATA) {
+ rc = _sde_connector_set_ext_hdr_info(c_conn,
+ c_state, (void *)val);
if (rc)
- SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
+ SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
}
/* check for custom property handling */
@@ -948,6 +1103,19 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector,
return rc;
}
+void sde_conn_timeline_status(struct drm_connector *conn)
+{
+ struct sde_connector *c_conn;
+
+ if (!conn) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ c_conn = to_sde_connector(conn);
+ sde_fence_timeline_status(&c_conn->retire_fence, &conn->base);
+}
+
void sde_connector_prepare_fence(struct drm_connector *connector)
{
if (!connector) {
@@ -981,6 +1149,29 @@ void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts)
sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, true);
}
+static void sde_connector_update_hdr_props(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct drm_msm_ext_hdr_properties hdr = {};
+
+ hdr.hdr_supported = connector->hdr_supported;
+
+ if (hdr.hdr_supported) {
+ hdr.hdr_eotf = connector->hdr_eotf;
+ hdr.hdr_metadata_type_one = connector->hdr_metadata_type_one;
+ hdr.hdr_max_luminance = connector->hdr_max_luminance;
+ hdr.hdr_avg_luminance = connector->hdr_avg_luminance;
+ hdr.hdr_min_luminance = connector->hdr_min_luminance;
+
+ msm_property_set_blob(&c_conn->property_info,
+ &c_conn->blob_ext_hdr,
+ &hdr,
+ sizeof(hdr),
+ CONNECTOR_PROP_EXT_HDR_INFO);
+
+ }
+}
+
static enum drm_connector_status
sde_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1088,6 +1279,58 @@ int sde_connector_set_property_for_commit(struct drm_connector *connector,
connector, state, property, value);
}
+int sde_connector_helper_reset_custom_properties(
+ struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ struct drm_property *drm_prop;
+ enum msm_mdp_conn_property prop_idx;
+
+ if (!connector || !connector_state) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(connector_state);
+
+ for (prop_idx = 0; prop_idx < CONNECTOR_PROP_COUNT; prop_idx++) {
+ uint64_t val = c_state->property_values[prop_idx].value;
+ uint64_t def;
+ int ret;
+
+ drm_prop = msm_property_index_to_drm_property(
+ &c_conn->property_info, prop_idx);
+ if (!drm_prop) {
+ /* not all props will be installed, based on caps */
+ SDE_DEBUG_CONN(c_conn, "invalid property index %d\n",
+ prop_idx);
+ continue;
+ }
+
+ def = msm_property_get_default(&c_conn->property_info,
+ prop_idx);
+ if (val == def)
+ continue;
+
+ SDE_DEBUG_CONN(c_conn, "set prop %s idx %d from %llu to %llu\n",
+ drm_prop->name, prop_idx, val, def);
+
+ ret = drm_atomic_connector_set_property(connector,
+ connector_state, drm_prop, def);
+ if (ret) {
+ SDE_ERROR_CONN(c_conn,
+ "set property failed, idx %d ret %d\n",
+ prop_idx, ret);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
/**
* sde_connector_init_debugfs - initialize connector debugfs
@@ -1137,12 +1380,39 @@ static void sde_connector_early_unregister(struct drm_connector *connector)
/* debugfs under connector->debugfs are deleted by drm_debugfs */
}
+static int sde_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ int rc, mode_count = 0;
+ struct sde_connector *sde_conn = NULL;
+
+ sde_conn = to_sde_connector(connector);
+ if (!sde_conn) {
+ SDE_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ mode_count = drm_helper_probe_single_connector_modes(connector,
+ max_width, max_height);
+
+ rc = sde_connector_set_blob_data(connector,
+ connector->state,
+ CONNECTOR_PROP_MODE_INFO);
+ if (rc) {
+ SDE_ERROR_CONN(sde_conn,
+ "failed to setup mode info prop, rc = %d\n", rc);
+ return 0;
+ }
+
+ return mode_count;
+}
+
static const struct drm_connector_funcs sde_connector_ops = {
.dpms = sde_connector_dpms,
.reset = sde_connector_atomic_reset,
.detect = sde_connector_detect,
.destroy = sde_connector_destroy,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ .fill_modes = sde_connector_fill_modes,
.atomic_duplicate_state = sde_connector_atomic_duplicate_state,
.atomic_destroy_state = sde_connector_atomic_destroy_state,
.atomic_set_property = sde_connector_atomic_set_property,
@@ -1155,6 +1425,7 @@ static const struct drm_connector_funcs sde_connector_ops = {
static int sde_connector_get_modes(struct drm_connector *connector)
{
struct sde_connector *c_conn;
+ int mode_count = 0;
if (!connector) {
SDE_ERROR("invalid connector\n");
@@ -1167,7 +1438,15 @@ static int sde_connector_get_modes(struct drm_connector *connector)
return 0;
}
- return c_conn->ops.get_modes(connector, c_conn->display);
+ mode_count = c_conn->ops.get_modes(connector, c_conn->display);
+ if (!mode_count) {
+ SDE_ERROR_CONN(c_conn, "failed to get modes\n");
+ return 0;
+ }
+
+ sde_connector_update_hdr_props(connector);
+
+ return mode_count;
}
static enum drm_mode_status
@@ -1264,6 +1543,161 @@ static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
.best_encoder = sde_connector_best_encoder,
};
+static int sde_connector_populate_mode_info(struct drm_connector *conn,
+ struct sde_kms_info *info)
+{
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_connector *c_conn = NULL;
+ struct drm_display_mode *mode;
+ struct msm_mode_info mode_info;
+ int rc = 0;
+
+ if (!conn || !conn->dev || !conn->dev->dev_private) {
+ SDE_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = conn->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ c_conn = to_sde_connector(conn);
+ if (!c_conn->ops.get_mode_info) {
+ SDE_ERROR_CONN(c_conn, "get_mode_info not defined\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mode, &conn->modes, head) {
+ int topology_idx = 0;
+
+ memset(&mode_info, 0, sizeof(mode_info));
+
+ rc = c_conn->ops.get_mode_info(mode, &mode_info,
+ sde_kms->catalog->max_mixer_width,
+ c_conn->display);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn,
+ "failed to get mode info for mode %s\n",
+ mode->name);
+ continue;
+ }
+
+ sde_kms_info_add_keystr(info, "mode_name", mode->name);
+
+ topology_idx = (int)sde_rm_get_topology_name(
+ mode_info.topology);
+ if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
+ sde_kms_info_add_keystr(info, "topology",
+ e_topology_name[topology_idx].name);
+ } else {
+ SDE_ERROR_CONN(c_conn, "invalid topology\n");
+ continue;
+ }
+
+ if (!mode_info.roi_caps.num_roi)
+ continue;
+
+ sde_kms_info_add_keyint(info, "partial_update_num_roi",
+ mode_info.roi_caps.num_roi);
+ sde_kms_info_add_keyint(info, "partial_update_xstart",
+ mode_info.roi_caps.align.xstart_pix_align);
+ sde_kms_info_add_keyint(info, "partial_update_walign",
+ mode_info.roi_caps.align.width_pix_align);
+ sde_kms_info_add_keyint(info, "partial_update_wmin",
+ mode_info.roi_caps.align.min_width);
+ sde_kms_info_add_keyint(info, "partial_update_ystart",
+ mode_info.roi_caps.align.ystart_pix_align);
+ sde_kms_info_add_keyint(info, "partial_update_halign",
+ mode_info.roi_caps.align.height_pix_align);
+ sde_kms_info_add_keyint(info, "partial_update_hmin",
+ mode_info.roi_caps.align.min_height);
+ sde_kms_info_add_keyint(info, "partial_update_roimerge",
+ mode_info.roi_caps.merge_rois);
+ }
+
+ return rc;
+}
+
+int sde_connector_set_blob_data(struct drm_connector *conn,
+ struct drm_connector_state *state,
+ enum msm_mdp_conn_property prop_id)
+{
+ struct sde_kms_info *info;
+ struct sde_connector *c_conn = NULL;
+ struct sde_connector_state *sde_conn_state = NULL;
+ struct msm_mode_info mode_info;
+ struct drm_property_blob *blob = NULL;
+ int rc = 0;
+
+ c_conn = to_sde_connector(conn);
+ if (!c_conn) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ sde_kms_info_reset(info);
+
+ switch (prop_id) {
+ case CONNECTOR_PROP_SDE_INFO:
+ memset(&mode_info, 0, sizeof(mode_info));
+
+ if (state) {
+ sde_conn_state = to_sde_connector_state(state);
+ memcpy(&mode_info, &sde_conn_state->mode_info,
+ sizeof(sde_conn_state->mode_info));
+ } else {
+ /**
+ * connector state is assigned only on first
+ * atomic_commit. But this function is allowed to be
+ * invoked during probe/init sequence. So not throwing
+ * an error.
+ */
+ SDE_DEBUG_CONN(c_conn, "invalid connector state\n");
+ }
+
+ if (c_conn->ops.set_info_blob) {
+ rc = c_conn->ops.set_info_blob(conn, info,
+ c_conn->display, &mode_info);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn,
+ "set_info_blob failed, %d\n",
+ rc);
+ goto exit;
+ }
+ }
+
+ blob = c_conn->blob_caps;
+ break;
+ case CONNECTOR_PROP_MODE_INFO:
+ rc = sde_connector_populate_mode_info(conn, info);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn,
+ "mode info population failed, %d\n",
+ rc);
+ goto exit;
+ }
+ blob = c_conn->blob_mode_info;
+ break;
+ default:
+ SDE_ERROR_CONN(c_conn, "invalid prop_id: %d\n", prop_id);
+ goto exit;
+ };
+
+ msm_property_set_blob(&c_conn->property_info,
+ &blob,
+ SDE_KMS_INFO_DATA(info),
+ SDE_KMS_INFO_DATALEN(info),
+ prop_id);
+exit:
+ kfree(info);
+
+ return rc;
+}
+
struct drm_connector *sde_connector_init(struct drm_device *dev,
struct drm_encoder *encoder,
struct drm_panel *panel,
@@ -1274,7 +1708,6 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
{
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
- struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
struct dsi_display *dsi_display;
struct msm_display_info display_info;
@@ -1372,34 +1805,32 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
sizeof(struct sde_connector_state));
if (c_conn->ops.post_init) {
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- SDE_ERROR("failed to allocate info buffer\n");
- rc = -ENOMEM;
- goto error_cleanup_fence;
- }
-
- sde_kms_info_reset(info);
- rc = c_conn->ops.post_init(&c_conn->base, info, display);
+ rc = c_conn->ops.post_init(&c_conn->base, display);
if (rc) {
SDE_ERROR("post-init failed, %d\n", rc);
- kfree(info);
goto error_cleanup_fence;
}
-
- msm_property_install_blob(&c_conn->property_info,
- "capabilities",
- DRM_MODE_PROP_IMMUTABLE,
- CONNECTOR_PROP_SDE_INFO);
-
- msm_property_set_blob(&c_conn->property_info,
- &c_conn->blob_caps,
- SDE_KMS_INFO_DATA(info),
- SDE_KMS_INFO_DATALEN(info),
- CONNECTOR_PROP_SDE_INFO);
- kfree(info);
}
+ msm_property_install_blob(&c_conn->property_info,
+ "capabilities",
+ DRM_MODE_PROP_IMMUTABLE,
+ CONNECTOR_PROP_SDE_INFO);
+
+ rc = sde_connector_set_blob_data(&c_conn->base,
+ NULL,
+ CONNECTOR_PROP_SDE_INFO);
+ if (rc) {
+ SDE_ERROR_CONN(c_conn,
+ "failed to setup connector info, rc = %d\n", rc);
+ goto error_cleanup_fence;
+ }
+
+ msm_property_install_blob(&c_conn->property_info,
+ "mode_properties",
+ DRM_MODE_PROP_IMMUTABLE,
+ CONNECTOR_PROP_MODE_INFO);
+
if (connector_type == DRM_MODE_CONNECTOR_DSI) {
dsi_display = (struct dsi_display *)(display);
if (dsi_display && dsi_display->panel &&
@@ -1418,16 +1849,41 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
}
rc = sde_connector_get_info(&c_conn->base, &display_info);
- if (!rc && display_info.roi_caps.enabled) {
- msm_property_install_volatile_range(
- &c_conn->property_info, "sde_drm_roi_v1", 0x0,
- 0, ~0, 0, CONNECTOR_PROP_ROI_V1);
- }
+ if (!rc && (connector_type == DRM_MODE_CONNECTOR_DSI) &&
+ (display_info.capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ sde_connector_register_event(&c_conn->base,
+ SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
+ sde_connector_handle_disp_recovery,
+ c_conn);
+
+ msm_property_install_volatile_range(
+ &c_conn->property_info, "sde_drm_roi_v1", 0x0,
+ 0, ~0, 0, CONNECTOR_PROP_ROI_V1);
+
/* install PP_DITHER properties */
_sde_connector_install_dither_property(dev, sde_kms, c_conn);
- msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
- 0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct drm_msm_ext_hdr_properties hdr = {0};
+
+ msm_property_install_blob(&c_conn->property_info,
+ "ext_hdr_properties",
+ DRM_MODE_PROP_IMMUTABLE,
+ CONNECTOR_PROP_EXT_HDR_INFO);
+
+ /* set default values to avoid reading uninitialized data */
+ msm_property_set_blob(&c_conn->property_info,
+ &c_conn->blob_ext_hdr,
+ &hdr,
+ sizeof(hdr),
+ CONNECTOR_PROP_EXT_HDR_INFO);
+ }
+
+ msm_property_install_volatile_range(&c_conn->property_info,
+ "hdr_metadata", 0x0, 0, ~0, 0, CONNECTOR_PROP_HDR_METADATA);
+
+ msm_property_install_volatile_range(&c_conn->property_info,
+ "RETIRE_FENCE", 0x0, 0, ~0, 0, CONNECTOR_PROP_RETIRE_FENCE);
msm_property_install_range(&c_conn->property_info, "autorefresh",
0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0,
@@ -1478,6 +1934,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
drm_property_unreference_blob(c_conn->blob_hdr);
if (c_conn->blob_dither)
drm_property_unreference_blob(c_conn->blob_dither);
+ if (c_conn->blob_mode_info)
+ drm_property_unreference_blob(c_conn->blob_mode_info);
+ if (c_conn->blob_ext_hdr)
+ drm_property_unreference_blob(c_conn->blob_ext_hdr);
msm_property_destroy(&c_conn->property_info);
error_cleanup_fence:
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 35aec8c..a7bad7c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -36,15 +36,26 @@ struct sde_connector_ops {
/**
* post_init - perform additional initialization steps
* @connector: Pointer to drm connector structure
- * @info: Pointer to sde connector info structure
* @display: Pointer to private display handle
* Returns: Zero on success
*/
int (*post_init)(struct drm_connector *connector,
- void *info,
void *display);
/**
+ * set_info_blob - initialize given info blob
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * @mode_info: Pointer to mode info structure
+ * Returns: Zero on success
+ */
+ int (*set_info_blob)(struct drm_connector *connector,
+ void *info,
+ void *display,
+ struct msm_mode_info *mode_info);
+
+ /**
* detect - determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
@@ -228,6 +239,8 @@ struct sde_connector_ops {
enum sde_connector_events {
SDE_CONN_EVENT_VID_DONE, /* video mode frame done */
SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
+ SDE_CONN_EVENT_VID_FIFO_OVERFLOW, /* dsi fifo overflow error */
+ SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW, /* dsi fifo underflow error */
SDE_CONN_EVENT_COUNT,
};
@@ -235,9 +248,10 @@ enum sde_connector_events {
* struct sde_connector_evt - local event registration entry structure
* @cb_func: Pointer to desired callback function
* @usr: User pointer to pass to callback on event trigger
+ * Returns: Zero on success, negative for failure
*/
struct sde_connector_evt {
- void (*cb_func)(uint32_t event_idx,
+ int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3);
@@ -264,7 +278,9 @@ struct sde_connector_evt {
* @property_data: Array of private data for generic property handling
* @blob_caps: Pointer to blob structure for 'capabilities' property
* @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @blob_ext_hdr: Pointer to blob structure for 'ext_hdr_properties' property
* @blob_dither: Pointer to blob structure for default dither config
+ * @blob_mode_info: Pointer to blob structure for mode info
* @fb_kmap: true if kernel mapping of framebuffer is requested
* @event_table: Array of registered events
* @event_lock: Lock object for event_table
@@ -296,7 +312,9 @@ struct sde_connector {
struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
struct drm_property_blob *blob_caps;
struct drm_property_blob *blob_hdr;
+ struct drm_property_blob *blob_ext_hdr;
struct drm_property_blob *blob_dither;
+ struct drm_property_blob *blob_mode_info;
bool fb_kmap;
struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
@@ -354,6 +372,10 @@ struct sde_connector {
* @property_values: Local cache of current connector property values
* @rois: Regions of interest structure for mapping CRTC to Connector output
* @property_blobs: blob properties
+ * @mode_info: local copy of msm_mode_info struct
+ * @hdr_meta: HDR metadata info passed from userspace
+ * @old_topology_name: topology of previous atomic state. remove this in later
+ * kernel versions which provide drm_atomic_state old_state pointers
*/
struct sde_connector_state {
struct drm_connector_state base;
@@ -363,6 +385,9 @@ struct sde_connector_state {
struct msm_roi_list rois;
struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
+ struct msm_mode_info mode_info;
+ struct drm_msm_ext_hdr_metadata hdr_meta;
+ enum sde_rm_topology_name old_topology_name;
};
/**
@@ -415,6 +440,43 @@ static inline uint64_t sde_connector_get_topology_name(
}
/**
+ * sde_connector_get_old_topology_name - helper accessor to retrieve
+ * topology_name for the previous mode
+ * @state: pointer to drm connector state
+ * Returns: cached value of the previous topology, or SDE_RM_TOPOLOGY_NONE
+ */
+static inline enum sde_rm_topology_name sde_connector_get_old_topology_name(
+ struct drm_connector_state *state)
+{
+ struct sde_connector_state *c_state = to_sde_connector_state(state);
+
+ if (!state)
+ return SDE_RM_TOPOLOGY_NONE;
+
+ return c_state->old_topology_name;
+}
+
+/**
+ * sde_connector_set_old_topology_name - helper to cache value of previous
+ * mode's topology
+ * @state: pointer to drm connector state
+ * Returns: 0 on success, negative errno on failure
+ */
+static inline int sde_connector_set_old_topology_name(
+ struct drm_connector_state *state,
+ enum sde_rm_topology_name top)
+{
+ struct sde_connector_state *c_state = to_sde_connector_state(state);
+
+ if (!state)
+ return -EINVAL;
+
+ c_state->old_topology_name = top;
+
+ return 0;
+}
+
+/**
* sde_connector_get_lp - helper accessor to retrieve LP state
* @connector: pointer to drm connector
* Returns: value of the CONNECTOR_PROP_LP property or 0
@@ -494,8 +556,9 @@ int sde_connector_get_info(struct drm_connector *connector,
* sde_connector_clk_ctrl - enables/disables the connector clks
* @connector: Pointer to drm connector object
* @enable: true/false to enable/disable
+ * Returns: Zero on success
*/
-void sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
+int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable);
/**
* sde_connector_get_dpms - query dpms setting
@@ -532,7 +595,7 @@ int sde_connector_trigger_event(void *drm_connector,
*/
int sde_connector_register_event(struct drm_connector *connector,
uint32_t event_idx,
- void (*cb_func)(uint32_t event_idx,
+ int (*cb_func)(uint32_t event_idx,
uint32_t instance_idx, void *usr,
uint32_t data0, uint32_t data1,
uint32_t data2, uint32_t data3),
@@ -593,10 +656,53 @@ int sde_connector_get_dither_cfg(struct drm_connector *conn,
struct drm_connector_state *state, void **cfg, size_t *len);
/**
+ * sde_connector_set_blob_data - set connector blob property data
+ * @conn: Pointer to drm_connector struct
+ * @state: Pointer to the drm_connector_state struct
+ * @prop_id: property id to be populated
+ * Returns: Zero on success
+ */
+int sde_connector_set_blob_data(struct drm_connector *conn,
+ struct drm_connector_state *state,
+ enum msm_mdp_conn_property prop_id);
+
+/**
+ * sde_connector_roi_v1_check_roi - validate connector ROI
+ * @conn_state: Pointer to drm_connector_state struct
+ * Returns: Zero on success
+ */
+int sde_connector_roi_v1_check_roi(struct drm_connector_state *conn_state);
+
+/**
* sde_connector_schedule_status_work - manage ESD thread
* conn: Pointer to drm_connector struct
* @en: flag to start/stop ESD thread
*/
void sde_connector_schedule_status_work(struct drm_connector *conn, bool en);
+/**
+ * sde_connector_helper_reset_properties - reset properties to default values in
+ * the given DRM connector state object
+ * @connector: Pointer to DRM connector object
+ * @connector_state: Pointer to DRM connector state object
+ * Returns: 0 on success, negative errno on failure
+ */
+int sde_connector_helper_reset_custom_properties(
+ struct drm_connector *connector,
+ struct drm_connector_state *connector_state);
+
+/**
+ * sde_connector_get_mode_info - get information of the current mode in the
+ * given connector state.
+ * @conn_state: Pointer to the DRM connector state object
+ * @mode_info: Pointer to the mode info structure
+ */
+int sde_connector_get_mode_info(struct drm_connector_state *conn_state,
+ struct msm_mode_info *mode_info);
+
+/**
+ * sde_conn_timeline_status - current buffer timeline status
+ * conn: Pointer to drm_connector struct
+ */
+void sde_conn_timeline_status(struct drm_connector *conn);
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index a0846ff..a6f22c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -55,9 +55,22 @@ static void sde_core_irq_callback_handler(void *arg, int irq_idx)
spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
if (cb_tbl_error) {
- SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
- irq_idx, enable_counts);
- SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+ /*
+ * If enable count is zero and callback list is empty, then it's
+ * not a fatal issue. Log this case as debug. If the enable
+ * count is nonzero and callback list is empty, then its a real
+ * issue. Log this case as error to ensure we don't have silent
+ * IRQs running.
+ */
+ if (!enable_counts) {
+ SDE_DEBUG("irq has no callback, idx %d enables %d\n",
+ irq_idx, enable_counts);
+ SDE_EVT32_IRQ(irq_idx, enable_counts);
+ } else {
+ SDE_ERROR("irq has no callback, idx %d enables %d\n",
+ irq_idx, enable_counts);
+ SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+ }
}
/*
@@ -447,6 +460,7 @@ void sde_core_irq_preinstall(struct sde_kms *sde_kms)
{
struct msm_drm_private *priv;
int i;
+ int rc;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
@@ -460,7 +474,14 @@ void sde_core_irq_preinstall(struct sde_kms *sde_kms)
}
priv = sde_kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
+
sde_clear_all_irqs(sde_kms);
sde_disable_all_irqs(sde_kms);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -491,6 +512,7 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
{
struct msm_drm_private *priv;
int i;
+ int rc;
if (!sde_kms) {
SDE_ERROR("invalid sde_kms\n");
@@ -504,7 +526,14 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
}
priv = sde_kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
+
for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 03dab22..f68f64d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -48,6 +48,9 @@
#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
#define MDP_DEVICE_ID 0x1A
+#define SDE_PSTATES_MAX (SDE_STAGE_MAX * 4)
+#define SDE_MULTIRECT_PLANE_MAX (SDE_STAGE_MAX * 2)
+
struct sde_crtc_custom_events {
u32 event;
int (*func)(struct drm_crtc *crtc, bool en,
@@ -58,12 +61,15 @@ static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
bool en, struct sde_irq_callback *ad_irq);
static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
bool en, struct sde_irq_callback *idle_irq);
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+ struct sde_irq_callback *noirq);
static struct sde_crtc_custom_events custom_events[] = {
{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler},
{DRM_EVENT_HISTOGRAM, sde_cp_hist_interrupt},
+ {DRM_EVENT_SDE_POWER, sde_crtc_pm_event_handler},
};
/* default input fence timeout, in ms */
@@ -604,18 +610,6 @@ static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
return;
}
-/**
- * sde_crtc_destroy_dest_scaler - free memory allocated for scaler lut
- * @sde_crtc: Pointer to sde crtc
- */
-static void _sde_crtc_destroy_dest_scaler(struct sde_crtc *sde_crtc)
-{
- if (!sde_crtc)
- return;
-
- kfree(sde_crtc->scl3_lut_cfg);
-}
-
static void sde_crtc_destroy(struct drm_crtc *crtc)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -629,7 +623,6 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
drm_property_unreference_blob(sde_crtc->blob_info);
msm_property_destroy(&sde_crtc->property_info);
sde_cp_crtc_destroy_properties(crtc);
- _sde_crtc_destroy_dest_scaler(sde_crtc);
sde_fence_deinit(&sde_crtc->output_fence);
_sde_crtc_deinit_events(sde_crtc);
@@ -846,6 +839,11 @@ static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
cstate->user_roi_list.roi[i].y1,
cstate->user_roi_list.roi[i].x2,
cstate->user_roi_list.roi[i].y2);
+ SDE_EVT32_VERBOSE(DRMID(crtc),
+ cstate->user_roi_list.roi[i].x1,
+ cstate->user_roi_list.roi[i].y1,
+ cstate->user_roi_list.roi[i].x2,
+ cstate->user_roi_list.roi[i].y2);
}
return 0;
@@ -889,6 +887,7 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
for_each_connector_in_state(state->state, conn, conn_state, i) {
struct sde_connector_state *sde_conn_state;
+ struct sde_rect conn_roi;
if (!conn_state || conn_state->crtc != crtc)
continue;
@@ -915,12 +914,19 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
sde_crtc->name);
return -EINVAL;
}
+
+ sde_kms_rect_merge_rectangles(&sde_conn_state->rois, &conn_roi);
+ SDE_EVT32_VERBOSE(DRMID(crtc), DRMID(conn),
+ conn_roi.x, conn_roi.y,
+ conn_roi.w, conn_roi.h);
}
sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
+ SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w,
+ crtc_roi->h);
return 0;
}
@@ -1178,14 +1184,56 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct sde_crtc *sde_crtc;
- int lm_idx;
- int rc;
+ struct sde_crtc_state *sde_crtc_state;
+ struct msm_mode_info mode_info;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ int rc, lm_idx, i;
if (!crtc || !state)
return -EINVAL;
+ memset(&mode_info, 0, sizeof(mode_info));
+
sde_crtc = to_sde_crtc(crtc);
+ if (hweight_long(state->connector_mask) != 1) {
+ SDE_ERROR("invalid connector count(%d) for crtc: %d\n",
+ (int)hweight_long(state->connector_mask),
+ crtc->base.id);
+ return -EINVAL;
+ }
+
+ for_each_connector_in_state(state->state, conn, conn_state, i) {
+ rc = sde_connector_get_mode_info(conn_state, &mode_info);
+ if (rc) {
+ SDE_ERROR("failed to get mode info\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (!mode_info.roi_caps.enabled)
+ return 0;
+
+ sde_crtc_state = to_sde_crtc_state(state);
+ if (sde_crtc_state->user_roi_list.num_rects >
+ mode_info.roi_caps.num_roi) {
+ SDE_ERROR("roi count is more than supported limit, %d > %d\n",
+ sde_crtc_state->user_roi_list.num_rects,
+ mode_info.roi_caps.num_roi);
+ return -E2BIG;
+ }
+
+ /**
+ * TODO: Need to check against ROI alignment restrictions if partial
+ * update support is added for destination scaler configurations
+ */
+ if (sde_crtc_state->num_ds_enabled) {
+ SDE_ERROR("DS and PU concurrency is not supported\n");
+ return -EINVAL;
+ }
+
rc = _sde_crtc_set_crtc_roi(crtc, state);
if (rc)
return rc;
@@ -1432,7 +1480,7 @@ static void _sde_crtc_swap_mixers_for_right_partial_update(
* _sde_crtc_blend_setup - configure crtc mixers
* @crtc: Pointer to drm crtc structure
*/
-static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc, bool add_planes)
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_crtc_state;
@@ -1478,7 +1526,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
/* initialize stage cfg */
memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
- _sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+ if (add_planes)
+ _sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
for (i = 0; i < sde_crtc->num_mixers; i++) {
const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
@@ -1753,24 +1802,21 @@ static int _sde_crtc_set_dest_scaler_lut(struct sde_crtc *sde_crtc,
size_t len = 0;
int ret = 0;
- if (!sde_crtc || !cstate || !sde_crtc->scl3_lut_cfg) {
+ if (!sde_crtc || !cstate) {
SDE_ERROR("invalid args\n");
return -EINVAL;
}
- if (sde_crtc->scl3_lut_cfg->is_configured) {
- SDE_DEBUG("lut already configured\n");
- return 0;
- }
-
lut_data = msm_property_get_blob(&sde_crtc->property_info,
&cstate->property_state, &len, lut_idx);
if (!lut_data || !len) {
- SDE_ERROR("lut(%d): no data, len(%zu)\n", lut_idx, len);
- return -ENODATA;
+ SDE_DEBUG("%s: lut(%d): cleared: %pK, %zu\n", sde_crtc->name,
+ lut_idx, lut_data, len);
+ lut_data = NULL;
+ len = 0;
}
- cfg = sde_crtc->scl3_lut_cfg;
+ cfg = &cstate->scl3_lut_cfg;
switch (lut_idx) {
case CRTC_PROP_DEST_SCALER_LUT_ED:
@@ -1787,16 +1833,31 @@ static int _sde_crtc_set_dest_scaler_lut(struct sde_crtc *sde_crtc,
break;
default:
ret = -EINVAL;
- SDE_ERROR("invalid LUT index = %d", lut_idx);
+ SDE_ERROR("%s:invalid LUT idx(%d)\n", sde_crtc->name, lut_idx);
+ SDE_EVT32(DRMID(&sde_crtc->base), lut_idx, SDE_EVTLOG_ERROR);
break;
}
- if (cfg->dir_lut && cfg->cir_lut && cfg->sep_lut)
- cfg->is_configured = true;
+ cfg->is_configured = cfg->dir_lut && cfg->cir_lut && cfg->sep_lut;
+ SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), ret, lut_idx, len,
+ cfg->is_configured);
return ret;
}
+void sde_crtc_timeline_status(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ sde_fence_timeline_status(&sde_crtc->output_fence, &crtc->base);
+}
+
/**
* sde_crtc_secure_ctrl - Initiates the operations to swtich between secure
* and non-secure mode
@@ -1921,6 +1982,44 @@ int sde_crtc_secure_ctrl(struct drm_crtc *crtc, bool post_commit)
return ret;
}
+static int _sde_validate_hw_resources(struct sde_crtc *sde_crtc)
+{
+ int i;
+
+ /**
+ * Check if sufficient hw resources are
+ * available as per target caps & topology
+ */
+ if (!sde_crtc) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (!sde_crtc->num_mixers ||
+ sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ SDE_ERROR("%s: invalid number mixers: %d\n",
+ sde_crtc->name, sde_crtc->num_mixers);
+ SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
+ SDE_EVTLOG_ERROR);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ctl
+ || !sde_crtc->mixers[i].hw_ds) {
+ SDE_ERROR("%s:insufficient resources for mixer(%d)\n",
+ sde_crtc->name, i);
+ SDE_EVT32(DRMID(&sde_crtc->base), sde_crtc->num_mixers,
+ i, sde_crtc->mixers[i].hw_lm,
+ sde_crtc->mixers[i].hw_ctl,
+ sde_crtc->mixers[i].hw_ds, SDE_EVTLOG_ERROR);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/**
* _sde_crtc_dest_scaler_setup - Set up dest scaler block
* @crtc: Pointer to drm crtc
@@ -1937,37 +2036,49 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
u32 flush_mask = 0, op_mode = 0;
u32 lm_idx = 0, num_mixers = 0;
int i, count = 0;
+ bool ds_dirty = false;
if (!crtc)
return;
- sde_crtc = to_sde_crtc(crtc);
+ sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(crtc->state);
- kms = _sde_crtc_get_kms(crtc);
+ kms = _sde_crtc_get_kms(crtc);
num_mixers = sde_crtc->num_mixers;
+ count = cstate->num_ds;
SDE_DEBUG("crtc%d\n", crtc->base.id);
+ SDE_EVT32(DRMID(crtc), num_mixers, count, cstate->ds_dirty,
+ sde_crtc->ds_reconfig, cstate->num_ds_enabled);
- if (!cstate->ds_dirty) {
+ /**
+ * destination scaler configuration will be done either
+ * on set property or on power collapse (idle/suspend)
+ */
+ ds_dirty = (cstate->ds_dirty || sde_crtc->ds_reconfig);
+ if (sde_crtc->ds_reconfig) {
+ SDE_DEBUG("reconfigure dest scaler block\n");
+ sde_crtc->ds_reconfig = false;
+ }
+
+ if (!ds_dirty) {
SDE_DEBUG("no change in settings, skip commit\n");
} else if (!kms || !kms->catalog) {
- SDE_ERROR("invalid parameters\n");
+ SDE_ERROR("crtc%d:invalid parameters\n", crtc->base.id);
} else if (!kms->catalog->mdp[0].has_dest_scaler) {
SDE_DEBUG("dest scaler feature not supported\n");
- } else if (num_mixers > CRTC_DUAL_MIXERS) {
- SDE_ERROR("invalid number mixers: %d\n", num_mixers);
- } else if (!sde_crtc->scl3_lut_cfg->is_configured) {
- SDE_DEBUG("no LUT data available\n");
+ } else if (_sde_validate_hw_resources(sde_crtc)) {
+ //do nothing
+ } else if (!cstate->scl3_lut_cfg.is_configured) {
+ SDE_ERROR("crtc%d:no LUT data available\n", crtc->base.id);
} else {
- count = cstate->num_ds_enabled ? cstate->num_ds : num_mixers;
-
for (i = 0; i < count; i++) {
cfg = &cstate->ds_cfg[i];
if (!cfg->flags)
continue;
- lm_idx = cfg->ndx;
+ lm_idx = cfg->idx;
hw_lm = sde_crtc->mixers[lm_idx].hw_lm;
hw_ctl = sde_crtc->mixers[lm_idx].hw_ctl;
hw_ds = sde_crtc->mixers[lm_idx].hw_ds;
@@ -1981,7 +2092,7 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
CRTC_DUAL_MIXERS) ?
SDE_DS_OP_MODE_DUAL : 0;
hw_ds->ops.setup_opmode(hw_ds, op_mode);
- SDE_EVT32(DRMID(crtc), op_mode);
+ SDE_EVT32_VERBOSE(DRMID(crtc), op_mode);
}
/* Setup scaler */
@@ -1990,33 +2101,23 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
SDE_DRM_DESTSCALER_ENHANCER_UPDATE)) {
if (hw_ds->ops.setup_scaler)
hw_ds->ops.setup_scaler(hw_ds,
- cfg->scl3_cfg,
- sde_crtc->scl3_lut_cfg);
+ &cfg->scl3_cfg,
+ &cstate->scl3_lut_cfg);
- /**
- * Clear the flags as the block doesn't have to
- * be programmed in each commit if no updates
- */
- cfg->flags &= ~SDE_DRM_DESTSCALER_SCALE_UPDATE;
- cfg->flags &=
- ~SDE_DRM_DESTSCALER_ENHANCER_UPDATE;
}
/*
* Dest scaler shares the flush bit of the LM in control
*/
- if (cfg->set_lm_flush && hw_lm && hw_ctl &&
- hw_ctl->ops.get_bitmask_mixer) {
+ if (hw_ctl->ops.get_bitmask_mixer) {
flush_mask = hw_ctl->ops.get_bitmask_mixer(
hw_ctl, hw_lm->idx);
SDE_DEBUG("Set lm[%d] flush = %d",
hw_lm->idx, flush_mask);
hw_ctl->ops.update_pending_flush(hw_ctl,
- flush_mask);
+ flush_mask);
}
- cfg->set_lm_flush = false;
}
- cstate->ds_dirty = false;
}
}
@@ -2178,47 +2279,6 @@ static void _sde_crtc_retire_event(struct drm_crtc *crtc, ktime_t ts)
SDE_ATRACE_END("signal_retire_fence");
}
-/* _sde_crtc_idle_notify - signal idle timeout to client */
-static void _sde_crtc_idle_notify(struct sde_crtc *sde_crtc)
-{
- struct drm_crtc *crtc;
- struct drm_event event;
- int ret = 0;
-
- if (!sde_crtc) {
- SDE_ERROR("invalid sde crtc\n");
- return;
- }
-
- crtc = &sde_crtc->base;
- event.type = DRM_EVENT_IDLE_NOTIFY;
- event.length = sizeof(u32);
- msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
- (u8 *)&ret);
-
- SDE_DEBUG("crtc:%d idle timeout notified\n", crtc->base.id);
-}
-
-/*
- * sde_crtc_handle_event - crtc frame event handle.
- * This API must manage only non-IRQ context events.
- */
-static bool _sde_crtc_handle_event(struct sde_crtc *sde_crtc, u32 event)
-{
- bool event_processed = false;
-
- /**
- * idle events are originated from commit thread and can be processed
- * in same context
- */
- if (event & SDE_ENCODER_FRAME_EVENT_IDLE) {
- _sde_crtc_idle_notify(sde_crtc);
- event_processed = true;
- }
-
- return event_processed;
-}
-
static void sde_crtc_frame_event_work(struct kthread_work *work)
{
struct msm_drm_private *priv;
@@ -2312,15 +2372,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
SDE_ATRACE_END("crtc_frame_event");
}
-/*
- * sde_crtc_frame_event_cb - crtc frame event callback API. CRTC module
- * registers this API to encoder for all frame event callbacks like
- * release_fence, retire_fence, frame_error, frame_done, idle_timeout,
- * etc. Encoder may call different events from different context - IRQ,
- * user thread, commit_thread, etc. Each event should be carefully
- * reviewed and should be processed in proper task context to avoid scheduling
- * delay or properly manage the irq context's bottom half processing.
- */
static void sde_crtc_frame_event_cb(void *data, u32 event)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
@@ -2329,7 +2380,6 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
struct sde_crtc_frame_event *fevent;
unsigned long flags;
u32 crtc_id;
- bool event_processed = false;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
SDE_ERROR("invalid parameters\n");
@@ -2342,11 +2392,6 @@ static void sde_crtc_frame_event_cb(void *data, u32 event)
SDE_DEBUG("crtc%d\n", crtc->base.id);
SDE_EVT32_VERBOSE(DRMID(crtc), event);
- /* try to process the event in caller context */
- event_processed = _sde_crtc_handle_event(sde_crtc, event);
- if (event_processed)
- return;
-
spin_lock_irqsave(&sde_crtc->spin_lock, flags);
fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
struct sde_crtc_frame_event, list);
@@ -2387,24 +2432,6 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
sde_crtc_secure_ctrl(crtc, true);
}
-/* _sde_crtc_set_idle_timeout - update idle timeout wait duration */
-static void _sde_crtc_set_idle_timeout(struct drm_crtc *crtc, u64 val)
-{
- struct drm_encoder *encoder;
-
- if (!crtc) {
- SDE_ERROR("invalid crtc\n");
- return;
- }
-
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- sde_encoder_set_idle_timeout(encoder, (u32) val);
- }
-}
-
/**
* _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
* @cstate: Pointer to sde crtc state
@@ -2488,28 +2515,6 @@ static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
}
/**
- * _sde_crtc_dest_scaler_init - allocate memory for scaler lut
- * @sde_crtc : Pointer to sde crtc
- * @catalog : Pointer to mdss catalog info
- */
-static void _sde_crtc_dest_scaler_init(struct sde_crtc *sde_crtc,
- struct sde_mdss_cfg *catalog)
-{
- if (!sde_crtc || !catalog)
- return;
-
- if (!catalog->mdp[0].has_dest_scaler) {
- SDE_DEBUG("dest scaler feature not supported\n");
- return;
- }
-
- sde_crtc->scl3_lut_cfg = kzalloc(sizeof(struct sde_hw_scaler3_lut_cfg),
- GFP_KERNEL);
- if (!sde_crtc->scl3_lut_cfg)
- SDE_ERROR("failed to create scale LUT for dest scaler");
-}
-
-/**
* _sde_crtc_set_dest_scaler - copy dest scaler settings from userspace
* @sde_crtc : Pointer to sde crtc
* @cstate : Pointer to sde crtc state
@@ -2523,7 +2528,7 @@ static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
struct sde_drm_dest_scaler_cfg *ds_cfg_usr;
struct sde_drm_scaler_v2 scaler_v2;
void __user *scaler_v2_usr;
- int i, count, ret = 0;
+ int i, count;
if (!sde_crtc || !cstate) {
SDE_ERROR("invalid sde_crtc/state\n");
@@ -2532,15 +2537,14 @@ static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
SDE_DEBUG("crtc %s\n", sde_crtc->name);
- cstate->num_ds = 0;
- cstate->ds_dirty = false;
if (!usr_ptr) {
SDE_DEBUG("ds data removed\n");
return 0;
}
if (copy_from_user(&ds_data, usr_ptr, sizeof(ds_data))) {
- SDE_ERROR("failed to copy dest scaler data from user\n");
+ SDE_ERROR("%s:failed to copy dest scaler data from user\n",
+ sde_crtc->name);
return -EINVAL;
}
@@ -2550,11 +2554,10 @@ static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
return 0;
}
- if (!sde_crtc->num_mixers || count > sde_crtc->num_mixers ||
- (count && (count != sde_crtc->num_mixers) &&
- !(ds_data.ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
- SDE_ERROR("invalid config:num ds(%d), mixers(%d),flags(%d)\n",
- count, sde_crtc->num_mixers, ds_data.ds_cfg[0].flags);
+ if (count > SDE_MAX_DS_COUNT) {
+ SDE_ERROR("%s: invalid config: num_ds(%d) max(%d)\n",
+ sde_crtc->name, count, SDE_MAX_DS_COUNT);
+ SDE_EVT32(DRMID(&sde_crtc->base), count, SDE_EVTLOG_ERROR);
return -EINVAL;
}
@@ -2562,49 +2565,35 @@ static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
for (i = 0; i < count; i++) {
ds_cfg_usr = &ds_data.ds_cfg[i];
- cstate->ds_cfg[i].ndx = ds_cfg_usr->index;
+ cstate->ds_cfg[i].idx = ds_cfg_usr->index;
cstate->ds_cfg[i].flags = ds_cfg_usr->flags;
cstate->ds_cfg[i].lm_width = ds_cfg_usr->lm_width;
cstate->ds_cfg[i].lm_height = ds_cfg_usr->lm_height;
- cstate->ds_cfg[i].scl3_cfg = NULL;
+ memset(&scaler_v2, 0, sizeof(scaler_v2));
if (ds_cfg_usr->scaler_cfg) {
scaler_v2_usr =
(void __user *)((uintptr_t)ds_cfg_usr->scaler_cfg);
- memset(&scaler_v2, 0, sizeof(scaler_v2));
-
- cstate->ds_cfg[i].scl3_cfg =
- kzalloc(sizeof(struct sde_hw_scaler3_cfg),
- GFP_KERNEL);
-
- if (!cstate->ds_cfg[i].scl3_cfg) {
- ret = -ENOMEM;
- goto err;
- }
-
if (copy_from_user(&scaler_v2, scaler_v2_usr,
sizeof(scaler_v2))) {
- SDE_ERROR("scale data:copy from user failed\n");
- ret = -EINVAL;
- goto err;
+ SDE_ERROR("%s:scaler: copy from user failed\n",
+ sde_crtc->name);
+ return -EINVAL;
}
-
- sde_set_scaler_v2(cstate->ds_cfg[i].scl3_cfg,
- &scaler_v2);
-
- SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
- scaler_v2.enable, scaler_v2.dir_en,
- scaler_v2.de.enable, scaler_v2.src_width[0],
- scaler_v2.src_height[0], scaler_v2.dst_width,
- scaler_v2.dst_height);
- SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
- scaler_v2.enable, scaler_v2.dir_en,
- scaler_v2.de.enable, scaler_v2.src_width[0],
- scaler_v2.src_height[0], scaler_v2.dst_width,
- scaler_v2.dst_height);
}
+ sde_set_scaler_v2(&cstate->ds_cfg[i].scl3_cfg, &scaler_v2);
+
+ SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
+ scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
+ scaler_v2.src_width[0], scaler_v2.src_height[0],
+ scaler_v2.dst_width, scaler_v2.dst_height);
+ SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
+ scaler_v2.enable, scaler_v2.dir_en, scaler_v2.de.enable,
+ scaler_v2.src_width[0], scaler_v2.src_height[0],
+ scaler_v2.dst_width, scaler_v2.dst_height);
+
SDE_DEBUG("ds cfg[%d]-ndx(%d) flags(%d) lm(%dx%d)\n",
i, ds_cfg_usr->index, ds_cfg_usr->flags,
ds_cfg_usr->lm_width, ds_cfg_usr->lm_height);
@@ -2615,13 +2604,9 @@ static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
cstate->num_ds = count;
cstate->ds_dirty = true;
+ SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), count, cstate->ds_dirty);
+
return 0;
-
-err:
- for (; i >= 0; i--)
- kfree(cstate->ds_cfg[i].scl3_cfg);
-
- return ret;
}
/**
@@ -2639,7 +2624,7 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
struct sde_hw_ds *hw_ds;
struct sde_hw_ds_cfg *cfg;
u32 i, ret = 0, lm_idx;
- u32 num_ds_enable = 0;
+ u32 num_ds_enable = 0, hdisplay = 0;
u32 max_in_width = 0, max_out_width = 0;
u32 prev_lm_width = 0, prev_lm_height = 0;
@@ -2653,13 +2638,13 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
SDE_DEBUG("crtc%d\n", crtc->base.id);
- if (!cstate->ds_dirty && !cstate->num_ds_enabled) {
+ if (!cstate->ds_dirty) {
SDE_DEBUG("dest scaler property not set, skip validation\n");
return 0;
}
if (!kms || !kms->catalog) {
- SDE_ERROR("invalid parameters\n");
+ SDE_ERROR("crtc%d: invalid parameters\n", crtc->base.id);
return -EINVAL;
}
@@ -2669,40 +2654,13 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
}
if (!sde_crtc->num_mixers) {
- SDE_ERROR("mixers not allocated\n");
- return -EINVAL;
+ SDE_DEBUG("mixers not allocated\n");
+ return 0;
}
- /**
- * Check if sufficient hw resources are
- * available as per target caps & topology
- */
- if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
- SDE_ERROR("invalid config: mixers(%d) max(%d)\n",
- sde_crtc->num_mixers, CRTC_DUAL_MIXERS);
- ret = -EINVAL;
+ ret = _sde_validate_hw_resources(sde_crtc);
+ if (ret)
goto err;
- }
-
- for (i = 0; i < sde_crtc->num_mixers; i++) {
- if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ds) {
- SDE_ERROR("insufficient HW resources allocated\n");
- ret = -EINVAL;
- goto err;
- }
- }
-
- /**
- * Check if DS needs to be enabled or disabled
- * In case of enable, validate the data
- */
- if (!cstate->ds_dirty || !cstate->num_ds ||
- !(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
- SDE_DEBUG("disable dest scaler,dirty(%d)num(%d)flags(%d)\n",
- cstate->ds_dirty, cstate->num_ds,
- cstate->ds_cfg[0].flags);
- goto disable;
- }
/**
* No of dest scalers shouldn't exceed hw ds block count and
@@ -2712,17 +2670,30 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
if (cstate->num_ds > kms->catalog->ds_count ||
((cstate->num_ds != sde_crtc->num_mixers) &&
!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
- SDE_ERROR("invalid cfg: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
- cstate->num_ds, kms->catalog->ds_count,
+ SDE_ERROR("crtc%d: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
+ crtc->base.id, cstate->num_ds, kms->catalog->ds_count,
cstate->ds_cfg[0].flags);
ret = -EINVAL;
goto err;
}
+ /**
+ * Check if DS needs to be enabled or disabled
+ * In case of enable, validate the data
+ */
+ if (!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
+ SDE_DEBUG("disable dest scaler, num(%d) flags(%d)\n",
+ cstate->num_ds, cstate->ds_cfg[0].flags);
+ goto disable;
+ }
+
+ /* Display resolution */
+ hdisplay = mode->hdisplay/sde_crtc->num_mixers;
+
/* Validate the DS data */
for (i = 0; i < cstate->num_ds; i++) {
cfg = &cstate->ds_cfg[i];
- lm_idx = cfg->ndx;
+ lm_idx = cfg->idx;
/**
* Validate against topology
@@ -2731,8 +2702,10 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
*/
if (lm_idx >= sde_crtc->num_mixers || (i != lm_idx &&
!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
- SDE_ERROR("invalid user data(%d):idx(%d), flags(%d)\n",
- i, lm_idx, cfg->flags);
+ SDE_ERROR("crtc%d: ds_cfg id(%d):idx(%d), flags(%d)\n",
+ crtc->base.id, i, lm_idx, cfg->flags);
+ SDE_EVT32(DRMID(crtc), i, lm_idx, cfg->flags,
+ SDE_EVTLOG_ERROR);
ret = -EINVAL;
goto err;
}
@@ -2751,14 +2724,13 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
}
/* Check LM width and height */
- if (cfg->lm_width > (mode->hdisplay/sde_crtc->num_mixers) ||
- cfg->lm_height > mode->vdisplay ||
- !cfg->lm_width || !cfg->lm_height) {
- SDE_ERROR("invalid lm size[%d,%d] display [%d,%d]\n",
- cfg->lm_width,
- cfg->lm_height,
- mode->hdisplay/sde_crtc->num_mixers,
- mode->vdisplay);
+ if (cfg->lm_width > hdisplay || cfg->lm_height > mode->vdisplay
+ || !cfg->lm_width || !cfg->lm_height) {
+ SDE_ERROR("crtc%d: lm size[%d,%d] display [%d,%d]\n",
+ crtc->base.id, cfg->lm_width, cfg->lm_height,
+ hdisplay, mode->vdisplay);
+ SDE_EVT32(DRMID(crtc), cfg->lm_width, cfg->lm_height,
+ hdisplay, mode->vdisplay, SDE_EVTLOG_ERROR);
ret = -E2BIG;
goto err;
}
@@ -2769,9 +2741,13 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
} else {
if (cfg->lm_width != prev_lm_width ||
cfg->lm_height != prev_lm_height) {
- SDE_ERROR("lm size:left[%d,%d], right[%d %d]\n",
- cfg->lm_width, cfg->lm_height,
- prev_lm_width, prev_lm_height);
+ SDE_ERROR("crtc%d:lm left[%d,%d]right[%d %d]\n",
+ crtc->base.id, cfg->lm_width,
+ cfg->lm_height, prev_lm_width,
+ prev_lm_height);
+ SDE_EVT32(DRMID(crtc), cfg->lm_width,
+ cfg->lm_height, prev_lm_width,
+ prev_lm_height, SDE_EVTLOG_ERROR);
ret = -EINVAL;
goto err;
}
@@ -2780,22 +2756,40 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
/* Check scaler data */
if (cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE ||
cfg->flags & SDE_DRM_DESTSCALER_ENHANCER_UPDATE) {
- if (!cfg->scl3_cfg) {
- ret = -EINVAL;
- SDE_ERROR("null scale data\n");
- goto err;
- }
- if (cfg->scl3_cfg->src_width[0] > max_in_width ||
- cfg->scl3_cfg->dst_width > max_out_width ||
- !cfg->scl3_cfg->src_width[0] ||
- !cfg->scl3_cfg->dst_width) {
- SDE_ERROR("scale width(%d %d) for ds-%d:\n",
- cfg->scl3_cfg->src_width[0],
- cfg->scl3_cfg->dst_width,
+
+ /**
+ * Scaler src and dst width shouldn't exceed the maximum
+ * width limitation. Also, if there is no partial update
+ * dst width and height must match display resolution.
+ */
+ if (cfg->scl3_cfg.src_width[0] > max_in_width ||
+ cfg->scl3_cfg.dst_width > max_out_width ||
+ !cfg->scl3_cfg.src_width[0] ||
+ !cfg->scl3_cfg.dst_width ||
+ (!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE)
+ && (cfg->scl3_cfg.dst_width != hdisplay ||
+ cfg->scl3_cfg.dst_height != mode->vdisplay))) {
+ SDE_ERROR("crtc%d: ", crtc->base.id);
+ SDE_ERROR("src_w(%d) dst(%dx%d) display(%dx%d)",
+ cfg->scl3_cfg.src_width[0],
+ cfg->scl3_cfg.dst_width,
+ cfg->scl3_cfg.dst_height,
+ hdisplay, mode->vdisplay);
+ SDE_ERROR("num_mixers(%d) flags(%d) ds-%d:\n",
+ sde_crtc->num_mixers, cfg->flags,
hw_ds->idx - DS_0);
SDE_ERROR("scale_en = %d, DE_en =%d\n",
- cfg->scl3_cfg->enable,
- cfg->scl3_cfg->de.enable);
+ cfg->scl3_cfg.enable,
+ cfg->scl3_cfg.de.enable);
+
+ SDE_EVT32(DRMID(crtc), cfg->scl3_cfg.enable,
+ cfg->scl3_cfg.de.enable, cfg->flags,
+ max_in_width, max_out_width,
+ cfg->scl3_cfg.src_width[0],
+ cfg->scl3_cfg.dst_width,
+ cfg->scl3_cfg.dst_height, hdisplay,
+ mode->vdisplay, sde_crtc->num_mixers,
+ SDE_EVTLOG_ERROR);
cfg->flags &=
~SDE_DRM_DESTSCALER_SCALE_UPDATE;
@@ -2810,36 +2804,34 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
num_ds_enable++;
- /**
- * Validation successful, indicator for flush to be issued
- */
- cfg->set_lm_flush = true;
-
- SDE_DEBUG("ds[%d]: flags = 0x%X\n",
+ SDE_DEBUG("ds[%d]: flags[0x%X]\n",
hw_ds->idx - DS_0, cfg->flags);
+ SDE_EVT32_VERBOSE(DRMID(crtc), hw_ds->idx - DS_0, cfg->flags);
}
disable:
- SDE_DEBUG("dest scaler enable status, old = %d, new = %d",
- cstate->num_ds_enabled, num_ds_enable);
- SDE_EVT32(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
- cstate->ds_dirty);
+ SDE_DEBUG("dest scaler status : %d -> %d\n",
+ cstate->num_ds_enabled, num_ds_enable);
+ SDE_EVT32_VERBOSE(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
+ cstate->num_ds, cstate->ds_dirty);
if (cstate->num_ds_enabled != num_ds_enable) {
/* Disabling destination scaler */
if (!num_ds_enable) {
- for (i = 0; i < sde_crtc->num_mixers; i++) {
+ for (i = 0; i < cstate->num_ds; i++) {
cfg = &cstate->ds_cfg[i];
- cfg->ndx = i;
+ cfg->idx = i;
/* Update scaler settings in disable case */
cfg->flags = SDE_DRM_DESTSCALER_SCALE_UPDATE;
- cfg->scl3_cfg->enable = 0;
- cfg->scl3_cfg->de.enable = 0;
- cfg->set_lm_flush = true;
+ cfg->scl3_cfg.enable = 0;
+ cfg->scl3_cfg.de.enable = 0;
}
}
cstate->num_ds_enabled = num_ds_enable;
cstate->ds_dirty = true;
+ } else {
+ if (!cstate->num_ds_enabled)
+ cstate->ds_dirty = false;
}
return 0;
@@ -3056,6 +3048,11 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
return;
}
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
SDE_DEBUG("crtc%d\n", crtc->base.id);
sde_crtc = to_sde_crtc(crtc);
@@ -3092,9 +3089,15 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
if (unlikely(!sde_crtc->num_mixers))
return;
- _sde_crtc_blend_setup(crtc);
+ _sde_crtc_blend_setup(crtc, true);
_sde_crtc_dest_scaler_setup(crtc);
+ /* cancel the idle notify delayed work */
+ if (sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+ MSM_DISPLAY_CAP_VID_MODE) &&
+ kthread_cancel_delayed_work_sync(&sde_crtc->idle_notify_work))
+ SDE_DEBUG("idle notify work cancelled\n");
+
/*
* Since CP properties use AXI buffer to program the
* HW, check if context bank is in attached
@@ -3126,6 +3129,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
struct msm_drm_thread *event_thread;
unsigned long flags;
struct sde_crtc_state *cstate;
+ int idle_time = 0;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
SDE_ERROR("invalid crtc\n");
@@ -3138,6 +3142,11 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
SDE_DEBUG("crtc%d\n", crtc->base.id);
sde_crtc = to_sde_crtc(crtc);
@@ -3151,6 +3160,7 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
}
event_thread = &priv->event_thread[crtc->index];
+ idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT);
if (sde_crtc->event) {
SDE_DEBUG("already received sde_crtc->event\n");
@@ -3181,6 +3191,15 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
/* wait for acquire fences before anything else is done */
_sde_crtc_wait_for_fences(crtc);
+ /* schedule the idle notify delayed work */
+ if (idle_time && sde_encoder_check_mode(sde_crtc->mixers[0].encoder,
+ MSM_DISPLAY_CAP_VID_MODE)) {
+ kthread_queue_delayed_work(&event_thread->worker,
+ &sde_crtc->idle_notify_work,
+ msecs_to_jiffies(idle_time));
+ SDE_DEBUG("schedule idle notify work in %dms\n", idle_time);
+ }
+
if (!cstate->rsc_update) {
drm_for_each_encoder(encoder, dev) {
if (encoder->crtc != crtc)
@@ -3383,7 +3402,175 @@ static void _sde_crtc_remove_pipe_flush(struct sde_crtc *sde_crtc)
}
}
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+/**
+ * _sde_crtc_reset_hw - attempt hardware reset on errors
+ * @crtc: Pointer to DRM crtc instance
+ * @old_state: Pointer to crtc state for previous commit
+ * @dump_status: Whether or not to dump debug status before reset
+ * Returns: Zero if current commit should still be attempted
+ */
+static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state, bool dump_status)
+{
+ struct drm_plane *plane_halt[MAX_PLANES];
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ struct sde_crtc *sde_crtc;
+ struct sde_hw_ctl *ctl;
+ enum sde_ctl_rot_op_mode old_rot_op_mode;
+ signed int i, plane_count;
+ int rc;
+
+ if (!crtc || !old_state)
+ return -EINVAL;
+ sde_crtc = to_sde_crtc(crtc);
+
+ old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode;
+ SDE_EVT32(DRMID(crtc), old_rot_op_mode,
+ dump_status, SDE_EVTLOG_FUNC_ENTRY);
+
+ if (dump_status)
+ SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus");
+
+ for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ ctl = sde_crtc->mixers[i].hw_ctl;
+ if (!ctl || !ctl->ops.reset)
+ continue;
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ SDE_DEBUG("crtc%d: ctl%d reset failure\n",
+ crtc->base.id, ctl->idx - CTL_0);
+ SDE_EVT32(DRMID(crtc), ctl->idx - CTL_0,
+ SDE_EVTLOG_ERROR);
+ break;
+ }
+ }
+
+ /*
+ * Early out if simple ctl reset succeeded and previous commit
+ * did not involve the rotator.
+ *
+ * If the previous commit had rotation enabled, then the ctl
+ * reset would also have reset the rotator h/w. The rotator
+ * programming for the current commit may need to be repeated,
+ * depending on the rotation mode; don't handle this for now
+ * and just force a hard reset in those cases.
+ */
+ if (i == sde_crtc->num_mixers &&
+ old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+ return false;
+
+ SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
+
+ /* force all components in the system into reset at the same time */
+ for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ ctl = sde_crtc->mixers[i].hw_ctl;
+ if (!ctl || !ctl->ops.hard_reset)
+ continue;
+
+ SDE_EVT32(DRMID(crtc), ctl->idx - CTL_0);
+ ctl->ops.hard_reset(ctl, true);
+ }
+
+ plane_count = 0;
+ drm_atomic_crtc_state_for_each_plane(plane, old_state) {
+ if (plane_count >= ARRAY_SIZE(plane_halt))
+ break;
+
+ plane_halt[plane_count++] = plane;
+ sde_plane_halt_requests(plane, true);
+ sde_plane_set_revalidate(plane, true);
+ }
+
+ /* reset both previous... */
+ for_each_plane_in_state(old_state->state, plane, pstate, i) {
+ if (pstate->crtc != crtc)
+ continue;
+
+ sde_plane_reset_rot(plane, (struct drm_plane_state *)pstate);
+ }
+
+ /* ...and current rotation attempts, if applicable */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = plane->state;
+ if (!pstate)
+ continue;
+
+ sde_plane_reset_rot(plane, (struct drm_plane_state *)pstate);
+ }
+
+ /* take h/w components out of reset */
+ for (i = plane_count - 1; i >= 0; --i)
+ sde_plane_halt_requests(plane_halt[i], false);
+
+ for (i = 0; i < sde_crtc->num_mixers; ++i) {
+ ctl = sde_crtc->mixers[i].hw_ctl;
+ if (!ctl || !ctl->ops.hard_reset)
+ continue;
+
+ ctl->ops.hard_reset(ctl, false);
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * _sde_crtc_prepare_for_kickoff_rot - rotator related kickoff preparation
+ * @dev: Pointer to drm device
+ * @crtc: Pointer to crtc structure
+ * Returns: true on preparation errors
+ */
+static bool _sde_crtc_prepare_for_kickoff_rot(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
+
+ if (!crtc || !dev) {
+ SDE_ERROR("invalid argument(s)\n");
+ return false;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
+
+ /* default to ASYNC mode for inline rotation */
+ cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+ SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+
+ if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+ return false;
+
+ /* extra steps needed for inline ASYNC modes */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * For inline ASYNC modes, the flush bits are not written
+ * to hardware atomically, so avoid using it if a video
+ * mode encoder is active on this CRTC.
+ */
+ if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) {
+ cstate->sbuf_cfg.rot_op_mode =
+ SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+ return false;
+ }
+ }
+
+ /*
+ * For ASYNC inline modes, kick off the rotator now so that the H/W
+ * can start as soon as it's ready.
+ */
+ if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+ return true;
+
+ return false;
+}
+
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_encoder *encoder;
struct drm_device *dev;
@@ -3391,7 +3578,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_crtc_state *cstate;
- bool is_error;
+ bool is_error, reset_req;
int ret;
if (!crtc) {
@@ -3401,7 +3588,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
dev = crtc->dev;
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
- is_error = false;
+ reset_req = false;
if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
SDE_ERROR("invalid argument\n");
@@ -3421,9 +3608,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
SDE_ATRACE_BEGIN("crtc_commit");
- /* default to ASYNC mode for inline rotation */
- cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
- SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+ is_error = _sde_crtc_prepare_for_kickoff_rot(dev, crtc);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct sde_encoder_kickoff_params params = { 0 };
@@ -3438,29 +3623,26 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
params.inline_rotate_prefill = cstate->sbuf_prefill_line;
params.affected_displays = _sde_crtc_get_displays_affected(crtc,
crtc->state);
- sde_encoder_prepare_for_kickoff(encoder, ¶ms);
-
- /*
- * For inline ASYNC modes, the flush bits are not written
- * to hardware atomically, so avoid using it if a video
- * mode encoder is active on this CRTC.
- */
- if (cstate->sbuf_cfg.rot_op_mode ==
- SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
- sde_encoder_get_intf_mode(encoder) ==
- INTF_MODE_VIDEO)
- cstate->sbuf_cfg.rot_op_mode =
- SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+ if (sde_encoder_prepare_for_kickoff(encoder, ¶ms))
+ reset_req = true;
}
/*
- * For ASYNC inline modes, kick off the rotator now so that the H/W
- * can start as soon as it's ready.
+ * Optionally attempt h/w recovery if any errors were detected while
+ * preparing for the kickoff
*/
- if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
- if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+ if (reset_req) {
+ if (_sde_crtc_reset_hw(crtc, old_state,
+ !sde_crtc->reset_request))
is_error = true;
+ /* force offline rotation mode since the commit has no pipes */
+ if (is_error)
+ cstate->sbuf_cfg.rot_op_mode =
+ SDE_CTL_ROT_OP_MODE_OFFLINE;
+ }
+ sde_crtc->reset_request = reset_req;
+
/* wait for frame_event_done completion */
SDE_ATRACE_BEGIN("wait_for_frame_done_event");
ret = _sde_crtc_wait_for_frame_done(crtc);
@@ -3501,14 +3683,16 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
sde_vbif_clear_errors(sde_kms);
- if (is_error)
+ if (is_error) {
_sde_crtc_remove_pipe_flush(sde_crtc);
+ _sde_crtc_blend_setup(crtc, false);
+ }
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
- sde_encoder_kickoff(encoder, is_error);
+ sde_encoder_kickoff(encoder, false);
}
reinit_completion(&sde_crtc->frame_done_comp);
@@ -3660,6 +3844,9 @@ static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
old_cstate, cstate,
&cstate->property_state, cstate->property_values);
+ /* clear destination scaler dirty bit */
+ cstate->ds_dirty = false;
+
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
@@ -3726,19 +3913,22 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
{
struct drm_crtc *crtc = arg;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct sde_crtc_mixer *m;
- u32 i, misr_status;
+ u32 i, misr_status, power_on;
unsigned long flags;
struct sde_crtc_irq_info *node = NULL;
int ret = 0;
+ struct drm_event event;
if (!crtc) {
SDE_ERROR("invalid crtc\n");
return;
}
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
mutex_lock(&sde_crtc->crtc_lock);
@@ -3767,6 +3957,12 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
sde_cp_crtc_post_ipc(crtc);
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 1;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
+
for (i = 0; i < sde_crtc->num_mixers; ++i) {
m = &sde_crtc->mixers[i];
if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
@@ -3778,6 +3974,21 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
}
break;
case SDE_POWER_EVENT_PRE_DISABLE:
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * disable the vsync source after updating the
+ * rsc state. rsc state update might have vsync wait
+ * and vsync source must be disabled after it.
+ * It will avoid generating any vsync from this point
+ * till mode-2 entry. It is SW workaround for HW
+ * limitation and should not be removed without
+ * checking the updated design.
+ */
+ sde_encoder_control_te(encoder, false);
+ }
+
for (i = 0; i < sde_crtc->num_mixers; ++i) {
m = &sde_crtc->mixers[i];
if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
@@ -3812,6 +4023,19 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
sde_plane_set_revalidate(plane, true);
sde_cp_crtc_suspend(crtc);
+
+ /**
+ * destination scaler if enabled should be reconfigured
+ * in the next frame update
+ */
+ if (cstate->num_ds_enabled)
+ sde_crtc->ds_reconfig = true;
+
+ event.type = DRM_EVENT_SDE_POWER;
+ event.length = sizeof(power_on);
+ power_on = 0;
+ msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
+ (u8 *)&power_on);
break;
default:
SDE_DEBUG("event:%d not handled\n", event_type);
@@ -3837,16 +4061,18 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
SDE_ERROR("invalid crtc\n");
return;
}
+
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(crtc->state);
priv = crtc->dev->dev_private;
SDE_DEBUG("crtc%d\n", crtc->base.id);
- for (i = 0; i < cstate->num_connectors; i++)
- sde_connector_schedule_status_work(cstate->connectors[i],
- false);
-
if (sde_kms_is_suspend_state(crtc->dev))
_sde_crtc_set_suspend(crtc, true);
@@ -3861,6 +4087,10 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
(u8 *)&power_on);
+ /* destination scaler if enabled should be reconfigured on resume */
+ if (cstate->num_ds_enabled)
+ sde_crtc->ds_reconfig = true;
+
/* wait for frame_event_done completion */
if (_sde_crtc_wait_for_frame_done(crtc))
SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
@@ -3943,15 +4173,18 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
struct sde_crtc_irq_info *node = NULL;
struct drm_event event;
u32 power_on;
- int ret, i;
- struct sde_crtc_state *cstate;
+ int ret;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
SDE_ERROR("invalid crtc\n");
return;
}
priv = crtc->dev->dev_private;
- cstate = to_sde_crtc_state(crtc->state);
+
+ if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
SDE_DEBUG("crtc%d\n", crtc->base.id);
SDE_EVT32_VERBOSE(DRMID(crtc));
@@ -4015,9 +4248,6 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
SDE_POWER_EVENT_PRE_DISABLE,
sde_crtc_handle_power_event, crtc, sde_crtc->name);
-
- for (i = 0; i < cstate->num_connectors; i++)
- sde_connector_schedule_status_work(cstate->connectors[i], true);
}
struct plane_state {
@@ -4122,13 +4352,14 @@ static int _sde_crtc_excl_dim_layer_check(struct drm_crtc_state *state,
}
static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_crtc_state *state, struct plane_state pstates[],
+ int cnt)
{
struct drm_encoder *encoder;
struct sde_crtc_state *cstate;
uint32_t secure;
uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
- int encoder_cnt = 0;
+ int encoder_cnt = 0, i;
int rc;
if (!crtc || !state) {
@@ -4138,31 +4369,40 @@ static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
cstate = to_sde_crtc_state(state);
- secure = sde_crtc_get_property(cstate,
- CRTC_PROP_SECURITY_LEVEL);
+ secure = sde_crtc_get_property(cstate, CRTC_PROP_SECURITY_LEVEL);
- rc = _sde_crtc_find_plane_fb_modes(state,
- &fb_ns,
- &fb_sec,
- &fb_sec_dir);
+ rc = _sde_crtc_find_plane_fb_modes(state, &fb_ns, &fb_sec, &fb_sec_dir);
if (rc)
return rc;
- /**
- * validate planes
- * fb_sec_dir is for secure camera preview and secure display use case,
- * fb_sec is for secure video playback,
- * fb_ns is for normal non secure use cases.
- */
- if ((secure == SDE_DRM_SEC_ONLY) &&
- (fb_ns || fb_sec || (fb_sec && fb_sec_dir))) {
- SDE_ERROR(
- "crtc%d: invalid planes fb_modes Sec:%d, NS:%d, Sec_Dir:%d\n",
+ if (secure == SDE_DRM_SEC_ONLY) {
+ /*
+ * validate planes - only fb_sec_dir is allowed during sec_crtc
+ * - fb_sec_dir is for secure camera preview and
+ * secure display use case
+ * - fb_sec is for secure video playback
+ * - fb_ns is for normal non secure use cases
+ */
+ if (fb_ns || fb_sec) {
+ SDE_ERROR(
+ "crtc%d: invalid fb_modes Sec:%d, NS:%d, Sec_Dir:%d\n",
crtc->base.id, fb_sec, fb_ns, fb_sec_dir);
- return -EINVAL;
+ return -EINVAL;
+ }
+
+ /* only one blending stage is allowed in sec_crtc */
+ for (i = 1; i < cnt; i++) {
+ if (pstates[i].stage != pstates[i-1].stage) {
+ SDE_ERROR(
+ "crtc%d: invalid blend stages %d:%d, %d:%d\n",
+ crtc->base.id, i, pstates[i].stage,
+ i-1, pstates[i-1].stage);
+ return -EINVAL;
+ }
+ }
}
- /**
+ /*
	 * secure_crtc is not allowed in a shared topology
* across different encoders.
*/
@@ -4171,17 +4411,15 @@ static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
if (encoder->crtc == crtc)
encoder_cnt++;
- if (encoder_cnt >
- MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
- SDE_ERROR(
- "crtc%d, invalid virtual encoder crtc%d\n",
- crtc->base.id,
- encoder_cnt);
+ if (encoder_cnt > MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
+ SDE_ERROR("crtc%d, invalid virtual encoder crtc%d\n",
+ crtc->base.id, encoder_cnt);
return -EINVAL;
}
}
SDE_DEBUG("crtc:%d Secure validation successful\n", crtc->base.id);
+
return 0;
}
@@ -4189,7 +4427,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct sde_crtc *sde_crtc;
- struct plane_state pstates[SDE_STAGE_MAX * 4];
+ struct plane_state *pstates = NULL;
struct sde_crtc_state *cstate;
const struct drm_plane_state *pstate;
@@ -4198,7 +4436,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
int cnt = 0, rc = 0, mixer_width, i, z_pos;
- struct sde_multirect_plane_states multirect_plane[SDE_STAGE_MAX * 2];
+ struct sde_multirect_plane_states *multirect_plane = NULL;
int multirect_count = 0;
const struct drm_plane_state *pipe_staged[SSPP_MAX];
int left_zpos_cnt = 0, right_zpos_cnt = 0;
@@ -4217,6 +4455,17 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
goto end;
}
+ pstates = kzalloc(SDE_PSTATES_MAX *
+ sizeof(struct plane_state), GFP_KERNEL);
+
+ multirect_plane = kzalloc(SDE_MULTIRECT_PLANE_MAX *
+ sizeof(struct sde_multirect_plane_states), GFP_KERNEL);
+
+ if (!pstates || !multirect_plane) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
mode = &state->adjusted_mode;
SDE_DEBUG("%s: check", sde_crtc->name);
@@ -4238,10 +4487,6 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
_sde_crtc_setup_is_ppsplit(state);
_sde_crtc_setup_lm_bounds(crtc, state);
- rc = _sde_crtc_check_secure_state(crtc, state);
- if (rc)
- return rc;
-
/* get plane state for all drm planes associated with crtc state */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (IS_ERR_OR_NULL(pstate)) {
@@ -4250,7 +4495,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
sde_crtc->name, plane->base.id, rc);
goto end;
}
- if (cnt >= ARRAY_SIZE(pstates))
+ if (cnt >= SDE_PSTATES_MAX)
continue;
pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
@@ -4315,6 +4560,10 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
/* assign mixer stages based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ rc = _sde_crtc_check_secure_state(crtc, state, pstates, cnt);
+ if (rc)
+ goto end;
+
rc = _sde_crtc_excl_dim_layer_check(state, pstates, cnt);
if (rc)
goto end;
@@ -4460,6 +4709,8 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
}
end:
+ kfree(pstates);
+ kfree(multirect_plane);
_sde_crtc_rp_free_unused(&cstate->rp);
return rc;
}
@@ -4498,6 +4749,57 @@ void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
_sde_crtc_complete_flip(crtc, file);
}
+int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc,
+		struct drm_crtc_state *crtc_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_property *drm_prop;
+	enum msm_mdp_crtc_property prop_idx;
+
+	if (!crtc || !crtc_state) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc_state);
+
+	for (prop_idx = 0; prop_idx < CRTC_PROP_COUNT; prop_idx++) { /* walk every custom crtc property */
+		uint64_t val = cstate->property_values[prop_idx].value;
+		uint64_t def;
+		int ret;
+
+		drm_prop = msm_property_index_to_drm_property(
+				&sde_crtc->property_info, prop_idx);
+		if (!drm_prop) {
+			/* not all props will be installed, based on caps */
+			SDE_DEBUG("%s: invalid property index %d\n",
+					sde_crtc->name, prop_idx);
+			continue;
+		}
+
+		def = msm_property_get_default(&sde_crtc->property_info,
+				prop_idx);
+		if (val == def) /* already at its default; nothing to reset */
+			continue;
+
+		SDE_DEBUG("%s: set prop %s idx %d from %llu to %llu\n",
+				sde_crtc->name, drm_prop->name, prop_idx, val,
+				def);
+
+		ret = drm_atomic_crtc_set_property(crtc, crtc_state, drm_prop,
+				def); /* restore default via the normal set path */
+		if (ret) {
+			SDE_ERROR("%s: set property failed, idx %d ret %d\n",
+					sde_crtc->name, prop_idx, ret);
+			continue; /* best-effort: keep resetting remaining props */
+		}
+	}
+
+	return 0; /* per-property failures are logged, not propagated */
+}
+
/**
* sde_crtc_install_properties - install all drm properties for crtc
* @crtc: Pointer to drm crtc structure
@@ -4541,8 +4843,8 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
- msm_property_install_range(&sde_crtc->property_info, "output_fence",
- 0x0, 0, INR_OPEN_MAX, 0x0, CRTC_PROP_OUTPUT_FENCE);
+ msm_property_install_volatile_range(&sde_crtc->property_info,
+ "output_fence", 0x0, 0, ~0, 0, CRTC_PROP_OUTPUT_FENCE);
msm_property_install_range(&sde_crtc->property_info,
"output_fence_offset", 0x0, 0, 1, 0,
@@ -4586,7 +4888,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
CRTC_PROP_ROT_CLK);
msm_property_install_range(&sde_crtc->property_info,
- "idle_time", IDLE_TIMEOUT, 0, U64_MAX, 0,
+ "idle_time", 0, 0, U64_MAX, 0,
CRTC_PROP_IDLE_TIMEOUT);
msm_property_install_blob(&sde_crtc->property_info, "capabilities",
@@ -4662,6 +4964,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
}
sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+ sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
if (catalog->perf.max_bw_low)
sde_kms_info_add_keyint(info, "max_bandwidth_low",
catalog->perf.max_bw_low * 1000LL);
@@ -4713,6 +5016,47 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
kfree(info);
}
+static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,
+		const struct drm_crtc_state *state, uint64_t *val)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	uint32_t offset, i;
+	bool conn_offset = 0, is_cmd = true;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	for (i = 0; i < cstate->num_connectors; ++i) { /* one connector needing an offset is enough */
+		conn_offset = sde_connector_needs_offset(cstate->connectors[i]);
+		if (conn_offset)
+			break;
+	}
+
+	/*
+	 * set the cmd flag only when all the encoders attached
+	 * to the crtc are in cmd mode. Consider all other cases
+	 * as video mode.
+	 */
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc == crtc)
+			is_cmd = sde_encoder_check_mode(encoder,
+					MSM_DISPLAY_CAP_CMD_MODE);
+	}
+
+	offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+	/*
+	 * set the offset to 0 only for cmd mode panels, so
+	 * the release fence for the current frame can be
+	 * triggered right after PP_DONE interrupt.
+	 */
+	offset = is_cmd ? 0 : (offset + conn_offset);
+
+	return sde_fence_create(&sde_crtc->output_fence, val, offset); /* fence handle written to *val */
+}
+
/**
* sde_crtc_atomic_set_property - atomically set a crtc drm property
* @crtc: Pointer to drm crtc structure
@@ -4728,69 +5072,92 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
- int idx, ret = -EINVAL;
+ int idx, ret;
+ uint64_t fence_fd;
if (!crtc || !state || !property) {
SDE_ERROR("invalid argument(s)\n");
- } else {
- sde_crtc = to_sde_crtc(crtc);
- cstate = to_sde_crtc_state(state);
- ret = msm_property_atomic_set(&sde_crtc->property_info,
- &cstate->property_state, property, val);
- if (!ret) {
- idx = msm_property_index(&sde_crtc->property_info,
- property);
- switch (idx) {
- case CRTC_PROP_INPUT_FENCE_TIMEOUT:
- _sde_crtc_set_input_fence_timeout(cstate);
- break;
- case CRTC_PROP_DIM_LAYER_V1:
- _sde_crtc_set_dim_layer_v1(cstate,
- (void __user *)val);
- break;
- case CRTC_PROP_ROI_V1:
- ret = _sde_crtc_set_roi_v1(state,
- (void __user *)val);
- break;
- case CRTC_PROP_DEST_SCALER:
- ret = _sde_crtc_set_dest_scaler(sde_crtc,
- cstate, (void __user *)val);
- break;
- case CRTC_PROP_DEST_SCALER_LUT_ED:
- case CRTC_PROP_DEST_SCALER_LUT_CIR:
- case CRTC_PROP_DEST_SCALER_LUT_SEP:
- ret = _sde_crtc_set_dest_scaler_lut(sde_crtc,
- cstate, idx);
- break;
- case CRTC_PROP_CORE_CLK:
- case CRTC_PROP_CORE_AB:
- case CRTC_PROP_CORE_IB:
- cstate->bw_control = true;
- break;
- case CRTC_PROP_LLCC_AB:
- case CRTC_PROP_LLCC_IB:
- case CRTC_PROP_DRAM_AB:
- case CRTC_PROP_DRAM_IB:
- cstate->bw_control = true;
- cstate->bw_split_vote = true;
- break;
- case CRTC_PROP_IDLE_TIMEOUT:
- _sde_crtc_set_idle_timeout(crtc, val);
- default:
- /* nothing to do */
- break;
- }
- } else {
- ret = sde_cp_crtc_set_property(crtc,
- property, val);
- }
- if (ret)
- DRM_ERROR("failed to set the property\n");
-
- SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
- property->name, property->base.id, val, ret);
+ return -EINVAL;
}
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(state);
+
+ /* check with cp property system first */
+ ret = sde_cp_crtc_set_property(crtc, property, val);
+ if (ret != -ENOENT)
+ goto exit;
+
+ /* if not handled by cp, check msm_property system */
+ ret = msm_property_atomic_set(&sde_crtc->property_info,
+ &cstate->property_state, property, val);
+ if (ret)
+ goto exit;
+
+ idx = msm_property_index(&sde_crtc->property_info, property);
+ switch (idx) {
+ case CRTC_PROP_INPUT_FENCE_TIMEOUT:
+ _sde_crtc_set_input_fence_timeout(cstate);
+ break;
+ case CRTC_PROP_DIM_LAYER_V1:
+ _sde_crtc_set_dim_layer_v1(cstate, (void __user *)val);
+ break;
+ case CRTC_PROP_ROI_V1:
+ ret = _sde_crtc_set_roi_v1(state, (void __user *)val);
+ break;
+ case CRTC_PROP_DEST_SCALER:
+ ret = _sde_crtc_set_dest_scaler(sde_crtc, cstate,
+ (void __user *)val);
+ break;
+ case CRTC_PROP_DEST_SCALER_LUT_ED:
+ case CRTC_PROP_DEST_SCALER_LUT_CIR:
+ case CRTC_PROP_DEST_SCALER_LUT_SEP:
+ ret = _sde_crtc_set_dest_scaler_lut(sde_crtc, cstate, idx);
+ break;
+ case CRTC_PROP_CORE_CLK:
+ case CRTC_PROP_CORE_AB:
+ case CRTC_PROP_CORE_IB:
+ cstate->bw_control = true;
+ break;
+ case CRTC_PROP_LLCC_AB:
+ case CRTC_PROP_LLCC_IB:
+ case CRTC_PROP_DRAM_AB:
+ case CRTC_PROP_DRAM_IB:
+ cstate->bw_control = true;
+ cstate->bw_split_vote = true;
+ break;
+ case CRTC_PROP_OUTPUT_FENCE:
+ if (!val)
+ goto exit;
+
+ ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
+ if (ret) {
+ SDE_ERROR("fence create failed rc:%d\n", ret);
+ goto exit;
+ }
+
+ ret = copy_to_user((uint64_t __user *)val, &fence_fd,
+ sizeof(uint64_t));
+ if (ret) {
+ SDE_ERROR("copy to user failed rc:%d\n", ret);
+ put_unused_fd(fence_fd);
+ ret = -EFAULT;
+ goto exit;
+ }
+ break;
+ default:
+ /* nothing to do */
+ break;
+ }
+
+exit:
+ if (ret)
+ SDE_ERROR("%s: failed to set property%d %s: %d\n", crtc->name,
+ DRMID(property), property->name, ret);
+ else
+ SDE_DEBUG("%s: %s[%d] <= 0x%llx\n", crtc->name, property->name,
+ property->base.id, val);
+
return ret;
}
@@ -4824,62 +5191,29 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
- struct drm_encoder *encoder;
- int i, ret = -EINVAL;
- bool conn_offset = 0;
- bool is_cmd = true;
+ int ret = -EINVAL, i;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
- } else {
- sde_crtc = to_sde_crtc(crtc);
- cstate = to_sde_crtc_state(state);
-
- for (i = 0; i < cstate->num_connectors; ++i) {
- conn_offset = sde_connector_needs_offset(
- cstate->connectors[i]);
- if (conn_offset)
- break;
- }
-
- /**
- * set the cmd flag only when all the encoders attached
- * to the crtc are in cmd mode. Consider all other cases
- * as video mode.
- */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc == crtc)
- is_cmd = sde_encoder_check_mode(encoder,
- MSM_DISPLAY_CAP_CMD_MODE);
- }
-
- i = msm_property_index(&sde_crtc->property_info, property);
- if (i == CRTC_PROP_OUTPUT_FENCE) {
- uint32_t offset = sde_crtc_get_property(cstate,
- CRTC_PROP_OUTPUT_FENCE_OFFSET);
-
- /**
- * set the offset to 0 only for cmd mode panels, so
- * the release fence for the current frame can be
- * triggered right after PP_DONE interrupt.
- */
- offset = is_cmd ? 0 : (offset + conn_offset);
-
- ret = sde_fence_create(&sde_crtc->output_fence, val,
- offset);
- if (ret)
- SDE_ERROR("fence create failed\n");
- } else {
- ret = msm_property_atomic_get(&sde_crtc->property_info,
- &cstate->property_state,
- property, val);
- if (ret)
- ret = sde_cp_crtc_get_property(crtc,
- property, val);
- }
- if (ret)
- DRM_ERROR("get property failed\n");
+ goto end;
}
+
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(state);
+
+ i = msm_property_index(&sde_crtc->property_info, property);
+ if (i == CRTC_PROP_OUTPUT_FENCE) {
+ ret = _sde_crtc_get_output_fence(crtc, state, val);
+ } else {
+ ret = msm_property_atomic_get(&sde_crtc->property_info,
+ &cstate->property_state, property, val);
+ if (ret)
+ ret = sde_cp_crtc_get_property(crtc, property, val);
+ }
+ if (ret)
+ DRM_ERROR("get property failed\n");
+
+end:
return ret;
}
@@ -5377,6 +5711,30 @@ static int _sde_crtc_init_events(struct sde_crtc *sde_crtc)
return rc;
}
+/*
+ * __sde_crtc_idle_notify_work - signal idle timeout to user space
+ */
+static void __sde_crtc_idle_notify_work(struct kthread_work *work)
+{
+	struct sde_crtc *sde_crtc = container_of(work, struct sde_crtc,
+				idle_notify_work.work);
+	struct drm_crtc *crtc;
+	struct drm_event event;
+	int ret = 0;
+
+	if (!sde_crtc) { /* defensive only: container_of() cannot return NULL */
+		SDE_ERROR("invalid sde crtc\n");
+	} else {
+		crtc = &sde_crtc->base;
+		event.type = DRM_EVENT_IDLE_NOTIFY;
+		event.length = sizeof(u32); /* payload is the u32 'ret' below (always 0) */
+		msm_mode_object_event_notify(&crtc->base, crtc->dev,
+				&event, (u8 *)&ret);
+
+		SDE_DEBUG("crtc[%d]: idle timeout notified\n", crtc->base.id);
+	}
+}
+
/* initialize crtc */
struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
{
@@ -5444,13 +5802,13 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
sde_crtc_install_properties(crtc, kms->catalog);
- /* Init dest scaler */
- _sde_crtc_dest_scaler_init(sde_crtc, kms->catalog);
-
/* Install color processing properties */
sde_cp_crtc_init(crtc);
sde_cp_crtc_install_properties(crtc);
+ kthread_init_delayed_work(&sde_crtc->idle_notify_work,
+ __sde_crtc_idle_notify_work);
+
SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
return crtc;
}
@@ -5502,8 +5860,15 @@ static int _sde_crtc_event_enable(struct sde_kms *kms,
priv = kms->dev->dev_private;
ret = 0;
if (crtc_drm->enabled) {
- sde_power_resource_enable(&priv->phandle, kms->core_client,
- true);
+ ret = sde_power_resource_enable(&priv->phandle,
+ kms->core_client, true);
+ if (ret) {
+ SDE_ERROR("failed to enable power resource %d\n", ret);
+ SDE_EVT32(ret, SDE_EVTLOG_ERROR);
+ kfree(node);
+ return ret;
+ }
+
INIT_LIST_HEAD(&node->irq.list);
ret = node->func(crtc_drm, true, &node->irq);
sde_power_resource_enable(&priv->phandle, kms->core_client,
@@ -5557,7 +5922,15 @@ static int _sde_crtc_event_disable(struct sde_kms *kms,
return 0;
}
priv = kms->dev->dev_private;
- sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ ret = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ if (ret) {
+ SDE_ERROR("failed to enable power resource %d\n", ret);
+ SDE_EVT32(ret, SDE_EVTLOG_ERROR);
+ list_del(&node->list);
+ kfree(node);
+ return ret;
+ }
+
ret = node->func(crtc_drm, false, &node->irq);
list_del(&node->list);
kfree(node);
@@ -5592,8 +5965,30 @@ static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
return 0;
}
+static int sde_crtc_pm_event_handler(struct drm_crtc *crtc, bool en,
+		struct sde_irq_callback *noirq)
+{
+	/*
+	 * IRQ object noirq is not being used here since there is
+	 * no crtc irq from pm event.
+	 */
+	return 0; /* registration stub: always succeeds, nothing to (un)wire */
+}
+
static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
bool en, struct sde_irq_callback *irq)
{
return 0;
}
+
+/**
+ * sde_crtc_update_cont_splash_mixer_settings - update mixer settings
+ *	during device bootup for cont_splash use case
+ * @crtc: Pointer to drm crtc structure
+ */
+void sde_crtc_update_cont_splash_mixer_settings(
+		struct drm_crtc *crtc)
+{
+	_sde_crtc_setup_mixers(crtc); /* re-derive mixer config for the running display */
+	crtc->enabled = true; /* mark enabled without the normal enable path — splash h/w is presumably already active */
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index a4c0501..c6b4afa 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -188,6 +188,9 @@ struct sde_crtc_event {
* @enabled : whether the SDE CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
+ * @reset_request : whether or not a h/w request was requested for the previous
+ * frame
+ * @ds_reconfig : force reconfiguration of the destination scaler block
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
* @dirty_list : list of color processing features are dirty
@@ -211,11 +214,11 @@ struct sde_crtc_event {
* @misr_data : store misr data before turning off the clocks.
* @sbuf_flush_mask: flush mask for inline rotator
* @sbuf_flush_mask_old: inline rotator flush mask for previous commit
+ * @idle_notify_work: delayed worker to notify idle timeout to user space
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
* @rp_lock : serialization lock for resource pool
* @rp_head : list of active resource pool
- * @scl3_cfg_lut : qseed3 lut config
*/
struct sde_crtc {
struct drm_crtc base;
@@ -226,7 +229,6 @@ struct sde_crtc {
u32 num_mixers;
bool mixers_swapped;
struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
- struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg;
struct drm_pending_vblank_event *event;
u32 vsync_count;
@@ -247,7 +249,9 @@ struct sde_crtc {
bool vblank_requested;
bool suspend;
bool enabled;
+ bool reset_request;
+ bool ds_reconfig;
struct list_head feature_list;
struct list_head active_list;
struct list_head dirty_list;
@@ -276,6 +280,7 @@ struct sde_crtc {
u32 sbuf_flush_mask;
u32 sbuf_flush_mask_old;
+ struct kthread_delayed_work idle_notify_work;
struct sde_power_event *power_event;
@@ -374,6 +379,7 @@ struct sde_crtc_respool {
* @num_ds_enabled: Number of destination scalers enabled
* @ds_dirty: Boolean to indicate if dirty or not
* @ds_cfg: Destination scaler config
+ * @scl3_lut_cfg: QSEED3 lut config
* @new_perf: new performance state being requested
* @sbuf_cfg: stream buffer configuration
* @sbuf_prefill_line: number of line for inline rotator prefetch
@@ -403,6 +409,7 @@ struct sde_crtc_state {
uint32_t num_ds_enabled;
bool ds_dirty;
struct sde_hw_ds_cfg ds_cfg[SDE_MAX_DS_COUNT];
+ struct sde_hw_scaler3_lut_cfg scl3_lut_cfg;
struct sde_core_perf_params new_perf;
struct sde_ctl_sbuf_cfg sbuf_cfg;
@@ -508,8 +515,10 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
* sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
*/
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
/**
* sde_crtc_prepare_commit - callback to prepare for output fences
@@ -712,4 +721,28 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
*/
int sde_crtc_secure_ctrl(struct drm_crtc *crtc, bool post_commit);
+/**
+ * sde_crtc_helper_reset_custom_properties - reset properties to default values in the
+ * given DRM CRTC state object
+ * @crtc: Pointer to DRM crtc object
+ * @crtc_state: Pointer to DRM crtc state object
+ * Returns: 0 on success, negative errno on failure
+ */
+int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state);
+
+/**
+ * sde_crtc_timeline_status - current buffer timeline status
+ * @crtc: Pointer to crtc
+ */
+void sde_crtc_timeline_status(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_update_cont_splash_mixer_settings - update mixer settings
+ * during device bootup for cont_splash use case
+ * @crtc: Pointer to drm crtc structure
+ */
+void sde_crtc_update_cont_splash_mixer_settings(
+ struct drm_crtc *crtc);
+
#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7d1e4bf..a7dffba 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -73,6 +73,10 @@
#define IDLE_SHORT_TIMEOUT 1
+#define FAULT_TOLERENCE_DELTA_IN_MS 2
+
+#define FAULT_TOLERENCE_WAIT_IN_MS 5
+
/* Maximum number of VSYNC wait attempts for RSC state transition */
#define MAX_RSC_WAIT 5
@@ -88,7 +92,7 @@
* This event happens at INTERRUPT level.
* Event signals the end of the data transfer after the PP FRAME_DONE
* event. At the end of this event, a delayed work is scheduled to go to
- * IDLE_PC state after IDLE_TIMEOUT time.
+ * IDLE_PC state after IDLE_POWERCOLLAPSE_DURATION time.
* @SDE_ENC_RC_EVENT_PRE_STOP:
* This event happens at NORMAL priority.
* This event, when received during the ON state, set RSC to IDLE, and
@@ -114,9 +118,9 @@
* with new vtotal.
* @SDE_ENC_RC_EVENT_ENTER_IDLE:
* This event happens at NORMAL priority from a work item.
- * Event signals that there were no frame updates for IDLE_TIMEOUT time.
- * This would disable MDP/DSI core clocks and request RSC with IDLE state
- * and change the resource state to IDLE.
+ * Event signals that there were no frame updates for
+ * IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
+ * and request RSC with IDLE state and change the resource state to IDLE.
*/
enum sde_enc_rc_events {
SDE_ENC_RC_EVENT_KICKOFF = 1,
@@ -177,13 +181,10 @@ enum sde_enc_rc_states {
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @frame_done_timeout: frame done timeout in Hz
- * @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @rsc_client: rsc client pointer
* @rsc_state_init: boolean to indicate rsc config init
* @disp_info: local copy of msm_display_info struct
- * @mode_info: local copy of msm_mode_info struct
* @misr_enable: misr enable/disable status
* @misr_frame_count: misr frame count before start capturing the data
* @idle_pc_supported: indicate if idle power collaps is supported
@@ -198,7 +199,6 @@ enum sde_enc_rc_states {
* @rsc_config: rsc configuration for display vtotal, fps, etc.
* @cur_conn_roi: current connector roi
* @prv_conn_roi: previous connector roi to optimize if unchanged
- * @idle_timeout: idle timeout duration in milliseconds
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -224,14 +224,11 @@ struct sde_encoder_virt {
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- atomic_t frame_done_timeout;
- struct timer_list frame_done_timer;
struct timer_list vsync_event_timer;
struct sde_rsc_client *rsc_client;
bool rsc_state_init;
struct msm_display_info disp_info;
- struct msm_mode_info mode_info;
bool misr_enable;
u32 misr_frame_count;
@@ -246,38 +243,139 @@ struct sde_encoder_virt {
struct sde_rsc_cmd_config rsc_config;
struct sde_rect cur_conn_roi;
struct sde_rect prv_conn_roi;
-
- u32 idle_timeout;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
-bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
-
+static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
 {
-	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+	u32 cpu_dma_latency;
+	int cpu;
+
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (!sde_kms || !sde_kms->catalog)
+		return;
+
+	cpu_mask = sde_kms->catalog->perf.cpu_mask;
+	cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
+	if (!cpu_mask) /* no affinity configured in catalog: skip the pm_qos vote */
+		return;
+
+	req = &sde_kms->pm_qos_cpu_req;
+	req->type = PM_QOS_REQ_AFFINE_CORES;
+	cpumask_clear(&req->cpus_affine); /* was cpumask_empty(): that only tests, leaving the mask stale */
+	for_each_possible_cpu(cpu) {
+		if (cpu < 32 && ((1U << cpu) & cpu_mask)) /* cpu_mask is u32: guard against UB shift for cpu >= 32 */
+			cpumask_set_cpu(cpu, &req->cpus_affine);
+	}
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
+
+	SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
+}
+
+static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("drm device invalid\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (!sde_kms || !sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
+		return; /* mirrors add_request: a vote exists only when cpu_mask is set */
+
+	pm_qos_remove_request(&sde_kms->pm_qos_cpu_req); /* drop the CPU DMA latency vote */
+}
+
+static struct drm_connector_state *_sde_encoder_get_conn_state(
+		struct drm_encoder *drm_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct list_head *connector_list;
+	struct drm_connector *conn_iter;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid argument\n");
+		return NULL;
+	}
+
+	priv = drm_enc->dev->dev_private; /* NOTE(review): drm_enc->dev not NULL-checked here, unlike sibling helpers */
+	sde_kms = to_sde_kms(priv->kms);
+	connector_list = &sde_kms->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn_iter, connector_list, head) /* find the connector currently bound to this encoder */
+		if (conn_iter->encoder == drm_enc)
+			return conn_iter->state;
+
+	return NULL; /* no connector attached to this encoder */
+}
+
+static int _sde_encoder_get_mode_info(struct drm_encoder *drm_enc,
+		struct msm_mode_info *mode_info)
+{
+	struct drm_connector_state *conn_state;
+
+	if (!drm_enc || !mode_info) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	conn_state = _sde_encoder_get_conn_state(drm_enc); /* mode info lives on the attached connector's state */
+	if (!conn_state) {
+		SDE_ERROR("invalid connector state for the encoder: %d\n",
+				drm_enc->base.id);
+		return -EINVAL;
+	}
+
+	return sde_connector_get_mode_info(conn_state, mode_info); /* fills *mode_info; 0 on success */
+}
+
+static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
+{
 	struct msm_compression_info *comp_info;
+	struct msm_mode_info mode_info;
+	int rc = 0;
 	if (!drm_enc)
 		return false;
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	comp_info = &sde_enc->mode_info.comp_info;
+	rc = _sde_encoder_get_mode_info(drm_enc, &mode_info); /* mode info now comes from connector state */
+	if (rc) {
+		SDE_ERROR("failed to get mode info, enc: %d\n",
+				drm_enc->base.id);
+		return false; /* treat lookup failure as "DSC not enabled" */
+	}
+
+	comp_info = &mode_info.comp_info;
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
-void sde_encoder_set_idle_timeout(struct drm_encoder *drm_enc, u32 idle_timeout)
-{
- struct sde_encoder_virt *sde_enc;
-
- if (!drm_enc)
- return;
-
- sde_enc = to_sde_encoder_virt(drm_enc);
- sde_enc->idle_timeout = idle_timeout;
-}
-
bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
{
enum sde_rm_topology_name topology;
@@ -536,7 +634,8 @@ void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
struct drm_connector_state *conn_state)
{
struct sde_encoder_virt *sde_enc = NULL;
- int i = 0;
+ struct msm_mode_info mode_info;
+ int rc, i = 0;
if (!hw_res || !drm_enc || !conn_state) {
SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
@@ -558,7 +657,18 @@ void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
phys->ops.get_hw_resources(phys, hw_res, conn_state);
}
- hw_res->topology = sde_enc->mode_info.topology;
+ /**
+ * NOTE: Do not use sde_encoder_get_mode_info here as this function is
+ * called from atomic_check phase. Use the below API to get mode
+ * information of the temporary conn_state passed.
+ */
+ rc = sde_connector_get_mode_info(conn_state, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return;
+ }
+
+ hw_res->topology = mode_info.topology;
hw_res->is_primary = sde_enc->disp_info.is_primary;
}
@@ -699,6 +809,8 @@ static int sde_encoder_virt_atomic_check(
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
struct sde_connector *sde_conn = NULL;
+ struct sde_connector_state *sde_conn_state = NULL;
+ struct sde_crtc_state *sde_crtc_state = NULL;
int i = 0;
int ret = 0;
@@ -716,7 +828,10 @@ static int sde_encoder_virt_atomic_check(
mode = &crtc_state->mode;
adj_mode = &crtc_state->adjusted_mode;
sde_conn = to_sde_connector(conn_state->connector);
- SDE_EVT32(DRMID(drm_enc));
+ sde_conn_state = to_sde_connector_state(conn_state);
+ sde_crtc_state = to_sde_crtc_state(crtc_state);
+
+ SDE_EVT32(DRMID(drm_enc), drm_atomic_crtc_needs_modeset(crtc_state));
/*
* display drivers may populate private fields of the drm display mode
@@ -745,12 +860,59 @@ static int sde_encoder_virt_atomic_check(
}
}
+ if (!ret && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ struct sde_rect mode_roi, roi;
+
+ mode_roi.x = 0;
+ mode_roi.y = 0;
+ mode_roi.w = crtc_state->adjusted_mode.hdisplay;
+ mode_roi.h = crtc_state->adjusted_mode.vdisplay;
+
+ if (sde_conn_state->rois.num_rects) {
+ sde_kms_rect_merge_rectangles(
+ &sde_conn_state->rois, &roi);
+ if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
+ SDE_ERROR_ENC(sde_enc,
+ "roi (%d,%d,%d,%d) on connector invalid during modeset\n",
+ roi.x, roi.y, roi.w, roi.h);
+ ret = -EINVAL;
+ }
+ }
+
+ if (sde_crtc_state->user_roi_list.num_rects) {
+ sde_kms_rect_merge_rectangles(
+ &sde_crtc_state->user_roi_list, &roi);
+ if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
+ SDE_ERROR_ENC(sde_enc,
+ "roi (%d,%d,%d,%d) on crtc invalid during modeset\n",
+ roi.x, roi.y, roi.w, roi.h);
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ if (!ret) {
+ /**
+ * record topology in previous atomic state to be able to handle
+ * topology transitions correctly.
+ */
+ enum sde_rm_topology_name old_top;
+
+ old_top = sde_connector_get_property(conn_state,
+ CONNECTOR_PROP_TOPOLOGY_NAME);
+ ret = sde_connector_set_old_topology_name(conn_state, old_top);
+ if (ret)
+ return ret;
+ }
if (!ret && sde_conn && drm_atomic_crtc_needs_modeset(crtc_state)) {
struct msm_display_topology *topology = NULL;
ret = sde_conn->ops.get_mode_info(adj_mode,
- &sde_enc->mode_info,
+ &sde_conn_state->mode_info,
sde_kms->catalog->max_mixer_width,
sde_conn->display);
if (ret) {
@@ -775,7 +937,7 @@ static int sde_encoder_virt_atomic_check(
* de-activating crtc.
*/
if (crtc_state->active)
- topology = &sde_enc->mode_info.topology;
+ topology = &sde_conn_state->mode_info.topology;
ret = sde_rm_update_topology(conn_state, topology);
if (ret) {
@@ -783,6 +945,24 @@ static int sde_encoder_virt_atomic_check(
"RM failed to update topology, rc: %d\n", ret);
return ret;
}
+
+ ret = sde_connector_set_blob_data(conn_state->connector,
+ conn_state,
+ CONNECTOR_PROP_SDE_INFO);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "connector failed to update info, rc: %d\n",
+ ret);
+ return ret;
+ }
+
+ }
+
+ ret = sde_connector_roi_v1_check_roi(conn_state);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc, "connector roi check failed, rc: %d\n",
+ ret);
+ return ret;
}
if (!ret)
@@ -948,14 +1128,23 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
struct sde_encoder_phys *enc_master = sde_enc->cur_master;
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
- struct msm_display_dsc_info *dsc =
- &sde_enc->mode_info.comp_info.dsc_info;
+ struct msm_mode_info mode_info;
+ struct msm_display_dsc_info *dsc = NULL;
+ int rc;
- if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
+ if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
return -EINVAL;
}
+ rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return -EINVAL;
+ }
+
+ dsc = &mode_info.comp_info.dsc_info;
+
_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
this_frame_slices = roi->w / dsc->slice_width;
@@ -992,8 +1181,9 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
+ struct msm_mode_info mode_info;
bool half_panel_partial_update;
- int i;
+ int i, rc;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = sde_enc->hw_pp[i];
@@ -1005,6 +1195,12 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
}
}
+ rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return -EINVAL;
+ }
+
half_panel_partial_update =
hweight_long(params->affected_displays) == 1;
@@ -1014,8 +1210,8 @@ static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
- memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
+ memcpy(&dsc[0], &mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+ memcpy(&dsc[1], &mode_info.comp_info.dsc_info, sizeof(dsc[1]));
/*
* Since both DSC use same pic dimension, set same pic dimension
@@ -1078,10 +1274,10 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
- struct msm_display_dsc_info *dsc =
- &sde_enc->mode_info.comp_info.dsc_info;
+ struct msm_display_dsc_info *dsc = NULL;
+ struct msm_mode_info mode_info;
bool half_panel_partial_update;
- int i;
+ int i, rc;
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
hw_pp[i] = sde_enc->hw_pp[i];
@@ -1093,6 +1289,14 @@ static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
}
}
+ rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return -EINVAL;
+ }
+
+ dsc = &mode_info.comp_info.dsc_info;
+
half_panel_partial_update =
hweight_long(params->affected_displays) == 1;
@@ -1181,7 +1385,17 @@ static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
}
SDE_DEBUG_ENC(sde_enc, "topology:%d\n", topology);
- SDE_EVT32(DRMID(&sde_enc->base));
+ SDE_EVT32(DRMID(&sde_enc->base), topology,
+ sde_enc->cur_conn_roi.x,
+ sde_enc->cur_conn_roi.y,
+ sde_enc->cur_conn_roi.w,
+ sde_enc->cur_conn_roi.h,
+ sde_enc->prv_conn_roi.x,
+ sde_enc->prv_conn_roi.y,
+ sde_enc->prv_conn_roi.w,
+ sde_enc->prv_conn_roi.h,
+ sde_enc->base.crtc->state->adjusted_mode.hdisplay,
+ sde_enc->base.crtc->state->adjusted_mode.vdisplay);
if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
&sde_enc->prv_conn_roi))
@@ -1215,8 +1429,8 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
struct sde_kms *sde_kms;
struct sde_hw_mdp *hw_mdptop;
struct drm_encoder *drm_enc;
- struct msm_mode_info *mode_info;
- int i;
+ struct msm_mode_info mode_info;
+ int i, rc = 0;
if (!sde_enc || !disp_info) {
SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
@@ -1245,9 +1459,9 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
return;
}
- mode_info = &sde_enc->mode_info;
- if (!mode_info) {
- SDE_ERROR("invalid mode info\n");
+ rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
return;
}
@@ -1257,7 +1471,7 @@ static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
vsync_cfg.pp_count = sde_enc->num_phys_encs;
- vsync_cfg.frame_rate = mode_info->frame_rate;
+ vsync_cfg.frame_rate = mode_info.frame_rate;
if (is_dummy)
vsync_cfg.vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_1;
else if (disp_info->is_te_using_watchdog_timer)
@@ -1308,11 +1522,12 @@ static int _sde_encoder_update_rsc_client(
struct sde_rsc_cmd_config *rsc_config;
int ret, prefill_lines;
struct msm_display_info *disp_info;
- struct msm_mode_info *mode_info;
+ struct msm_mode_info mode_info;
int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
int wait_count = 0;
struct drm_crtc *primary_crtc;
int pipe = -1;
+ int rc = 0;
if (!drm_enc || !drm_enc->crtc || !drm_enc->dev) {
SDE_ERROR("invalid arguments\n");
@@ -1322,7 +1537,6 @@ static int _sde_encoder_update_rsc_client(
sde_enc = to_sde_encoder_virt(drm_enc);
crtc = drm_enc->crtc;
disp_info = &sde_enc->disp_info;
- mode_info = &sde_enc->mode_info;
rsc_config = &sde_enc->rsc_config;
if (!sde_enc->rsc_client) {
@@ -1330,6 +1544,12 @@ static int _sde_encoder_update_rsc_client(
return 0;
}
+ rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return 0;
+ }
+
/**
* only primary command mode panel can request CMD state.
* all other panels/displays can request for VID state including
@@ -1339,20 +1559,20 @@ static int _sde_encoder_update_rsc_client(
(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
disp_info->is_primary) ? SDE_RSC_CMD_STATE :
SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
- prefill_lines = config ? mode_info->prefill_lines +
- config->inline_rotate_prefill : mode_info->prefill_lines;
+ prefill_lines = config ? mode_info.prefill_lines +
+ config->inline_rotate_prefill : mode_info.prefill_lines;
/* compare specific items and reconfigure the rsc */
- if ((rsc_config->fps != mode_info->frame_rate) ||
- (rsc_config->vtotal != mode_info->vtotal) ||
+ if ((rsc_config->fps != mode_info.frame_rate) ||
+ (rsc_config->vtotal != mode_info.vtotal) ||
(rsc_config->prefill_lines != prefill_lines) ||
- (rsc_config->jitter_numer != mode_info->jitter_numer) ||
- (rsc_config->jitter_denom != mode_info->jitter_denom)) {
- rsc_config->fps = mode_info->frame_rate;
- rsc_config->vtotal = mode_info->vtotal;
+ (rsc_config->jitter_numer != mode_info.jitter_numer) ||
+ (rsc_config->jitter_denom != mode_info.jitter_denom)) {
+ rsc_config->fps = mode_info.frame_rate;
+ rsc_config->vtotal = mode_info.vtotal;
rsc_config->prefill_lines = prefill_lines;
- rsc_config->jitter_numer = mode_info->jitter_numer;
- rsc_config->jitter_denom = mode_info->jitter_denom;
+ rsc_config->jitter_numer = mode_info.jitter_numer;
+ rsc_config->jitter_denom = mode_info.jitter_denom;
sde_enc->rsc_state_init = false;
}
@@ -1503,9 +1723,7 @@ struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc)
static void _sde_encoder_resource_control_rsc_update(
struct drm_encoder *drm_enc, bool enable)
{
- struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
struct sde_encoder_rsc_config rsc_cfg = { 0 };
- int i;
if (enable) {
rsc_cfg.inline_rotate_prefill =
@@ -1514,56 +1732,64 @@ static void _sde_encoder_resource_control_rsc_update(
_sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
} else {
_sde_encoder_update_rsc_client(drm_enc, NULL, false);
-
- /**
- * disable the vsync source after updating the rsc state. rsc
- * state update might have vsync wait and vsync source must be
- * disabled after it. It will avoid generating any vsync from
- * this point till mode-2 entry. It is SW workaround for
- * HW limitation and should not be removed without checking the
- * updated design.
- */
- for (i = 0; i < sde_enc->num_phys_encs; i++) {
- struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
-
- if (phys && phys->ops.prepare_idle_pc)
- phys->ops.prepare_idle_pc(phys);
- }
-
}
}
-static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
bool enable)
{
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
struct sde_encoder_virt *sde_enc;
+ int rc;
+ bool is_cmd_mode, is_primary;
sde_enc = to_sde_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
+ is_cmd_mode = sde_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_CMD_MODE;
+ is_primary = sde_enc->disp_info.is_primary;
+
SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
SDE_EVT32(DRMID(drm_enc), enable);
if (!sde_enc->cur_master) {
SDE_ERROR("encoder master not set\n");
- return;
+ return -EINVAL;
}
if (enable) {
/* enable SDE core clks */
- sde_power_resource_enable(&priv->phandle,
+ rc = sde_power_resource_enable(&priv->phandle,
sde_kms->core_client, true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
/* enable DSI clks */
- sde_connector_clk_ctrl(sde_enc->cur_master->connector, true);
+ rc = sde_connector_clk_ctrl(sde_enc->cur_master->connector,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable clk control %d\n", rc);
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ return rc;
+ }
/* enable all the irq */
_sde_encoder_irq_control(drm_enc, true);
+ if (is_cmd_mode && is_primary)
+ _sde_encoder_pm_qos_add_request(drm_enc);
+
} else {
+ if (is_cmd_mode && is_primary)
+ _sde_encoder_pm_qos_remove_request(drm_enc);
+
/* disable all the irq */
_sde_encoder_irq_control(drm_enc, false);
@@ -1575,13 +1801,14 @@ static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
sde_kms->core_client, false);
}
+ return 0;
}
static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
u32 sw_event)
{
bool autorefresh_enabled = false;
- unsigned int lp, idle_timeout;
+ unsigned int lp, idle_pc_duration;
struct sde_encoder_virt *sde_enc;
struct msm_drm_private *priv;
struct msm_drm_thread *disp_thread;
@@ -1653,7 +1880,19 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
_sde_encoder_irq_control(drm_enc, true);
} else {
/* enable all the clks and resources */
- _sde_encoder_resource_control_helper(drm_enc, true);
+ ret = _sde_encoder_resource_control_helper(drm_enc,
+ true);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "sw_event:%d, rc in state %d\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state,
+ SDE_EVTLOG_ERROR);
+ mutex_unlock(&sde_enc->rc_lock);
+ return ret;
+ }
+
_sde_encoder_resource_control_rsc_update(drm_enc, true);
}
@@ -1705,18 +1944,18 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
lp = SDE_MODE_DPMS_ON;
if (lp == SDE_MODE_DPMS_LP2)
- idle_timeout = IDLE_SHORT_TIMEOUT;
+ idle_pc_duration = IDLE_SHORT_TIMEOUT;
else
- idle_timeout = sde_enc->idle_timeout;
+ idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
- if (!autorefresh_enabled && idle_timeout)
+ if (!autorefresh_enabled)
kthread_queue_delayed_work(
&disp_thread->worker,
&sde_enc->delayed_off_work,
- msecs_to_jiffies(idle_timeout));
+ msecs_to_jiffies(idle_pc_duration));
SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
autorefresh_enabled,
- idle_timeout, SDE_EVTLOG_FUNC_CASE2);
+ idle_pc_duration, SDE_EVTLOG_FUNC_CASE2);
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
sw_event);
break;
@@ -1761,8 +2000,11 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
break;
case SDE_ENC_RC_EVENT_STOP:
- mutex_lock(&sde_enc->rc_lock);
+ /* cancel vsync event work and timer */
+ kthread_cancel_work_sync(&sde_enc->vsync_event_work);
+ del_timer_sync(&sde_enc->vsync_event_timer);
+ mutex_lock(&sde_enc->rc_lock);
/* return if the resource control is already in OFF state */
if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
@@ -1808,7 +2050,18 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
/* return if the resource control is already in ON state */
if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
/* enable all the clks and resources */
- _sde_encoder_resource_control_helper(drm_enc, true);
+ ret = _sde_encoder_resource_control_helper(drm_enc,
+ true);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "sw_event:%d, rc in state %d\n",
+ sw_event, sde_enc->rc_state);
+ SDE_EVT32(DRMID(drm_enc), sw_event,
+ sde_enc->rc_state,
+ SDE_EVTLOG_ERROR);
+ mutex_unlock(&sde_enc->rc_lock);
+ return ret;
+ }
_sde_encoder_resource_control_rsc_update(drm_enc, true);
@@ -1927,6 +2180,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct sde_kms *sde_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
+ struct sde_connector_state *sde_conn_state = NULL;
struct sde_connector *sde_conn = NULL;
struct sde_rm_hw_iter dsc_iter, pp_iter;
int i = 0, ret;
@@ -1936,6 +2190,11 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -1958,13 +2217,15 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
}
sde_conn = to_sde_connector(conn);
- if (sde_conn) {
- ret = sde_conn->ops.get_mode_info(adj_mode, &sde_enc->mode_info,
+ sde_conn_state = to_sde_connector_state(conn->state);
+ if (sde_conn && sde_conn_state) {
+ ret = sde_conn->ops.get_mode_info(adj_mode,
+ &sde_conn_state->mode_info,
sde_kms->catalog->max_mixer_width,
sde_conn->display);
if (ret) {
SDE_ERROR_ENC(sde_enc,
- "invalid topology for the mode\n");
+ "failed to get mode info from the display\n");
return;
}
}
@@ -2035,6 +2296,30 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
SDE_ENC_RC_EVENT_POST_MODESET);
}
+void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct sde_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ if (!sde_enc) {
+ SDE_ERROR("invalid sde encoder\n");
+ return;
+ }
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ phys = sde_enc->phys_encs[i];
+ if (phys && phys->ops.control_te)
+ phys->ops.control_te(phys, enable);
+ }
+}
+
static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
struct sde_encoder_virt *sde_enc = NULL;
@@ -2072,6 +2357,7 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
sde_kms->catalog);
_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
+ sde_encoder_control_te(drm_enc, true);
memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
@@ -2107,13 +2393,27 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
int i, ret = 0;
struct msm_compression_info *comp_info = NULL;
struct drm_display_mode *cur_mode = NULL;
+ struct msm_mode_info mode_info;
+ struct drm_connector *drm_conn = NULL;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
return;
}
sde_enc = to_sde_encoder_virt(drm_enc);
- comp_info = &sde_enc->mode_info.comp_info;
+
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
+ ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
+ return;
+ }
+
+ comp_info = &mode_info.comp_info;
cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -2175,6 +2475,10 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
sde_enc->cur_master->ops.enable(sde_enc->cur_master);
_sde_encoder_virt_enable_helper(drm_enc);
+
+ /* Enable ESD thread */
+ drm_conn = sde_enc->cur_master->connector;
+ sde_connector_schedule_status_work(drm_conn, true);
}
static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -2182,6 +2486,8 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
struct sde_encoder_virt *sde_enc = NULL;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ struct drm_connector *drm_conn = NULL;
+ enum sde_intf_mode intf_mode;
int i = 0;
if (!drm_enc) {
@@ -2195,24 +2501,54 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
return;
}
+ if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
priv = drm_enc->dev->dev_private;
sde_kms = to_sde_kms(priv->kms);
+ intf_mode = sde_encoder_get_intf_mode(drm_enc);
SDE_EVT32(DRMID(drm_enc));
+ /* Disable ESD thread */
+ drm_conn = sde_enc->cur_master->connector;
+ sde_connector_schedule_status_work(drm_conn, false);
+
/* wait for idle */
sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_PRE_STOP);
+ /*
+ * For primary command mode encoders, execute the resource control
+ * pre-stop operations before the physical encoders are disabled, to
+ * allow the rsc to transition its states properly.
+ *
+ * For other encoder types, rsc should not be enabled until after
+ * they have been fully disabled, so delay the pre-stop operations
+ * until after the physical disable calls have returned.
+ */
+ if (sde_enc->disp_info.is_primary && intf_mode == INTF_MODE_CMD) {
+ sde_encoder_resource_control(drm_enc,
+ SDE_ENC_RC_EVENT_PRE_STOP);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- for (i = 0; i < sde_enc->num_phys_encs; i++) {
- struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+ } else {
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
- if (phys && phys->ops.disable)
- phys->ops.disable(phys);
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+ sde_encoder_resource_control(drm_enc,
+ SDE_ENC_RC_EVENT_PRE_STOP);
}
/*
@@ -2222,12 +2558,6 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
*/
_sde_encoder_dsc_disable(sde_enc);
- /* after phys waits for frame-done, should be no more frames pending */
- if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
- SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
- del_timer_sync(&sde_enc->frame_done_timer);
- }
-
sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -2383,9 +2713,6 @@ static void sde_encoder_frame_done_callback(
}
if (!sde_enc->frame_busy_mask[0]) {
- atomic_set(&sde_enc->frame_done_timeout, 0);
- del_timer(&sde_enc->frame_done_timer);
-
sde_encoder_resource_control(drm_enc,
SDE_ENC_RC_EVENT_FRAME_DONE);
@@ -2413,9 +2740,6 @@ static void sde_encoder_off_work(struct kthread_work *work)
sde_encoder_resource_control(&sde_enc->base,
SDE_ENC_RC_EVENT_ENTER_IDLE);
-
- sde_encoder_frame_done_callback(&sde_enc->base, NULL,
- SDE_ENCODER_FRAME_EVENT_IDLE);
}
/**
@@ -2466,11 +2790,12 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
phys->ops.trigger_flush(phys);
if (ctl->ops.get_pending_flush)
- SDE_EVT32(DRMID(drm_enc), phys->intf_idx, pending_kickoff_cnt,
- ctl->idx, ctl->ops.get_pending_flush(ctl));
+ SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
+ pending_kickoff_cnt, ctl->idx - CTL_0,
+ ctl->ops.get_pending_flush(ctl));
else
- SDE_EVT32(DRMID(drm_enc), phys->intf_idx, ctl->idx,
- pending_kickoff_cnt);
+ SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
+ ctl->idx - CTL_0, pending_kickoff_cnt);
}
/**
@@ -2529,30 +2854,50 @@ void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
ctl = phys_enc->hw_ctl;
if (ctl && ctl->ops.trigger_start) {
ctl->ops.trigger_start(ctl);
- SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
+ SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
}
}
-int sde_encoder_helper_wait_event_timeout(
- int32_t drm_id,
- int32_t hw_id,
- struct sde_encoder_wait_info *info)
+static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
+ s64 timeout_ms, struct sde_encoder_wait_info *info)
{
int rc = 0;
- s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
- s64 jiffies = msecs_to_jiffies(info->timeout_ms);
- s64 time;
+ s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
+ ktime_t cur_ktime;
+ ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
do {
rc = wait_event_timeout(*(info->wq),
- atomic_read(info->atomic_cnt) == 0, jiffies);
- time = ktime_to_ms(ktime_get());
+ atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
+ cur_ktime = ktime_get();
- SDE_EVT32_VERBOSE(drm_id, hw_id, rc, time, expected_time,
- atomic_read(info->atomic_cnt));
+ SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
+ timeout_ms, atomic_read(info->atomic_cnt));
/* If we timed out, counter is valid and time is less, wait again */
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
- (time < expected_time));
+ (ktime_compare_safe(exp_ktime, cur_ktime) > 0));
+
+ return rc;
+}
+
+int sde_encoder_helper_wait_event_timeout(int32_t drm_id, int32_t hw_id,
+ struct sde_encoder_wait_info *info)
+{
+ int rc;
+ ktime_t exp_ktime = ktime_add_ms(ktime_get(), info->timeout_ms);
+
+ rc = _sde_encoder_wait_timeout(drm_id, hw_id, info->timeout_ms, info);
+
+ /**
+ * handle disabled irq case where timer irq is also delayed.
+ * wait for additional timeout of FAULT_TOLERENCE_WAIT_IN_MS
+ * if the event timeout expiry is detected late.
+ */
+ if (atomic_read(info->atomic_cnt) && (!rc) &&
+ (ktime_compare_safe(ktime_get(), ktime_add_ms(exp_ktime,
+ FAULT_TOLERENCE_DELTA_IN_MS)) > 0))
+ rc = _sde_encoder_wait_timeout(drm_id, hw_id,
+ FAULT_TOLERENCE_WAIT_IN_MS, info);
return rc;
}
@@ -2594,12 +2939,6 @@ void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
}
}
- rc = ctl->ops.reset(ctl);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "ctl %d reset failure\n", ctl->idx);
- SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
- }
-
phys_enc->enable_state = SDE_ENC_ENABLED;
}
@@ -2776,6 +3115,8 @@ static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
params->affected_displays, num_active_phys);
+ SDE_EVT32_VERBOSE(DRMID(drm_enc), params->affected_displays,
+ num_active_phys);
/* for left/right only update, ppsplit master switches interface */
_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
@@ -2985,7 +3326,6 @@ static void sde_encoder_vsync_event_handler(unsigned long data)
struct sde_encoder_virt *sde_enc;
struct msm_drm_private *priv;
struct msm_drm_thread *event_thread;
- bool autorefresh_enabled = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
!drm_enc->crtc) {
@@ -3007,28 +3347,16 @@ static void sde_encoder_vsync_event_handler(unsigned long data)
return;
}
- if (sde_enc->cur_master &&
- sde_enc->cur_master->ops.is_autorefresh_enabled)
- autorefresh_enabled =
- sde_enc->cur_master->ops.is_autorefresh_enabled(
- sde_enc->cur_master);
-
- /*
- * Queue work to update the vsync event timer
- * if autorefresh is enabled.
- */
- SDE_EVT32_VERBOSE(autorefresh_enabled);
- if (autorefresh_enabled)
- kthread_queue_work(&event_thread->worker,
+ kthread_queue_work(&event_thread->worker,
&sde_enc->vsync_event_work);
- else
- del_timer(&sde_enc->vsync_event_timer);
}
static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
{
struct sde_encoder_virt *sde_enc = container_of(work,
struct sde_encoder_virt, vsync_event_work);
+ bool autorefresh_enabled = false;
+ int rc = 0;
ktime_t wakeup_time;
if (!sde_enc) {
@@ -3036,39 +3364,71 @@ static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
return;
}
- if (_sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time))
+ rc = _sde_encoder_power_enable(sde_enc, true);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "sde enc power enabled failed:%d\n", rc);
return;
+ }
+
+ if (sde_enc->cur_master &&
+ sde_enc->cur_master->ops.is_autorefresh_enabled)
+ autorefresh_enabled =
+ sde_enc->cur_master->ops.is_autorefresh_enabled(
+ sde_enc->cur_master);
+
+ /* Update timer if autorefresh is enabled else return */
+ if (!autorefresh_enabled)
+ goto exit;
+
+ rc = _sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time);
+ if (rc)
+ goto exit;
SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
mod_timer(&sde_enc->vsync_event_timer,
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+
+exit:
+ _sde_encoder_power_enable(sde_enc, false);
}
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
struct sde_encoder_kickoff_params *params)
{
struct sde_encoder_virt *sde_enc;
struct sde_encoder_phys *phys;
bool needs_hw_reset = false;
+ uint32_t ln_cnt1, ln_cnt2;
unsigned int i;
- int rc;
+ int rc, ret = 0;
if (!drm_enc || !params) {
SDE_ERROR("invalid args\n");
- return;
+ return -EINVAL;
}
sde_enc = to_sde_encoder_virt(drm_enc);
SDE_DEBUG_ENC(sde_enc, "\n");
SDE_EVT32(DRMID(drm_enc));
+ /* save this for later, in case of errors */
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.get_wr_line_count)
+ ln_cnt1 = sde_enc->cur_master->ops.get_wr_line_count(
+ sde_enc->cur_master);
+ else
+ ln_cnt1 = -EINVAL;
+
/* prepare for next kickoff, may include waiting on previous kickoff */
SDE_ATRACE_BEGIN("enc_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys) {
- if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->ops.prepare_for_kickoff) {
+ rc = phys->ops.prepare_for_kickoff(
+ phys, params);
+ if (rc)
+ ret = rc;
+ }
if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
_sde_encoder_setup_dither(phys);
@@ -3076,11 +3436,24 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
}
SDE_ATRACE_END("enc_prepare_for_kickoff");
- sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+ rc = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
+ if (rc) {
+ SDE_ERROR_ENC(sde_enc, "resource kickoff failed rc %d\n", rc);
+ return rc;
+ }
/* if any phys needs reset, reset all phys, in-order */
if (needs_hw_reset) {
- SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_FUNC_CASE1);
+ /* query line count before cur_master is updated */
+ if (sde_enc->cur_master &&
+ sde_enc->cur_master->ops.get_wr_line_count)
+ ln_cnt2 = sde_enc->cur_master->ops.get_wr_line_count(
+ sde_enc->cur_master);
+ else
+ ln_cnt2 = -EINVAL;
+
+ SDE_EVT32(DRMID(drm_enc), ln_cnt1, ln_cnt2,
+ SDE_EVTLOG_FUNC_CASE1);
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys && phys->ops.hw_reset)
@@ -3094,17 +3467,23 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
if (sde_enc->cur_master && sde_enc->cur_master->connector) {
rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
- if (rc)
+ if (rc) {
SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
sde_enc->cur_master->connector->base.id,
rc);
+ ret = rc;
+ }
}
- if (sde_encoder_is_dsc_enabled(drm_enc)) {
+ if (_sde_encoder_is_dsc_enabled(drm_enc)) {
rc = _sde_encoder_dsc_setup(sde_enc, params);
- if (rc)
+ if (rc) {
SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
+ ret = rc;
+ }
}
+
+ return ret;
}
/**
@@ -3162,12 +3541,6 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
SDE_DEBUG_ENC(sde_enc, "\n");
- atomic_set(&sde_enc->frame_done_timeout,
- SDE_FRAME_DONE_TIMEOUT * 1000 /
- drm_enc->crtc->state->adjusted_mode.vrefresh);
- mod_timer(&sde_enc->frame_done_timer, jiffies +
- ((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
-
/* create a 'no pipes' commit to release buffers on errors */
if (is_error)
_sde_encoder_reset_ctl_hw(drm_enc);
@@ -3728,36 +4101,6 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
return ret;
}
-static void sde_encoder_frame_done_timeout(unsigned long data)
-{
- struct drm_encoder *drm_enc = (struct drm_encoder *) data;
- struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
- struct msm_drm_private *priv;
- u32 event;
-
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
- SDE_ERROR("invalid parameters\n");
- return;
- }
- priv = drm_enc->dev->dev_private;
-
- if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
- SDE_DEBUG_ENC(sde_enc, "invalid timeout\n");
- SDE_EVT32(DRMID(drm_enc), sde_enc->frame_busy_mask[0], 0);
- return;
- } else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
- SDE_ERROR_ENC(sde_enc, "invalid timeout\n");
- SDE_EVT32(DRMID(drm_enc), 0, 1);
- return;
- }
-
- SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
-
- event = SDE_ENCODER_FRAME_EVENT_ERROR;
- SDE_EVT32(DRMID(drm_enc), event);
- sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
-}
-
static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
.mode_set = sde_encoder_virt_mode_set,
.disable = sde_encoder_virt_disable,
@@ -3801,10 +4144,6 @@ struct drm_encoder *sde_encoder_init(
drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL);
drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
- atomic_set(&sde_enc->frame_done_timeout, 0);
- setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
- (unsigned long) sde_enc);
-
if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
disp_info->is_primary)
setup_timer(&sde_enc->vsync_event_timer,
@@ -3823,7 +4162,6 @@ struct drm_encoder *sde_encoder_init(
mutex_init(&sde_enc->rc_lock);
kthread_init_delayed_work(&sde_enc->delayed_off_work,
sde_encoder_off_work);
- sde_enc->idle_timeout = IDLE_TIMEOUT;
sde_enc->vblank_enabled = false;
kthread_init_work(&sde_enc->vsync_event_work,
@@ -3870,6 +4208,9 @@ int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
case MSM_ENC_VBLANK:
fn_wait = phys->ops.wait_for_vblank;
break;
+ case MSM_ENC_ACTIVE_REGION:
+ fn_wait = phys->ops.wait_for_active;
+ break;
default:
SDE_ERROR_ENC(sde_enc, "unknown wait event %d\n",
event);
@@ -3911,3 +4252,176 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+
+/**
+ * sde_encoder_update_caps_for_cont_splash - update encoder settings during
+ * device bootup when cont_splash is enabled
+ * @drm_enc: Pointer to drm encoder structure
+ * @Return: 0 on success, negative error code on failure
+ */
+int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct drm_connector *conn = NULL;
+ struct sde_connector *sde_conn = NULL;
+ struct sde_connector_state *sde_conn_state = NULL;
+ struct drm_display_mode *drm_mode = NULL;
+ struct sde_rm_hw_iter dsc_iter, pp_iter, ctl_iter;
+ int ret = 0, i;
+
+ if (!encoder) {
+ SDE_ERROR("invalid drm enc\n");
+ return -EINVAL;
+ }
+
+ if (!encoder->dev || !encoder->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = encoder->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sde_enc = to_sde_encoder_virt(encoder);
+ if (!priv->num_connectors) {
+ SDE_ERROR_ENC(sde_enc, "No connectors registered\n");
+ return -EINVAL;
+ }
+ SDE_DEBUG_ENC(sde_enc,
+ "num of connectors: %d\n", priv->num_connectors);
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ SDE_DEBUG_ENC(sde_enc, "connector id: %d\n",
+ priv->connectors[i]->base.id);
+ sde_conn = to_sde_connector(priv->connectors[i]);
+ if (!sde_conn->encoder) {
+ SDE_DEBUG_ENC(sde_enc,
+ "encoder not attached to connector\n");
+ continue;
+ }
+ if (sde_conn->encoder->base.id
+ == encoder->base.id) {
+ conn = (priv->connectors[i]);
+ break;
+ }
+ }
+
+ if (!conn || !conn->state) {
+ SDE_ERROR_ENC(sde_enc, "connector not found\n");
+ return -EINVAL;
+ }
+
+ sde_conn_state = to_sde_connector_state(conn->state);
+
+ if (!sde_conn->ops.get_mode_info) {
+ SDE_ERROR_ENC(sde_enc, "conn: get_mode_info ops not found\n");
+ return -EINVAL;
+ }
+
+ ret = sde_conn->ops.get_mode_info(&encoder->crtc->state->adjusted_mode,
+ &sde_conn_state->mode_info,
+ sde_kms->catalog->max_mixer_width,
+ sde_conn->display);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "conn: ->get_mode_info failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = sde_rm_reserve(&sde_kms->rm, encoder, encoder->crtc->state,
+ conn->state, false);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return ret;
+ }
+
+ if (conn->encoder) {
+ conn->state->best_encoder = conn->encoder;
+ SDE_DEBUG_ENC(sde_enc,
+ "configured cstate->best_encoder to ID = %d\n",
+ conn->state->best_encoder->base.id);
+ } else {
+ SDE_ERROR_ENC(sde_enc, "No encoder mapped to connector=%d\n",
+ conn->base.id);
+ }
+
+ SDE_DEBUG_ENC(sde_enc, "connector topology = %llu\n",
+ sde_connector_get_topology_name(conn));
+ drm_mode = &encoder->crtc->state->adjusted_mode;
+ SDE_DEBUG_ENC(sde_enc, "hdisplay = %d, vdisplay = %d\n",
+ drm_mode->hdisplay, drm_mode->vdisplay);
+ drm_set_preferred_mode(conn, drm_mode->hdisplay, drm_mode->vdisplay);
+
+ if (encoder->bridge) {
+ SDE_DEBUG_ENC(sde_enc, "Bridge mapped to encoder\n");
+ /*
+ * For cont-splash use case, we update the mode
+ * configurations manually. This will skip the
+ * usually mode set call when actual frame is
+ * pushed from framework. The bridge needs to
+ * be updated with the current drm mode by
+ * calling the bridge mode set ops.
+ */
+ if (encoder->bridge->funcs) {
+ SDE_DEBUG_ENC(sde_enc, "calling mode_set\n");
+ encoder->bridge->funcs->mode_set(encoder->bridge,
+ drm_mode, drm_mode);
+ }
+ } else {
+ SDE_ERROR_ENC(sde_enc, "No bridge attached to encoder\n");
+ }
+
+ sde_rm_init_hw_iter(&pp_iter, encoder->base.id, SDE_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ sde_enc->hw_pp[i] = NULL;
+ if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+ break;
+ sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+ }
+
+ sde_rm_init_hw_iter(&dsc_iter, encoder->base.id, SDE_HW_BLK_DSC);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ sde_enc->hw_dsc[i] = NULL;
+ if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
+ break;
+ sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
+ }
+
+ sde_rm_init_hw_iter(&ctl_iter, encoder->base.id, SDE_HW_BLK_CTL);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ phys->hw_ctl = NULL;
+ if (!sde_rm_get_hw(&sde_kms->rm, &ctl_iter))
+ break;
+ phys->hw_ctl = (struct sde_hw_ctl *) ctl_iter.hw;
+ }
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (!phys) {
+ SDE_ERROR_ENC(sde_enc,
+ "phys encoders not initialized\n");
+ return -EINVAL;
+ }
+
+ phys->hw_pp = sde_enc->hw_pp[i];
+ if (phys->ops.cont_splash_mode_set)
+ phys->ops.cont_splash_mode_set(phys, drm_mode);
+
+ if (phys->ops.is_master && phys->ops.is_master(phys)) {
+ phys->connector = conn;
+ sde_enc->cur_master = phys;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 9a9ff86..f8a3cf3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -29,9 +29,8 @@
#define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE BIT(3)
#define SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE BIT(4)
-#define SDE_ENCODER_FRAME_EVENT_IDLE BIT(5)
-#define IDLE_TIMEOUT (66 - 16/2)
+#define IDLE_POWERCOLLAPSE_DURATION (66 - 16/2)
/**
* Encoder functions and data types
@@ -115,8 +114,9 @@ struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
+ * @Returns: Zero on success, last detected error otherwise
*/
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+int sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
struct sde_encoder_kickoff_params *params);
/**
@@ -162,19 +162,19 @@ int sde_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
+ * sde_encoder_control_te - control enabling/disabling VSYNC_IN_EN
+ * @encoder: encoder pointer
+ * @enable: boolean to indicate enable/disable
+ */
+void sde_encoder_control_te(struct drm_encoder *encoder, bool enable);
+
+/**
* sde_encoder_virt_restore - restore the encoder configs
* @encoder: encoder pointer
*/
void sde_encoder_virt_restore(struct drm_encoder *encoder);
/**
- * sde_encoder_is_dsc_enabled - check if encoder is in DSC mode
- * @drm_enc: Pointer to drm encoder object
- * @Return: true if encoder is in DSC mode
- */
-bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc);
-
-/**
* sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
* @drm_enc: Pointer to drm encoder object
* @Return: true if encoder is in DSC merge mode
@@ -213,12 +213,11 @@ void sde_encoder_destroy(struct drm_encoder *drm_enc);
void sde_encoder_prepare_commit(struct drm_encoder *drm_enc);
/**
- * sde_encoder_set_idle_timeout - set the idle timeout for video
- * and command mode encoders.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @idle_timeout: idle timeout duration in milliseconds
+ * sde_encoder_update_caps_for_cont_splash - update encoder settings during
+ * device bootup when cont_splash is enabled
+ * @drm_enc: Pointer to drm encoder structure
+ * @Return: 0 on success, negative error code on failure
*/
-void sde_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
- u32 idle_timeout);
+int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder);
#endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index e5a4da4..cfe2126 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -99,6 +99,8 @@ struct sde_encoder_virt_ops {
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
* @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @cont_splash_mode_set: mode set with specific HW resources during
+ * cont splash enabled state.
* @mode_set: DRM Call. Set a DRM mode.
* This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
@@ -126,14 +128,15 @@ struct sde_encoder_virt_ops {
* SDE_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @update_split_role: Update the split role of the phys enc
- * @prepare_idle_pc: phys encoder can update the vsync_enable status
- * on idle power collapse prepare
+ * @control_te: Interface to control the vsync_enable status
* @restore: Restore all the encoder configs.
* @is_autorefresh_enabled: provides the autorefresh current
* enable/disable state.
- * @get_line_count: Obtain current vertical line count
+ * @get_line_count: Obtain current internal vertical line count
+ * @get_wr_line_count: Obtain current output vertical line count
* @wait_dma_trigger: Returns true if lut dma has to trigger and wait
* unitl transaction is complete.
+ * @wait_for_active: Wait for display scan line to be in active area
*/
struct sde_encoder_phys_ops {
@@ -147,6 +150,8 @@ struct sde_encoder_phys_ops {
void (*mode_set)(struct sde_encoder_phys *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+ void (*cont_splash_mode_set)(struct sde_encoder_phys *encoder,
+ struct drm_display_mode *adjusted_mode);
void (*enable)(struct sde_encoder_phys *encoder);
void (*disable)(struct sde_encoder_phys *encoder);
int (*atomic_check)(struct sde_encoder_phys *encoder,
@@ -160,7 +165,7 @@ struct sde_encoder_phys_ops {
int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct sde_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct sde_encoder_phys *phys_enc);
- void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
+ int (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
struct sde_encoder_kickoff_params *params);
void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
void (*trigger_flush)(struct sde_encoder_phys *phys_enc);
@@ -174,11 +179,13 @@ struct sde_encoder_phys_ops {
void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
void (*update_split_role)(struct sde_encoder_phys *phys_enc,
enum sde_enc_split_role role);
- void (*prepare_idle_pc)(struct sde_encoder_phys *phys_enc);
+ void (*control_te)(struct sde_encoder_phys *phys_enc, bool enable);
void (*restore)(struct sde_encoder_phys *phys);
bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
int (*get_line_count)(struct sde_encoder_phys *phys);
+ int (*get_wr_line_count)(struct sde_encoder_phys *phys);
bool (*wait_dma_trigger)(struct sde_encoder_phys *phys);
+ int (*wait_for_active)(struct sde_encoder_phys *phys);
};
/**
@@ -296,6 +303,7 @@ static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
* @hw_intf: Hardware interface to the intf registers
* @timing_params: Current timing parameter
* @rot_fetch: Prefill for inline rotation
+ * @error_count: Number of consecutive kickoffs that experienced an error
* @rot_fetch_valid: true if rot_fetch is updated (reset in enc enable)
*/
struct sde_encoder_phys_vid {
@@ -303,6 +311,7 @@ struct sde_encoder_phys_vid {
struct sde_hw_intf *hw_intf;
struct intf_timing_params timing_params;
struct intf_prog_fetch rot_fetch;
+ int error_count;
bool rot_fetch_valid;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 9976f85..d7cbfbe 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -32,7 +32,7 @@
#define to_sde_encoder_phys_cmd(x) \
container_of(x, struct sde_encoder_phys_cmd, base)
-#define PP_TIMEOUT_MAX_TRIALS 10
+#define PP_TIMEOUT_MAX_TRIALS 2
/*
* Tearcheck sync start and continue thresholds are empirically found
@@ -359,6 +359,21 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
irq->irq_idx = -EINVAL;
}
+static void sde_encoder_phys_cmd_cont_splash_mode_set(
+ struct sde_encoder_phys *phys_enc,
+ struct drm_display_mode *adj_mode)
+{
+ if (!phys_enc || !adj_mode) {
+ SDE_ERROR("invalid args\n");
+ return;
+ }
+
+ phys_enc->cached_mode = *adj_mode;
+ phys_enc->enable_state = SDE_ENC_ENABLED;
+
+ _sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
static void sde_encoder_phys_cmd_mode_set(
struct sde_encoder_phys *phys_enc,
struct drm_display_mode *mode,
@@ -418,26 +433,24 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
to_sde_encoder_phys_cmd(phys_enc);
u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR
| SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE;
- bool do_log = false;
if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
return -EINVAL;
cmd_enc->pp_timeout_report_cnt++;
- if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
- frame_event |= SDE_ENCODER_FRAME_EVENT_PANEL_DEAD;
- do_log = true;
- } else if (cmd_enc->pp_timeout_report_cnt == 1) {
- do_log = true;
- }
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt),
frame_event);
- /* to avoid flooding, only log first time, and "dead" time */
- if (do_log) {
+ if (cmd_enc->pp_timeout_report_cnt >= PP_TIMEOUT_MAX_TRIALS) {
+ cmd_enc->pp_timeout_report_cnt = PP_TIMEOUT_MAX_TRIALS;
+ frame_event |= SDE_ENCODER_FRAME_EVENT_PANEL_DEAD;
+
+ SDE_DBG_DUMP("panic");
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ /* to avoid flooding, only log first time, and "dead" time */
SDE_ERROR_CMDENC(cmd_enc,
"pp:%d kickoff timed out ctl %d cnt %d koff_cnt %d\n",
phys_enc->hw_pp->idx - PINGPONG_0,
@@ -446,9 +459,6 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout(
atomic_read(&phys_enc->pending_kickoff_cnt));
SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
-
- sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -473,6 +483,21 @@ static bool _sde_encoder_phys_is_ppsplit_slave(
phys_enc->split_role == ENC_ROLE_SLAVE;
}
+static bool _sde_encoder_phys_is_disabling_ppsplit_slave(
+ struct sde_encoder_phys *phys_enc)
+{
+ enum sde_rm_topology_name old_top;
+
+ if (!phys_enc || !phys_enc->connector ||
+ phys_enc->split_role != ENC_ROLE_SLAVE)
+ return false;
+
+ old_top = sde_connector_get_old_topology_name(
+ phys_enc->connector->state);
+
+ return old_top == SDE_RM_TOPOLOGY_PPSPLIT;
+}
+
static int _sde_encoder_phys_cmd_poll_write_pointer_started(
struct sde_encoder_phys *phys_enc)
{
@@ -662,7 +687,15 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc,
{
struct sde_encoder_phys_cmd *cmd_enc;
- if (!phys_enc || _sde_encoder_phys_is_ppsplit_slave(phys_enc))
+ if (!phys_enc)
+ return;
+
+ /**
+ * pingpong split slaves do not register for IRQs
+ * check old and new topologies
+ */
+ if (_sde_encoder_phys_is_ppsplit_slave(phys_enc) ||
+ _sde_encoder_phys_is_disabling_ppsplit_slave(phys_enc))
return;
cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
@@ -890,7 +923,7 @@ static bool sde_encoder_phys_cmd_is_autorefresh_enabled(
return cfg.enable;
}
-static void _sde_encoder_phys_cmd_connect_te(
+static void sde_encoder_phys_cmd_connect_te(
struct sde_encoder_phys *phys_enc, bool enable)
{
if (!phys_enc || !phys_enc->hw_pp ||
@@ -901,12 +934,6 @@ static void _sde_encoder_phys_cmd_connect_te(
phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}
-static void sde_encoder_phys_cmd_prepare_idle_pc(
- struct sde_encoder_phys *phys_enc)
-{
- _sde_encoder_phys_cmd_connect_te(phys_enc, false);
-}
-
static int sde_encoder_phys_cmd_get_line_count(
struct sde_encoder_phys *phys_enc)
{
@@ -925,6 +952,28 @@ static int sde_encoder_phys_cmd_get_line_count(
return hw_pp->ops.get_line_count(hw_pp);
}
+static int sde_encoder_phys_cmd_get_write_line_count(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_pingpong *hw_pp;
+ struct sde_hw_pp_vsync_info info;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_vsync_info)
+ return -EINVAL;
+
+ if (hw_pp->ops.get_vsync_info(hw_pp, &info))
+ return -EINVAL;
+
+ return (int)info.wr_ptr_line_count;
+}
+
static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_phys_cmd *cmd_enc =
@@ -984,7 +1033,7 @@ static void sde_encoder_phys_cmd_get_hw_resources(
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
-static void sde_encoder_phys_cmd_prepare_for_kickoff(
+static int sde_encoder_phys_cmd_prepare_for_kickoff(
struct sde_encoder_phys *phys_enc,
struct sde_encoder_kickoff_params *params)
{
@@ -994,7 +1043,7 @@ static void sde_encoder_phys_cmd_prepare_for_kickoff(
if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
- return;
+ return -EINVAL;
}
SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
@@ -1018,6 +1067,7 @@ static void sde_encoder_phys_cmd_prepare_for_kickoff(
SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(&phys_enc->pending_kickoff_cnt));
+ return ret;
}
static int _sde_encoder_phys_cmd_wait_for_ctl_start(
@@ -1027,6 +1077,7 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
to_sde_encoder_phys_cmd(phys_enc);
struct sde_encoder_wait_info wait_info;
int ret;
+ bool frame_pending = true;
if (!phys_enc || !phys_enc->hw_ctl) {
SDE_ERROR("invalid argument(s)\n");
@@ -1044,10 +1095,17 @@ static int _sde_encoder_phys_cmd_wait_for_ctl_start(
ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
&wait_info);
if (ret == -ETIMEDOUT) {
- SDE_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
- ret = -EINVAL;
- } else if (!ret)
- ret = 0;
+ struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+
+ if (ctl && ctl->ops.get_start_state)
+ frame_pending = ctl->ops.get_start_state(ctl);
+
+ if (frame_pending)
+ SDE_ERROR_CMDENC(cmd_enc,
+ "ctl start interrupt wait failed\n");
+ else
+ ret = 0;
+ }
return ret;
}
@@ -1212,19 +1270,6 @@ static void sde_encoder_phys_cmd_prepare_commit(
SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
}
-static void sde_encoder_phys_cmd_handle_post_kickoff(
- struct sde_encoder_phys *phys_enc)
-{
- if (!phys_enc)
- return;
-
- /**
- * re-enable external TE, either for the first time after enabling
- * or if disabled for Autorefresh
- */
- _sde_encoder_phys_cmd_connect_te(phys_enc, true);
-}
-
static void sde_encoder_phys_cmd_trigger_start(
struct sde_encoder_phys *phys_enc)
{
@@ -1251,6 +1296,7 @@ static void sde_encoder_phys_cmd_init_ops(
ops->prepare_commit = sde_encoder_phys_cmd_prepare_commit;
ops->is_master = sde_encoder_phys_cmd_is_master;
ops->mode_set = sde_encoder_phys_cmd_mode_set;
+ ops->cont_splash_mode_set = sde_encoder_phys_cmd_cont_splash_mode_set;
ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
ops->enable = sde_encoder_phys_cmd_enable;
ops->disable = sde_encoder_phys_cmd_disable;
@@ -1268,11 +1314,12 @@ static void sde_encoder_phys_cmd_init_ops(
ops->irq_control = sde_encoder_phys_cmd_irq_control;
ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
ops->restore = sde_encoder_phys_cmd_enable_helper;
- ops->prepare_idle_pc = sde_encoder_phys_cmd_prepare_idle_pc;
+ ops->control_te = sde_encoder_phys_cmd_connect_te;
ops->is_autorefresh_enabled =
sde_encoder_phys_cmd_is_autorefresh_enabled;
- ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
ops->get_line_count = sde_encoder_phys_cmd_get_line_count;
+ ops->get_wr_line_count = sde_encoder_phys_cmd_get_write_line_count;
+ ops->wait_for_active = NULL;
}
struct sde_encoder_phys *sde_encoder_phys_cmd_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index a983b7c..aaf50f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -33,6 +33,13 @@
#define to_sde_encoder_phys_vid(x) \
container_of(x, struct sde_encoder_phys_vid, base)
+/* maximum number of consecutive kickoff errors */
+#define KICKOFF_MAX_ERRORS 2
+
+/* Poll time to do recovery during active region */
+#define POLL_TIME_USEC_FOR_LN_CNT 500
+#define MAX_POLL_CNT 10
+
static bool sde_encoder_phys_vid_is_master(
struct sde_encoder_phys *phys_enc)
{
@@ -498,6 +505,21 @@ static void _sde_encoder_phys_vid_setup_irq_hw_idx(
irq->hw_idx = phys_enc->intf_idx;
}
+static void sde_encoder_phys_vid_cont_splash_mode_set(
+ struct sde_encoder_phys *phys_enc,
+ struct drm_display_mode *adj_mode)
+{
+ if (!phys_enc || !adj_mode) {
+ SDE_ERROR("invalid args\n");
+ return;
+ }
+
+ phys_enc->cached_mode = *adj_mode;
+ phys_enc->enable_state = SDE_ENC_ENABLED;
+
+ _sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
static void sde_encoder_phys_vid_mode_set(
struct sde_encoder_phys *phys_enc,
struct drm_display_mode *mode,
@@ -722,7 +744,8 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
struct sde_encoder_phys *phys_enc, bool notify)
{
struct sde_encoder_wait_info wait_info;
- int ret;
+ int ret = 0;
+ u32 event = 0;
if (!phys_enc) {
pr_err("invalid encoder\n");
@@ -735,11 +758,10 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
if (!sde_encoder_phys_vid_is_master(phys_enc)) {
/* signal done for slave video encoder, unless it is pp-split */
- if (!_sde_encoder_phys_is_ppsplit(phys_enc) &&
- notify && phys_enc->parent_ops.handle_frame_done)
- phys_enc->parent_ops.handle_frame_done(
- phys_enc->parent, phys_enc,
- SDE_ENCODER_FRAME_EVENT_DONE);
+ if (!_sde_encoder_phys_is_ppsplit(phys_enc) && notify) {
+ event = SDE_ENCODER_FRAME_EVENT_DONE;
+ goto end;
+ }
return 0;
}
@@ -747,13 +769,20 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
&wait_info);
- if (ret == -ETIMEDOUT) {
- sde_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
- } else if (!ret && notify && phys_enc->parent_ops.handle_frame_done)
+ if (ret == -ETIMEDOUT)
+ event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE
+ | SDE_ENCODER_FRAME_EVENT_ERROR;
+ else if (!ret && notify)
+ event = SDE_ENCODER_FRAME_EVENT_DONE;
+
+end:
+ SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret,
+ ret ? SDE_EVTLOG_FATAL : 0);
+ if (phys_enc->parent_ops.handle_frame_done && event)
phys_enc->parent_ops.handle_frame_done(
phys_enc->parent, phys_enc,
SDE_ENCODER_FRAME_EVENT_DONE);
-
return ret;
}
@@ -763,7 +792,7 @@ static int sde_encoder_phys_vid_wait_for_vblank(
return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
}
-static void sde_encoder_phys_vid_prepare_for_kickoff(
+static int sde_encoder_phys_vid_prepare_for_kickoff(
struct sde_encoder_phys *phys_enc,
struct sde_encoder_kickoff_params *params)
{
@@ -771,15 +800,15 @@ static void sde_encoder_phys_vid_prepare_for_kickoff(
struct sde_hw_ctl *ctl;
int rc;
- if (!phys_enc || !params) {
+ if (!phys_enc || !params || !phys_enc->hw_ctl) {
SDE_ERROR("invalid encoder/parameters\n");
- return;
+ return -EINVAL;
}
vid_enc = to_sde_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
- if (!ctl || !ctl->ops.wait_reset_status)
- return;
+ if (!ctl->ops.wait_reset_status)
+ return 0;
/*
* hw supports hardware initiated ctl reset, so before we kickoff a new
@@ -789,11 +818,25 @@ static void sde_encoder_phys_vid_prepare_for_kickoff(
if (rc) {
SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
- sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus", "panic");
+
+ ++vid_enc->error_count;
+ if (vid_enc->error_count >= KICKOFF_MAX_ERRORS) {
+ vid_enc->error_count = KICKOFF_MAX_ERRORS;
+
+ SDE_DBG_DUMP("panic");
+ } else if (vid_enc->error_count == 1) {
+ SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
+ }
+
+ /* request a ctl reset before the next flush */
+ phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET;
+ } else {
+ vid_enc->error_count = 0;
}
programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
+
+ return rc;
}
static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
@@ -833,6 +876,9 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
sde_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ if (!sde_encoder_phys_vid_is_master(phys_enc))
+ goto exit;
+
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
@@ -841,7 +887,16 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- if (sde_encoder_phys_vid_is_master(phys_enc)) {
+ ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret) {
+ SDE_ERROR_VIDENC(vid_enc,
+ "failed to enable vblank irq: %d\n",
+ ret);
+ SDE_EVT32(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret,
+ SDE_EVTLOG_FUNC_CASE1,
+ SDE_EVTLOG_ERROR);
+ } else {
ret = _sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
@@ -849,10 +904,13 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
"failure waiting for disable: %d\n",
ret);
SDE_EVT32(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret);
+ vid_enc->hw_intf->idx - INTF_0, ret,
+ SDE_EVTLOG_FUNC_CASE2,
+ SDE_EVTLOG_ERROR);
}
+ sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
}
-
+exit:
phys_enc->enable_state = SDE_ENC_DISABLED;
}
@@ -954,10 +1012,77 @@ static int sde_encoder_phys_vid_get_line_count(
return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
}
+static int sde_encoder_phys_vid_wait_for_active(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct drm_display_mode mode;
+ struct sde_encoder_phys_vid *vid_enc;
+ u32 ln_cnt, min_ln_cnt, active_lns_cnt;
+ u32 clk_period, time_of_line;
+ u32 delay, retry = MAX_POLL_CNT;
+
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count) {
+ SDE_ERROR_VIDENC(vid_enc, "invalid vid_enc params\n");
+ return -EINVAL;
+ }
+
+ mode = phys_enc->cached_mode;
+
+ /*
+ * calculate clk_period as pico second to maintain good
+ * accuracy with high pclk rate and this number is in 17 bit
+ * range.
+ */
+ clk_period = DIV_ROUND_UP_ULL(1000000000, mode.clock);
+ if (!clk_period) {
+ SDE_ERROR_VIDENC(vid_enc, "Unable to calculate clock period\n");
+ return -EINVAL;
+ }
+
+ min_ln_cnt = (mode.vtotal - mode.vsync_start) +
+ (mode.vsync_end - mode.vsync_start);
+ active_lns_cnt = mode.vdisplay;
+ time_of_line = mode.htotal * clk_period;
+
+ /* delay in micro seconds */
+ delay = (time_of_line * (min_ln_cnt +
+ (mode.vsync_start - mode.vdisplay))) / 1000000;
+
+ /*
+ * Wait for max delay before
+ * polling to check active region
+ */
+ if (delay > POLL_TIME_USEC_FOR_LN_CNT)
+ delay = POLL_TIME_USEC_FOR_LN_CNT;
+
+ while (retry) {
+ ln_cnt = vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+
+ if ((ln_cnt >= min_ln_cnt) &&
+ (ln_cnt < (active_lns_cnt + min_ln_cnt))) {
+ SDE_DEBUG_VIDENC(vid_enc,
+ "Needed lines left line_cnt=%d\n",
+ ln_cnt);
+ return 0;
+ }
+
+ SDE_ERROR_VIDENC(vid_enc, "line count is less. line_cnt = %d\n",
+ ln_cnt);
+ /* Add delay so that line count is in active region */
+ udelay(delay);
+ retry--;
+ }
+
+ return -EINVAL;
+}
+
static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
{
ops->is_master = sde_encoder_phys_vid_is_master;
ops->mode_set = sde_encoder_phys_vid_mode_set;
+ ops->cont_splash_mode_set = sde_encoder_phys_vid_cont_splash_mode_set;
ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
ops->enable = sde_encoder_phys_vid_enable;
ops->disable = sde_encoder_phys_vid_disable;
@@ -976,7 +1101,9 @@ static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
ops->trigger_flush = sde_encoder_helper_trigger_flush;
ops->hw_reset = sde_encoder_helper_hw_reset;
ops->get_line_count = sde_encoder_phys_vid_get_line_count;
+ ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
+ ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
}
struct sde_encoder_phys *sde_encoder_phys_vid_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index d7084dd..42cf015 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -27,7 +27,8 @@
#define to_sde_encoder_phys_wb(x) \
container_of(x, struct sde_encoder_phys_wb, base)
-#define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1)
+#define WBID(wb_enc) \
+ ((wb_enc && wb_enc->wb_dev) ? wb_enc->wb_dev->wb_idx - WB_0 : -1)
#define TO_S15D16(_x_) ((_x_) << 7)
@@ -867,11 +868,11 @@ static int sde_encoder_phys_wb_wait_for_commit_done(
wb_enc->irq_idx, true);
if (irq_status) {
SDE_DEBUG("wb:%d done but irq not triggered\n",
- wb_enc->wb_dev->wb_idx - WB_0);
+ WBID(wb_enc));
sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
} else {
SDE_ERROR("wb:%d kickoff timed out\n",
- wb_enc->wb_dev->wb_idx - WB_0);
+ WBID(wb_enc));
atomic_add_unless(
&phys_enc->pending_retire_fence_cnt, -1, 0);
@@ -904,8 +905,7 @@ static int sde_encoder_phys_wb_wait_for_commit_done(
if (!rc) {
wb_time = (u64)ktime_to_us(wb_enc->end_time) -
(u64)ktime_to_us(wb_enc->start_time);
- SDE_DEBUG("wb:%d took %llu us\n",
- wb_enc->wb_dev->wb_idx - WB_0, wb_time);
+ SDE_DEBUG("wb:%d took %llu us\n", WBID(wb_enc), wb_time);
}
/* cleanup writeback framebuffer */
@@ -925,8 +925,9 @@ static int sde_encoder_phys_wb_wait_for_commit_done(
* sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
* @phys_enc: Pointer to physical encoder
* @params: kickoff parameters
+ * Returns: Zero on success
*/
-static void sde_encoder_phys_wb_prepare_for_kickoff(
+static int sde_encoder_phys_wb_prepare_for_kickoff(
struct sde_encoder_phys *phys_enc,
struct sde_encoder_kickoff_params *params)
{
@@ -941,7 +942,7 @@ static void sde_encoder_phys_wb_prepare_for_kickoff(
ret = sde_encoder_phys_wb_register_irq(phys_enc);
if (ret) {
SDE_ERROR("failed to register irq %d\n", ret);
- return;
+ return ret;
}
wb_enc->kickoff_count++;
@@ -955,6 +956,7 @@ static void sde_encoder_phys_wb_prepare_for_kickoff(
wb_enc->start_time = ktime_get();
SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 816339b..686c640 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -402,3 +402,30 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
_sde_fence_trigger(ctx, ts);
}
+
+void sde_fence_timeline_status(struct sde_fence_context *ctx,
+ struct drm_mode_object *drm_obj)
+{
+ char *obj_name;
+
+ if (!ctx || !drm_obj) {
+ SDE_ERROR("invalid input params\n");
+ return;
+ }
+
+ switch (drm_obj->type) {
+ case DRM_MODE_OBJECT_CRTC:
+ obj_name = "crtc";
+ break;
+ case DRM_MODE_OBJECT_CONNECTOR:
+ obj_name = "connector";
+ break;
+ default:
+ obj_name = "unknown";
+ break;
+ }
+
+ SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
+ obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
+ ctx->commit_count);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
index 029175b..29d2ec7 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.h
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -132,6 +132,15 @@ int sde_fence_create(struct sde_fence_context *fence, uint64_t *val,
*/
void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
bool reset_timeline);
+
+/**
+ * sde_fence_timeline_status - prints fence timeline status
+ * @ctx: Pointer to fence context container
+ * @drm_obj: Pointer to drm object associated with fence timeline
+ */
+void sde_fence_timeline_status(struct sde_fence_context *ctx,
+ struct drm_mode_object *drm_obj);
+
#else
static inline void *sde_sync_get(uint64_t fd)
{
@@ -185,6 +194,12 @@ static inline int sde_fence_create(struct sde_fence_context *fence,
{
return 0;
}
+
+static inline void sde_fence_timeline_status(struct sde_fence_context *ctx,
+		struct drm_mode_object *drm_obj)
+{
+ /* do nothing */
+}
#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index b8b0967..ddff6ee 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -15,6 +15,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/pm_qos.h>
#include "sde_hw_mdss.h"
#include "sde_hw_catalog.h"
@@ -115,6 +116,8 @@
"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25"
#define DEFAULT_MAX_PER_PIPE_BW 2400000
#define DEFAULT_AMORTIZABLE_THRESHOLD 25
+#define DEFAULT_CPU_MASK 0
+#define DEFAULT_CPU_DMA_LATENCY PM_QOS_DEFAULT_VALUE
/*************************************************************
* DTSI PROPERTY INDEX
@@ -145,6 +148,7 @@ enum sde_prop {
SMART_DMA_REV,
IDLE_PC,
DEST_SCALER,
+ SMART_PANEL_ALIGN_MODE,
SDE_PROP_MAX,
};
@@ -176,6 +180,8 @@ enum {
PERF_QOS_LUT_NRT,
PERF_QOS_LUT_CWB,
PERF_CDP_SETTING,
+ PERF_CPU_MASK,
+ PERF_CPU_DMA_LATENCY,
PERF_PROP_MAX,
};
@@ -398,6 +404,8 @@ static struct sde_prop_type sde_prop[] = {
{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
{IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
{DEST_SCALER, "qcom,sde-has-dest-scaler", false, PROP_TYPE_BOOL},
+ {SMART_PANEL_ALIGN_MODE, "qcom,sde-smart-panel-align-mode",
+ false, PROP_TYPE_U32},
};
static struct sde_prop_type sde_perf_prop[] = {
@@ -448,6 +456,9 @@ static struct sde_prop_type sde_perf_prop[] = {
{PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
PROP_TYPE_U32_ARRAY},
+ {PERF_CPU_MASK, "qcom,sde-qos-cpu-mask", false, PROP_TYPE_U32},
+ {PERF_CPU_DMA_LATENCY, "qcom,sde-qos-cpu-dma-latency", false,
+ PROP_TYPE_U32},
};
static struct sde_prop_type sspp_prop[] = {
@@ -2729,6 +2740,9 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->mdp[0].has_dest_scaler =
PROP_VALUE_ACCESS(prop_value, DEST_SCALER, 0);
+ cfg->mdp[0].smart_panel_align_mode =
+ PROP_VALUE_ACCESS(prop_value, SMART_PANEL_ALIGN_MODE, 0);
+
rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
if (!rc && !strcmp(type, "qseedv3")) {
cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
@@ -3083,6 +3097,15 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->has_cdp = true;
}
+ cfg->perf.cpu_mask =
+ prop_exists[PERF_CPU_MASK] ?
+ PROP_VALUE_ACCESS(prop_value, PERF_CPU_MASK, 0) :
+ DEFAULT_CPU_MASK;
+ cfg->perf.cpu_dma_latency =
+ prop_exists[PERF_CPU_DMA_LATENCY] ?
+ PROP_VALUE_ACCESS(prop_value, PERF_CPU_DMA_LATENCY, 0) :
+ DEFAULT_CPU_DMA_LATENCY;
+
freeprop:
kfree(prop_value);
end:
@@ -3147,6 +3170,12 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
goto end;
}
+ if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
+ IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301) ||
+ IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_400) ||
+ IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_401))
+ sde_cfg->has_hdr = true;
+
index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
0, plane_formats, ARRAY_SIZE(plane_formats));
index += sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index d56ad06..1cd65ea 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -524,6 +524,7 @@ struct sde_clk_ctrl_reg {
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
* @has_dest_scaler: indicates support of destination scaler
+ * @smart_panel_align_mode: split display smart panel align modes
* @clk_ctrls clock control register definition
*/
struct sde_mdp_cfg {
@@ -532,6 +533,7 @@ struct sde_mdp_cfg {
u32 ubwc_static;
u32 ubwc_swizzle;
bool has_dest_scaler;
+ u32 smart_panel_align_mode;
struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
};
@@ -866,6 +868,8 @@ struct sde_perf_cdp_cfg {
* @sfe_lut_tbl: LUT tables for safe signals
* @qos_lut_tbl: LUT tables for QoS signals
* @cdp_cfg cdp use case configurations
+ * @cpu_mask: pm_qos cpu mask value
+ * @cpu_dma_latency: pm_qos cpu dma latency value
*/
struct sde_perf_cfg {
u32 max_bw_low;
@@ -890,6 +894,8 @@ struct sde_perf_cfg {
struct sde_qos_lut_tbl sfe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
+ u32 cpu_mask;
+ u32 cpu_dma_latency;
};
/**
@@ -913,6 +919,7 @@ struct sde_perf_cfg {
* @has_sbuf indicate if stream buffer is available
* @sbuf_headroom stream buffer headroom in lines
* @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_hdr HDR feature support
* @dma_formats Supported formats for dma pipe
* @cursor_formats Supported formats for cursor pipe
* @vig_formats Supported formats for vig pipe
@@ -941,6 +948,7 @@ struct sde_mdss_cfg {
u32 vbif_qos_nlvl;
u32 ts_prefill_rev;
+ bool has_hdr;
u32 mdss_count;
struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 88f821d..426ecf1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -30,6 +30,7 @@
#define CTL_START 0x01C
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
+#define CTL_SW_RESET_OVERRIDE 0x060
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_ROT_TOP 0x0C0
#define CTL_ROT_FLUSH 0x0C4
@@ -41,6 +42,47 @@
#define SDE_REG_RESET_TIMEOUT_US 2000
+#define MDP_CTL_FLUSH(n) ((0x2000) + (0x200*n) + CTL_FLUSH)
+#define CTL_FLUSH_LM_BIT(n) (6 + n)
+#define CTL_TOP_LM_OFFSET(index, lm) (0x2000 + (0x200 * index) + (lm * 0x4))
+
+int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
+ void __iomem *mmio)
+{
+ int i, j;
+ u32 op_mode;
+
+ if (!data) {
+ pr_err("invalid splash data\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < data->ctl_top_cnt; i++) {
+ struct ctl_top *top = &data->top[i];
+ u8 ctl_id = data->ctl_ids[i] - CTL_0;
+ u32 regval = 0;
+
+ op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));
+
+ /* Set border fill*/
+ regval |= CTL_MIXER_BORDER_OUT;
+
+ for (j = 0; j < top->ctl_lm_cnt; j++) {
+ u8 lm_id = top->lm[j].lm_id - LM_0;
+
+ writel_relaxed(regval,
+ mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));
+
+ op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
+ }
+ op_mode |= CTL_FLUSH_MASK_CTL;
+
+ writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
+ }
+ return 0;
+
+}
+
static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -82,6 +124,11 @@ static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
+static inline int sde_hw_ctl_get_start_state(struct sde_hw_ctl *ctx)
+{
+ return SDE_REG_READ(&ctx->hw, CTL_START);
+}
+
static inline void sde_hw_ctl_trigger_pending(struct sde_hw_ctl *ctx)
{
SDE_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
@@ -345,7 +392,7 @@ static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
- pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx - CTL_0);
SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_US))
return -EINVAL;
@@ -353,6 +400,15 @@ static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
return 0;
}
+static void sde_hw_ctl_hard_reset(struct sde_hw_ctl *ctx, bool enable)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("hw ctl hard reset for ctl:%d, %d\n",
+ ctx->idx - CTL_0, enable);
+ SDE_REG_WRITE(c, CTL_SW_RESET_OVERRIDE, enable);
+}
+
static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -586,6 +642,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
ops->trigger_pending = sde_hw_ctl_trigger_pending;
ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
ops->reset = sde_hw_ctl_reset_control;
+ ops->hard_reset = sde_hw_ctl_hard_reset;
ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
@@ -597,6 +654,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
ops->reg_dma_flush = sde_hw_reg_dma_flush;
+ ops->get_start_state = sde_hw_ctl_get_start_state;
if (cap & BIT(SDE_CTL_SBUF)) {
ops->get_bitmask_rot = sde_hw_ctl_get_bitmask_rot;
@@ -605,6 +663,27 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
}
};
+#define CTL_BASE_OFFSET 0x2000
+#define CTL_TOP_OFFSET(index) (CTL_BASE_OFFSET + (0x200 * (index)) + CTL_TOP)
+
+void sde_get_ctl_top_for_cont_splash(void __iomem *mmio,
+ struct ctl_top *top, int index)
+{
+ if (!mmio || !top) {
+ SDE_ERROR("invalid input parameters\n");
+ return;
+ }
+
+ top->value = readl_relaxed(mmio + CTL_TOP_OFFSET(index));
+ top->intf_sel = (top->value >> 4) & 0xf;
+ top->pp_sel = (top->value >> 8) & 0x7;
+ top->dspp_sel = (top->value >> 11) & 0x3;
+ top->mode_sel = (top->value >> 17) & 0x1;
+
+ SDE_DEBUG("ctl[%d]_top->0x%x,pp_sel=0x%x,dspp_sel=0x%x,intf_sel=0x%x\n",
+ index, top->value, top->pp_sel, top->dspp_sel, top->intf_sel);
+}
+
static struct sde_hw_blk_ops sde_hw_ops = {
.start = NULL,
.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index bad80f0..f8594da 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -152,6 +152,13 @@ struct sde_hw_ctl_ops {
int (*reset)(struct sde_hw_ctl *c);
+ /**
+ * hard_reset - force reset on ctl_path
+ * @ctx : ctl path ctx pointer
+ * @enable : whether to enable/disable hard reset
+ */
+ void (*hard_reset)(struct sde_hw_ctl *c, bool enable);
+
/*
* wait_reset_status - checks ctl reset status
* @ctx : ctl path ctx pointer
@@ -218,6 +225,12 @@ struct sde_hw_ctl_ops {
*/
void (*reg_dma_flush)(struct sde_hw_ctl *ctx, bool blocking);
+ /**
+ * check if ctl start trigger state to confirm the frame pending
+ * status
+ * @ctx : ctl path ctx pointer
+ */
+ int (*get_start_state)(struct sde_hw_ctl *ctx);
};
/**
@@ -247,6 +260,24 @@ struct sde_hw_ctl {
};
/**
+ * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
+ * @data: pointer to sde splash data
+ * @mmio: mapped register io address of MDP
+ * @return: error code
+ */
+int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
+ void __iomem *mmio);
+
+/**
+ * sde_get_ctl_top_for_cont_splash - retrieve the current LM blocks
+ * @mmio: mapped register io address of MDP
+ * @top: pointer to the current "ctl_top" structure that needs update
+ * @index: ctl_top index
+ */
+void sde_get_ctl_top_for_cont_splash(void __iomem *mmio,
+ struct ctl_top *top, int index);
+
+/**
* sde_hw_ctl - convert base object sde_hw_base to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.h b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
index d81cfaf..6e97c5d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ds.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
@@ -27,20 +27,18 @@ struct sde_hw_ds;
#define SDE_DS_OP_MODE_DUAL BIT(16)
/* struct sde_hw_ds_cfg - destination scaler config
- * @ndx : DS selection index
+ * @idx : DS selection index
* @flags : Flag to switch between mode for DS
* @lm_width : Layer mixer width configuration
* @lm_heigh : Layer mixer height configuration
- * @set_lm_flush : LM flush bit
- * @scl3_cfg : Pointer to sde_hw_scaler3_cfg.
+ * @scl3_cfg : Configuration data for scaler
*/
struct sde_hw_ds_cfg {
- u32 ndx;
+ u32 idx;
int flags;
u32 lm_width;
u32 lm_height;
- bool set_lm_flush;
- struct sde_hw_scaler3_cfg *scl3_cfg;
+ struct sde_hw_scaler3_cfg scl3_cfg;
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 4e677c2..134db51 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -278,6 +278,43 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
}
};
+#define CTL_BASE_OFFSET 0x2000
+#define CTL_TOP_LM_OFFSET(index, lm) \
+ (CTL_BASE_OFFSET + (0x200 * index) + (lm * 0x4))
+
+int sde_get_ctl_lm_for_cont_splash(void __iomem *mmio, int max_lm_cnt,
+ u8 lm_cnt, u8 *lm_ids, struct ctl_top *top, int index)
+{
+ int j;
+ struct sde_splash_lm_hw *lm;
+
+ if (!mmio || !top || !lm_ids) {
+ SDE_ERROR("invalid input parameters\n");
+ return 0;
+ }
+
+ lm = top->lm;
+ for (j = 0; j < max_lm_cnt; j++) {
+ lm[top->ctl_lm_cnt].lm_reg_value = readl_relaxed(mmio
+ + CTL_TOP_LM_OFFSET(index, j));
+ SDE_DEBUG("ctl[%d]_top --> lm[%d]=0x%x, j=%d\n",
+ index, top->ctl_lm_cnt,
+ lm[top->ctl_lm_cnt].lm_reg_value, j);
+ SDE_DEBUG("lm_cnt = %d\n", lm_cnt);
+ if (lm[top->ctl_lm_cnt].lm_reg_value) {
+ lm[top->ctl_lm_cnt].ctl_id = index;
+ lm_ids[lm_cnt++] = j + LM_0;
+ lm[top->ctl_lm_cnt].lm_id = j + LM_0;
+ SDE_DEBUG("ctl_id=%d, lm[%d].lm_id = %d\n",
+ lm[top->ctl_lm_cnt].ctl_id,
+ top->ctl_lm_cnt,
+ lm[top->ctl_lm_cnt].lm_id);
+ top->ctl_lm_cnt++;
+ }
+ }
+ return top->ctl_lm_cnt;
+}
+
static struct sde_hw_blk_ops sde_hw_ops = {
.start = NULL,
.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 8a146bd..a2307ec 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -117,6 +117,19 @@ static inline struct sde_hw_mixer *to_sde_hw_mixer(struct sde_hw_blk *hw)
}
/**
+ * sde_get_ctl_lm_for_cont_splash - retrieve the current LM blocks
+ * @mmio: mapped register io address of MDP
+ * @max_lm_cnt: number of LM blocks supported in the hw
+ * @lm_cnt: number of LM blocks already active
+ * @lm_ids: pointer to store the active LM block IDs
+ * @top: pointer to the current "ctl_top" structure
+ * @index: ctl_top index
+ * return: number of active LM blocks for this CTL block
+ */
+int sde_get_ctl_lm_for_cont_splash(void __iomem *mmio, int max_lm_cnt,
+ u8 lm_cnt, u8 *lm_ids, struct ctl_top *top, int index);
+
+/**
* sde_hw_lm_init(): Initializes the mixer hw driver object.
* should be called once before accessing every mixer.
* @idx: mixer index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 952ee8f..3125ebf 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -535,4 +535,64 @@ struct sde_hw_dim_layer {
struct sde_rect rect;
};
+/**
+ * struct sde_splash_lm_hw - Struct contains LM block properties
+ * @lm_id: stores the current LM ID
+ * @ctl_id: stores the current CTL ID associated with the LM.
+ * @lm_reg_value:Store the LM block register value
+ */
+struct sde_splash_lm_hw {
+ u8 lm_id;
+ u8 ctl_id;
+ u32 lm_reg_value;
+};
+
+/**
+ * struct ctl_top - Struct contains CTL block properties
+ * @value: Store the CTL block register value
+ * @mode_sel: stores the mode selected in the CTL block
+ * @dspp_sel: stores the dspp selected in the CTL block
+ * @pp_sel: stores the pp selected in the CTL block
+ * @intf_sel: stores the intf selected in the CTL block
+ * @lm: Pointer to store the list of LMs in the CTL block
+ * @ctl_lm_cnt: stores the active number of MDSS "LM" blocks in the CTL block
+ */
+struct ctl_top {
+ u32 value;
+ u8 mode_sel;
+ u8 dspp_sel;
+ u8 pp_sel;
+ u8 intf_sel;
+ struct sde_splash_lm_hw lm[LM_MAX - LM_0];
+ u8 ctl_lm_cnt;
+};
+
+/**
+ * struct sde_splash_data - Struct contains details of continuous splash
+ * memory region and initial pipeline configuration.
+ * @smmu_handoff_pending:boolean to notify handoff from splash memory to smmu
+ * @splash_base: Base address of continuous splash region reserved
+ * by bootloader
+ * @splash_size: Size of continuous splash region
+ * @top: struct ctl_top objects
+ * @ctl_ids: stores the valid MDSS ctl block ids for the current mode
+ * @lm_ids: stores the valid MDSS layer mixer block ids for the current mode
+ * @dsc_ids: stores the valid MDSS DSC block ids for the current mode
+ * @ctl_top_cnt:stores the active number of MDSS "top" blks of the current mode
+ * @lm_cnt: stores the active number of MDSS "LM" blks for the current mode
+ * @dsc_cnt: stores the active number of MDSS "dsc" blks for the current mode
+ */
+struct sde_splash_data {
+ bool smmu_handoff_pending;
+ unsigned long splash_base;
+ u32 splash_size;
+ struct ctl_top top[CTL_MAX - CTL_0];
+ u8 ctl_ids[CTL_MAX - CTL_0];
+ u8 lm_ids[LM_MAX - LM_0];
+ u8 dsc_ids[DSC_MAX - DSC_0];
+ u8 ctl_top_cnt;
+ u8 lm_cnt;
+ u8 dsc_cnt;
+};
+
#endif /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index d8f79f1..050e21b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -357,6 +357,49 @@ static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
}
};
+#define MDP_PP_DSC_OFFSET(index) (0x71000 + (0x800 * index) + 0x0a0)
+#define MDP_PP_AUTOREFRESH_OFFSET(index) (0x71000 + (0x800 * index) + 0x030)
+
+int sde_get_pp_dsc_for_cont_splash(void __iomem *mmio,
+ int max_dsc_cnt, u8 *dsc_ids)
+{
+ int index;
+ int value, dsc_cnt = 0;
+
+ if (!mmio || !dsc_ids) {
+ SDE_ERROR("invalid input parameters\n");
+ return 0;
+ }
+
+ SDE_DEBUG("max_dsc_cnt = %d\n", max_dsc_cnt);
+ for (index = 0; index < max_dsc_cnt; index++) {
+ value = readl_relaxed(mmio
+ + MDP_PP_DSC_OFFSET(index));
+ SDE_DEBUG("DSC[%d]=0x%x\n",
+ index, value);
+ SDE_DEBUG("dsc_cnt = %d\n", dsc_cnt);
+ if (value) {
+ dsc_ids[dsc_cnt] = index + DSC_0;
+ dsc_cnt++;
+ }
+ value = readl_relaxed(mmio
+ + MDP_PP_AUTOREFRESH_OFFSET(index));
+ SDE_DEBUG("AUTOREFRESH[%d]=0x%x\n",
+ index, value);
+ if (value) {
+			SDE_DEBUG("Disabling autorefresh\n");
+ writel_relaxed(0x0, mmio
+ + MDP_PP_AUTOREFRESH_OFFSET(index));
+ /*
+ * Wait for one frame update so that auto refresh
+ * disable is through
+ */
+ usleep_range(16000, 20000);
+ }
+ }
+ return dsc_cnt;
+}
+
static struct sde_hw_blk_ops sde_hw_ops = {
.start = NULL,
.stop = NULL,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index 389b2d2..fef49f4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -161,6 +161,16 @@ static inline struct sde_hw_pingpong *to_sde_hw_pingpong(struct sde_hw_blk *hw)
}
/**
+ * sde_get_pp_dsc_for_cont_splash - retrieve the current dsc enabled blocks
+ * @mmio: mapped register io address of MDP
+ * @max_dsc_cnt: number of DSC blocks supported in the hw
+ * @dsc_ids: pointer to store the active DSC block IDs
+ * return: number of active DSC blocks
+ */
+int sde_get_pp_dsc_for_cont_splash(void __iomem *mmio,
+ int max_dsc_cnt, u8 *dsc_ids);
+
+/**
* sde_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 3326aa2..cf65784 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -509,8 +509,13 @@ int init_v1(struct sde_hw_reg_dma *cfg)
last_cmd_buf[i] =
alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
- rc = -EINVAL;
- break;
+ /*
+ * This will allow reg dma to fall back to
+ * AHB domain
+ */
+ pr_info("Failed to allocate reg dma, ret:%lu\n",
+ PTR_ERR(last_cmd_buf[i]));
+ return 0;
}
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index c5af3a9..8d386a8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -588,6 +588,10 @@ static int sde_hw_rot_commit(struct sde_hw_rot *hw, struct sde_hw_rot_cmd *data,
cmd_type = SDE_ROTATOR_INLINE_CMD_CLEANUP;
priv_handle = data->priv_handle;
break;
+ case SDE_HW_ROT_CMD_RESET:
+ cmd_type = SDE_ROTATOR_INLINE_CMD_ABORT;
+ priv_handle = data->priv_handle;
+ break;
default:
SDE_ERROR("invalid hw rotator command %d\n", hw_cmd);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index 1237858..ea88d05 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -28,12 +28,14 @@ struct sde_hw_rot;
* @SDE_HW_ROT_CMD_COMMIT: commit/execute rotator command
* @SDE_HW_ROT_CMD_START: mdp is ready to start
* @SDE_HW_ROT_CMD_CLEANUP: cleanup rotator command after it is done
+ * @SDE_HW_ROT_CMD_RESET: request rotator h/w reset
*/
enum sde_hw_rot_cmd_type {
SDE_HW_ROT_CMD_VALIDATE,
SDE_HW_ROT_CMD_COMMIT,
SDE_HW_ROT_CMD_START,
SDE_HW_ROT_CMD_CLEANUP,
+ SDE_HW_ROT_CMD_RESET,
};
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index acecf1a..e7aa6ea 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -259,7 +259,8 @@ static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
* Setup source pixel format, flip,
*/
static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
- const struct sde_format *fmt, u32 flags,
+ const struct sde_format *fmt,
+ bool blend_enabled, u32 flags,
enum sde_sspp_multirect_index rect_mode)
{
struct sde_hw_blk_reg_map *c;
@@ -328,7 +329,8 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
SDE_FETCH_CONFIG_RESET_VALUE |
ctx->mdp->highest_bank_bit << 18);
if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
- fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ fast_clear = (fmt->alpha_enable && blend_enabled) ?
+ BIT(31) : 0;
SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
fast_clear | (ctx->mdp->ubwc_swizzle) |
(ctx->mdp->highest_bank_bit << 4));
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index d32c9d8..fdf215f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -293,12 +293,14 @@ struct sde_hw_sspp_ops {
/**
* setup_format - setup pixel format cropping rectangle, flip
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe config structure
+ * @fmt: Pointer to sde_format structure
+ * @blend_enabled: flag indicating blend enabled or disabled on plane
* @flags: Extra flags for format config
* @index: rectangle index in multirect
*/
void (*setup_format)(struct sde_hw_pipe *ctx,
- const struct sde_format *fmt, u32 flags,
+ const struct sde_format *fmt,
+ bool blend_enabled, u32 flags,
enum sde_sspp_multirect_index index);
/**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b48022f..51e4ba2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -86,6 +86,9 @@ static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
lower_pipe = FLD_SMART_PANEL_FREE_RUN;
upper_pipe = lower_pipe;
+
+ /* smart panel align mode */
+ lower_pipe |= BIT(mdp->caps->smart_panel_align_mode);
} else {
if (cfg->intf == INTF_2) {
lower_pipe = FLD_INTF_1_SW_TRG_MUX;
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index d6a1d30..10435da 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -104,7 +104,8 @@ void sde_irq_preinstall(struct msm_kms *kms)
}
/* disable irq until power event enables it */
- irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
+ if (!sde_kms->cont_splash_en)
+ irq_set_status_flags(sde_kms->irq_num, IRQ_NOAUTOEN);
}
int sde_irq_postinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 92d16e5..5d359be 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -20,8 +20,11 @@
#include <drm/drm_crtc.h>
#include <linux/debugfs.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -84,6 +87,7 @@ MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
struct drm_mode_object *obj, u32 event, bool en);
bool sde_is_custom_client(void)
@@ -99,6 +103,7 @@ static int _sde_danger_signal_status(struct seq_file *s,
struct msm_drm_private *priv;
struct sde_danger_safe_status status;
int i;
+ int rc;
if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
SDE_ERROR("invalid arg(s)\n");
@@ -108,7 +113,13 @@ static int _sde_danger_signal_status(struct seq_file *s,
priv = kms->dev->dev_private;
memset(&status, 0, sizeof(struct sde_danger_safe_status));
- sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
+
if (danger_status) {
seq_puts(s, "\nDanger signal status:\n");
if (kms->hw_mdp->ops.get_danger_status)
@@ -491,6 +502,30 @@ static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
return 0;
}
+static int _sde_kms_release_splash_buffer(unsigned int mem_addr,
+ unsigned int size)
+{
+ unsigned long pfn_start, pfn_end, pfn_idx;
+ int ret = 0;
+
+	if (!mem_addr || !size) {
+		SDE_ERROR("invalid params\n"); return -EINVAL; }
+
+ pfn_start = mem_addr >> PAGE_SHIFT;
+ pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+
+ ret = memblock_free(mem_addr, size);
+ if (ret) {
+ SDE_ERROR("continuous splash memory free failed:%d\n", ret);
+ return ret;
+ }
+ for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+ free_reserved_page(pfn_to_page(pfn_idx));
+
+ return ret;
+
+}
+
static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
@@ -498,6 +533,11 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct msm_drm_private *priv;
struct drm_device *dev;
struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, rc = 0;
+ struct drm_plane *plane;
+ bool commit_no_planes = true;
if (!kms)
return;
@@ -508,11 +548,46 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
return;
priv = dev->dev_private;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return;
+ }
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc != NULL)
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (encoder->crtc != crtc)
+ continue;
+
sde_encoder_prepare_commit(encoder);
+ }
+ }
+
+ if (sde_kms->splash_data.smmu_handoff_pending) {
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+ if (plane->state != NULL &&
+ plane->state->crtc != NULL)
+ commit_no_planes = false;
+ }
+
+ if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {
+
+ rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
+ sde_kms->mmio);
+ if (rc)
+ SDE_ERROR("pipe staging failed: %d\n", rc);
+
+ rc = _sde_kms_release_splash_buffer(
+ sde_kms->splash_data.splash_base,
+ sde_kms->splash_data.splash_size);
+ if (rc)
+ SDE_ERROR("release of splash memory failed %d\n", rc);
+
+ sde_kms->splash_data.smmu_handoff_pending = false;
+ }
/*
* NOTE: for secure use cases we want to apply the new HW
@@ -525,14 +600,24 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
static void sde_kms_commit(struct msm_kms *kms,
struct drm_atomic_state *old_state)
{
+ struct sde_kms *sde_kms;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
+ if (!kms || !old_state)
+ return;
+ sde_kms = to_sde_kms(kms);
+
+ if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
if (crtc->state->active) {
SDE_EVT32(DRMID(crtc));
- sde_crtc_commit_kickoff(crtc);
+ sde_crtc_commit_kickoff(crtc, old_crtc_state);
}
}
}
@@ -556,6 +641,11 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
return;
priv = sde_kms->dev->dev_private;
+ if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
+ SDE_ERROR("power resource is not enabled\n");
+ return;
+ }
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
sde_crtc_complete_commit(crtc, old_crtc_state);
@@ -575,6 +665,14 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
+
+ if (sde_kms->cont_splash_en) {
+ SDE_DEBUG("Disabling cont_splash feature\n");
+ sde_kms->cont_splash_en = false;
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ SDE_DEBUG("removing Vote for MDP Resources\n");
+ }
}
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
@@ -759,7 +857,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
struct sde_kms *sde_kms)
{
static const struct sde_connector_ops dsi_ops = {
- .post_init = dsi_conn_post_init,
+ .set_info_blob = dsi_conn_set_info_blob,
.detect = dsi_conn_detect,
.get_modes = dsi_connector_get_modes,
.put_modes = dsi_connector_put_modes,
@@ -774,9 +872,11 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_dst_format = dsi_display_get_dst_format,
.post_kickoff = dsi_conn_post_kickoff,
.check_status = dsi_display_check_status,
+ .enable_event = dsi_conn_enable_event
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
+ .set_info_blob = sde_wb_connector_set_info_blob,
.detect = sde_wb_connector_detect,
.get_modes = sde_wb_connector_get_modes,
.set_property = sde_wb_connector_set_property,
@@ -795,6 +895,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_mode_info = dp_connector_get_mode_info,
.send_hpd_event = dp_connector_send_hpd_event,
.check_status = NULL,
+ .pre_kickoff = dp_connector_pre_kickoff,
};
struct msm_display_info info;
struct drm_encoder *encoder;
@@ -1089,6 +1190,30 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
}
/**
+ * sde_kms_timeline_status - provides current timeline status
+ * This API should be called without mode config lock.
+ * @dev: Pointer to drm device
+ */
+void sde_kms_timeline_status(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_connector *conn;
+
+ if (!dev) {
+ SDE_ERROR("invalid drm device node\n");
+ return;
+ }
+
+ drm_for_each_crtc(crtc, dev)
+ sde_crtc_timeline_status(crtc);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(conn, dev)
+ sde_conn_timeline_status(conn);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
* struct sde_kms_fbo_fb - framebuffer creation list
* @list: list of framebuffer attached to framebuffer object
* @fb: Pointer to framebuffer attached to framebuffer object
@@ -1696,16 +1821,129 @@ static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
drm_modeset_unlock_all(dev);
}
+static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = sde_kms->dev;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ int ret = 0;
+
+ drm_for_each_plane(plane, dev) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ SDE_ERROR("error %d getting plane %d state\n",
+ ret, DRMID(plane));
+ return ret;
+ }
+
+ ret = sde_plane_helper_reset_custom_properties(plane,
+ plane_state);
+ if (ret) {
+ SDE_ERROR("error %d resetting plane props %d\n",
+ ret, DRMID(plane));
+ return ret;
+ }
+ }
+ drm_for_each_crtc(crtc, dev) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ SDE_ERROR("error %d getting crtc %d state\n",
+ ret, DRMID(crtc));
+ return ret;
+ }
+
+ ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
+ if (ret) {
+ SDE_ERROR("error %d resetting crtc props %d\n",
+ ret, DRMID(crtc));
+ return ret;
+ }
+ }
+
+ drm_for_each_connector(conn, dev) {
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ SDE_ERROR("error %d getting connector %d state\n",
+ ret, DRMID(conn));
+ return ret;
+ }
+
+ ret = sde_connector_helper_reset_custom_properties(conn,
+ conn_state);
+ if (ret) {
+ SDE_ERROR("error %d resetting connector props %d\n",
+ ret, DRMID(conn));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void sde_kms_lastclose(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms;
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ int ret, i;
+
+ if (!kms) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ dev = sde_kms->dev;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return;
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+ /* add reset of custom properties to the state */
+ ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
+ if (ret)
+ break;
+
+ ret = drm_atomic_commit(state);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+ SDE_DEBUG("deadlock backoff on attempt %d\n", i);
+ }
+
+ if (ret) {
+ /**
+ * on success, atomic state object ownership transfers to
+ * framework, otherwise, free it here
+ */
+ drm_atomic_state_free(state);
+ SDE_ERROR("failed to run last close: %d\n", ret);
+ }
+}
+
static int sde_kms_check_secure_transition(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct sde_kms *sde_kms;
struct drm_device *dev;
struct drm_crtc *crtc;
- struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
+ struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
struct drm_crtc_state *crtc_state;
- int secure_crtc_cnt = 0, active_crtc_cnt = 0;
- int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
+ int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
+ bool sec_session = false, global_sec_session = false;
int i;
if (!kms || !state) {
@@ -1713,56 +1951,64 @@ static int sde_kms_check_secure_transition(struct msm_kms *kms,
SDE_ERROR("invalid arguments\n");
}
- /* iterate state object for active and secure crtc */
+ sde_kms = to_sde_kms(kms);
+ dev = sde_kms->dev;
+
+ /* iterate state object for active secure/non-secure crtc */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->active)
continue;
+
active_crtc_cnt++;
if (sde_crtc_get_secure_level(crtc, crtc_state) ==
- SDE_DRM_SEC_ONLY) {
- sec_crtc = crtc;
- secure_crtc_cnt++;
- }
+ SDE_DRM_SEC_ONLY)
+ sec_session = true;
+
+ cur_crtc = crtc;
}
- /* bail out from further validation if no secure ctrc */
- if (!secure_crtc_cnt)
- return 0;
-
- if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
- (secure_crtc_cnt &&
- (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
- SDE_ERROR("Secure check failed active:%d, secure:%d\n",
- active_crtc_cnt, secure_crtc_cnt);
- return -EPERM;
- }
-
- sde_kms = to_sde_kms(kms);
- dev = sde_kms->dev;
/* iterate global list for active and secure crtc */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
if (!crtc->state->active)
continue;
- active_mode_crtc_cnt++;
-
+ global_active_crtc_cnt++;
if (sde_crtc_get_secure_level(crtc, crtc->state) ==
- SDE_DRM_SEC_ONLY) {
- secure_global_crtc_cnt++;
- temp_crtc = crtc;
- }
+ SDE_DRM_SEC_ONLY)
+ global_sec_session = true;
+
+ global_crtc = crtc;
}
- /**
- * if more than one crtc is active fail
- * check if the previous and current commit secure
- * are same
+ /*
+ * - fail secure crtc commit, if any other crtc session is already
+ * in progress
+ * - fail non-secure crtc commit, if any secure crtc session is already
+ * in progress
*/
- if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
- (secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
- SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
- active_mode_crtc_cnt, temp_crtc->base.id);
+ if (global_sec_session || sec_session) {
+ if ((global_active_crtc_cnt >
+ MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
+ (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
+ SDE_ERROR(
+ "Secure check failed global_active:%d active:%d\n",
+ global_active_crtc_cnt, active_crtc_cnt);
+ return -EPERM;
+
+ /*
+ * As only one crtc is allowed during secure session, the crtc
+ * in this commit should match with the global crtc, if it
+ * exists
+ */
+ } else if (global_crtc && (global_crtc != cur_crtc)) {
+ SDE_ERROR(
+ "crtc%d-sec%d not allowed during crtc%d-sec%d\n",
+ cur_crtc ? cur_crtc->base.id : -1, sec_session,
+ global_crtc->base.id, global_sec_session);
+ return -EPERM;
+ }
+
+ }
return 0;
}
@@ -1861,6 +2107,142 @@ static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
}
+static int _sde_kms_gen_drm_mode(struct sde_kms *sde_kms,
+ void *display,
+ struct drm_display_mode *drm_mode)
+{
+ struct dsi_display_mode *modes = NULL;
+ u32 count = 0;
+ u32 size = 0;
+ int rc = 0;
+
+ rc = dsi_display_get_mode_count(display, &count);
+ if (rc) {
+ SDE_ERROR("failed to get num of modes, rc=%d\n", rc);
+ return rc;
+ }
+
+ SDE_DEBUG("num of modes = %d\n", count);
+ size = count * sizeof(*modes);
+ modes = kzalloc(size, GFP_KERNEL);
+ if (!modes) {
+ count = 0;
+ goto end;
+ }
+
+ rc = dsi_display_get_modes(display, modes);
+ if (rc) {
+ SDE_ERROR("failed to get modes, rc=%d\n", rc);
+ count = 0;
+ goto error;
+ }
+
+ /* TODO: currently consider modes[0] as the preferred mode */
+ dsi_convert_to_drm_mode(&modes[0], drm_mode);
+
+ SDE_DEBUG("hdisplay = %d, vdisplay = %d\n",
+ drm_mode->hdisplay, drm_mode->vdisplay);
+ drm_mode_set_name(drm_mode);
+ drm_mode_set_crtcinfo(drm_mode, 0);
+error:
+ kfree(modes);
+end:
+ return rc;
+}
+
+static int sde_kms_cont_splash_config(struct msm_kms *kms)
+{
+ void *display;
+ struct dsi_display *dsi_display;
+ struct msm_display_info info;
+ struct drm_encoder *encoder = NULL;
+ struct drm_crtc *crtc = NULL;
+ int i, rc = 0;
+ struct drm_display_mode *drm_mode = NULL;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ dev = sde_kms->dev;
+ if (!dev || !dev->platformdev) {
+ SDE_ERROR("invalid device\n");
+ return -EINVAL;
+ }
+
+ if (!sde_kms->cont_splash_en) {
+ DRM_INFO("cont_splash feature not enabled\n");
+ return rc;
+ }
+
+ /* Currently, we only support one dsi display configuration */
+ /* dsi */
+ for (i = 0; i < sde_kms->dsi_display_count; ++i) {
+ display = sde_kms->dsi_displays[i];
+ dsi_display = (struct dsi_display *)display;
+ SDE_DEBUG("display->name = %s\n", dsi_display->name);
+
+ if (dsi_display->bridge->base.encoder) {
+ encoder = dsi_display->bridge->base.encoder;
+ SDE_DEBUG("encoder name = %s\n", encoder->name);
+ }
+ memset(&info, 0x0, sizeof(info));
+ rc = dsi_display_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("dsi get_info %d failed\n", i);
+ encoder = NULL;
+ continue;
+ }
+ SDE_DEBUG("info.is_connected = %s, info.is_primary = %s\n",
+ ((info.is_connected) ? "true" : "false"),
+ ((info.is_primary) ? "true" : "false"));
+ break;
+ }
+
+ if (!encoder) {
+ SDE_ERROR("encoder not initialized\n");
+ return -EINVAL;
+ }
+
+ priv = sde_kms->dev->dev_private;
+ encoder->crtc = priv->crtcs[0];
+ crtc = encoder->crtc;
+ SDE_DEBUG("crtc id = %d\n", crtc->base.id);
+
+ crtc->state->encoder_mask = (1 << drm_encoder_index(encoder));
+ drm_mode = drm_mode_create(encoder->dev);
+ if (!drm_mode) {
+ SDE_ERROR("drm_mode create failed\n");
+ return -EINVAL;
+ }
+ _sde_kms_gen_drm_mode(sde_kms, display, drm_mode);
+ SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
+ drm_mode->name, drm_mode->base.id,
+ drm_mode->type, drm_mode->flags);
+
+ /* Update CRTC drm structure */
+ crtc->state->active = true;
+ rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
+ if (rc) {
+ SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
+ return rc;
+ }
+ drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
+ drm_mode_copy(&crtc->mode, drm_mode);
+
+ /* Update encoder structure */
+ sde_encoder_update_caps_for_cont_splash(encoder);
+
+ sde_crtc_update_cont_splash_mixer_settings(crtc);
+
+ return rc;
+}
+
static int sde_kms_pm_suspend(struct device *dev)
{
struct drm_device *ddev;
@@ -2025,6 +2407,7 @@ static const struct msm_kms_funcs kms_funcs = {
.irq_uninstall = sde_irq_uninstall,
.irq = sde_irq,
.preclose = sde_kms_preclose,
+ .lastclose = sde_kms_lastclose,
.prepare_fence = sde_kms_prepare_fence,
.prepare_commit = sde_kms_prepare_commit,
.commit = sde_kms_commit,
@@ -2040,6 +2423,7 @@ static const struct msm_kms_funcs kms_funcs = {
.pm_suspend = sde_kms_pm_suspend,
.pm_resume = sde_kms_pm_resume,
.destroy = sde_kms_destroy,
+ .cont_splash_config = sde_kms_cont_splash_config,
.register_events = _sde_kms_register_events,
.get_address_space = _sde_kms_get_address_space,
.postopen = _sde_kms_post_open,
@@ -2115,6 +2499,68 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
return ret;
}
+/* The caller must turn on the clock before calling this function. */
+static int _sde_kms_cont_splash_res_init(struct sde_kms *sde_kms)
+{
+ struct sde_mdss_cfg *cat;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct sde_splash_data *splash_data;
+ int i;
+ int ctl_top_cnt;
+
+ if (!sde_kms || !sde_kms->catalog) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ cat = sde_kms->catalog;
+ dev = sde_kms->dev;
+ priv = dev->dev_private;
+ splash_data = &sde_kms->splash_data;
+ SDE_DEBUG("mixer_count=%d, ctl_count=%d, dsc_count=%d\n",
+ cat->mixer_count,
+ cat->ctl_count,
+ cat->dsc_count);
+
+ ctl_top_cnt = cat->ctl_count;
+
+ if (ctl_top_cnt > ARRAY_SIZE(splash_data->top)) {
+ SDE_ERROR("Mismatch in ctl_top array size\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ctl_top_cnt; i++) {
+ sde_get_ctl_top_for_cont_splash(sde_kms->mmio,
+ &splash_data->top[i], i);
+ if (splash_data->top[i].intf_sel) {
+ splash_data->lm_cnt +=
+ sde_get_ctl_lm_for_cont_splash
+ (sde_kms->mmio,
+ sde_kms->catalog->mixer_count,
+ splash_data->lm_cnt,
+ splash_data->lm_ids,
+ &splash_data->top[i], i);
+ splash_data->ctl_ids[splash_data->ctl_top_cnt]
+ = i + CTL_0;
+ splash_data->ctl_top_cnt++;
+ sde_kms->cont_splash_en = true;
+ }
+ }
+
+ /* Skip DSC blk reads if cont_splash is disabled */
+ if (!sde_kms->cont_splash_en)
+ return 0;
+
+ splash_data->dsc_cnt =
+ sde_get_pp_dsc_for_cont_splash(sde_kms->mmio,
+ sde_kms->catalog->dsc_count,
+ splash_data->dsc_ids);
+ SDE_DEBUG("splash_data: ctl_top_cnt=%d, lm_cnt=%d, dsc_cnt=%d\n",
+ splash_data->ctl_top_cnt, splash_data->lm_cnt,
+ splash_data->dsc_cnt);
+
+ return 0;
+}
+
static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
struct sde_kms *sde_kms = usr;
@@ -2185,11 +2631,49 @@ static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
return rc;
}
+static int _sde_kms_get_splash_data(struct sde_splash_data *data)
+{
+ int ret = 0;
+ struct device_node *parent, *node;
+ struct resource r;
+
+ if (!data)
+ return -EINVAL;
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent) {
+ SDE_ERROR("failed to find reserved-memory node\n");
+ return -EINVAL;
+ }
+
+ node = of_find_node_by_name(parent, "cont_splash_region");
+ if (!node) {
+ SDE_ERROR("failed to find splash memory reservation\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(node, 0, &r)) {
+ SDE_ERROR("failed to find data for splash memory\n");
+ return -EINVAL;
+ }
+
+ data->splash_base = (unsigned long)r.start;
+ data->splash_size = (r.end - r.start) + 1;
+
+ pr_info("found continuous splash base address:%lx size:%x\n",
+ data->splash_base,
+ data->splash_size);
+ data->smmu_handoff_pending = true;
+
+ return ret;
+}
+
static int sde_kms_hw_init(struct msm_kms *kms)
{
struct sde_kms *sde_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
+ bool splash_mem_found = false;
int i, rc = -EINVAL;
if (!kms) {
@@ -2282,6 +2766,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto error;
}
+ rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
+ if (rc) {
+ SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
+ splash_mem_found = false;
+ } else {
+ splash_mem_found = true;
+ }
+
rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
true);
if (rc) {
@@ -2306,14 +2798,11 @@ static int sde_kms_hw_init(struct msm_kms *kms)
sde_dbg_init_dbg_buses(sde_kms->core_rev);
/*
- * Now we need to read the HW catalog and initialize resources such as
- * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ * Attempt continuous splash handoff only if reserved
+ * splash memory is found.
*/
- rc = _sde_kms_mmu_init(sde_kms);
- if (rc) {
- SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
- goto power_error;
- }
+ if (splash_mem_found)
+ _sde_kms_cont_splash_res_init(sde_kms);
/* Initialize reg dma block which is a singleton */
rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
@@ -2323,6 +2812,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
+ rc = _sde_kms_mmu_init(sde_kms);
+ if (rc) {
+ SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
sde_kms->dev);
if (rc) {
@@ -2440,7 +2935,11 @@ static int sde_kms_hw_init(struct msm_kms *kms)
SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
}
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+ if (sde_kms->cont_splash_en)
+ SDE_DEBUG("Skipping MDP Resources disable\n");
+ else
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
return 0;
@@ -2509,3 +3008,9 @@ static int _sde_kms_register_events(struct msm_kms *kms,
return ret;
}
+
+int sde_kms_handle_recovery(struct drm_encoder *encoder)
+{
+ SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
+ return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index f047305..501797b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -21,6 +21,7 @@
#include <linux/msm_ion.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -30,6 +31,7 @@
#include "sde_hw_catalog.h"
#include "sde_hw_ctl.h"
#include "sde_hw_lm.h"
+#include "sde_hw_pingpong.h"
#include "sde_hw_interrupts.h"
#include "sde_hw_wb.h"
#include "sde_hw_top.h"
@@ -182,6 +184,7 @@ struct sde_kms {
struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
+ struct pm_qos_request pm_qos_cpu_req;
struct ion_client *iclient;
struct sde_power_event *power_event;
@@ -212,7 +215,8 @@ struct sde_kms {
struct sde_rm rm;
bool rm_init;
-
+ struct sde_splash_data splash_data;
+ bool cont_splash_en;
struct sde_hw_vbif *hw_vbif[VBIF_MAX];
struct sde_hw_mdp *hw_mdp;
int dsi_display_count;
@@ -240,6 +244,23 @@ struct vsync_info {
bool sde_is_custom_client(void);
/**
+ * sde_kms_power_resource_is_enabled - whether or not power resource is enabled
+ * @dev: Pointer to drm device
+ * Return: true if power resource is enabled; false otherwise
+ */
+static inline bool sde_kms_power_resource_is_enabled(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+
+ if (!dev || !dev->dev_private)
+ return false;
+
+ priv = dev->dev_private;
+
+ return sde_power_resource_is_enabled(&priv->phandle);
+}
+
+/**
* sde_kms_is_suspend_state - whether or not the system is pm suspended
* @dev: Pointer to drm device
* Return: Suspend status
@@ -366,7 +387,8 @@ struct sde_kms_info {
* @S: Pointer to sde_kms_info structure
* Returns: Pointer to byte data
*/
-#define SDE_KMS_INFO_DATA(S) ((S) ? ((struct sde_kms_info *)(S))->data : 0)
+#define SDE_KMS_INFO_DATA(S) ((S) ? ((struct sde_kms_info *)(S))->data \
+ : NULL)
/**
* SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
@@ -562,4 +584,17 @@ void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo);
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
+/**
+ * sde_kms_timeline_status - provides current timeline status
+ * @dev: Pointer to drm device
+ */
+void sde_kms_timeline_status(struct drm_device *dev);
+
+/**
+ * sde_kms_handle_recovery - handler function for FIFO overflow issue
+ * @encoder: pointer to drm encoder structure
+ * return: 0 on success; error code otherwise
+ */
+int sde_kms_handle_recovery(struct drm_encoder *encoder);
+
#endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index ef06f39..ab48c4a 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -38,14 +38,6 @@
#include "sde_color_processing.h"
#include "sde_hw_rot.h"
-static bool suspend_blank = true;
-module_param(suspend_blank, bool, 0400);
-MODULE_PARM_DESC(suspend_blank,
- "If set, active planes will force their outputs to black,\n"
- "by temporarily enabling the color fill, when recovering\n"
- "from a system resume instead of attempting to display the\n"
- "last provided frame buffer.");
-
#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
@@ -66,6 +58,9 @@ MODULE_PARM_DESC(suspend_blank,
#define SDE_PLANE_COLOR_FILL_FLAG BIT(31)
+#define TIME_MULTIPLEX_RECT(r0, r1, buffer_lines) \
+ ((r0).y >= ((r1).y + (r1).h + buffer_lines))
+
/* multirect rect index */
enum {
R0,
@@ -103,6 +98,7 @@ enum sde_plane_qos {
* @sbuf_mode: force stream buffer mode if set
* @sbuf_writeback: force stream buffer writeback if set
* @revalidate: force revalidation of all the plane properties
+ * @xin_halt_forced_clk: whether or not clocks were forced on for xin halt
* @blob_rot_caps: Pointer to rotator capability blob
*/
struct sde_plane {
@@ -128,6 +124,7 @@ struct sde_plane {
u32 sbuf_mode;
u32 sbuf_writeback;
bool revalidate;
+ bool xin_halt_forced_clk;
struct sde_csc_cfg csc_cfg;
struct sde_csc_cfg *csc_usr_ptr;
@@ -521,6 +518,7 @@ int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
struct sde_plane *psde;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
+ int rc;
if (!plane || !plane->dev) {
SDE_ERROR("invalid arguments\n");
@@ -539,7 +537,13 @@ int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
if (!psde->is_rt_pipe)
goto end;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+ true);
+ if (rc) {
+ SDE_ERROR("failed to enable power resource %d\n", rc);
+ SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+ return rc;
+ }
_sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);
@@ -704,7 +708,10 @@ static void _sde_plane_set_input_fence(struct sde_plane *psde,
sde_sync_put(pstate->input_fence);
/* get fence pointer for later */
- pstate->input_fence = sde_sync_get(fd);
+ if (fd == 0)
+ pstate->input_fence = NULL;
+ else
+ pstate->input_fence = sde_sync_get(fd);
SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
}
@@ -821,6 +828,7 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
wait_ms, prefix);
psde->is_error = true;
+ sde_kms_timeline_status(plane->dev);
ret = -ETIMEDOUT;
break;
case -ERESTARTSYS:
@@ -1453,6 +1461,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
const struct sde_format *fmt;
const struct drm_plane *plane;
struct sde_plane_state *pstate;
+ bool blend_enable = true;
if (!psde || !psde->base.state) {
SDE_ERROR("invalid plane\n");
@@ -1475,6 +1484,9 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
*/
fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
+ blend_enable = (SDE_DRM_BLEND_OP_OPAQUE !=
+ sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+
/* update sspp */
if (fmt && psde->pipe_hw->ops.setup_solidfill) {
psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
@@ -1490,7 +1502,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
if (psde->pipe_hw->ops.setup_format)
psde->pipe_hw->ops.setup_format(psde->pipe_hw,
- fmt, SDE_SSPP_SOLID_FILL,
+ fmt, blend_enable, SDE_SSPP_SOLID_FILL,
pstate->multirect_index);
if (psde->pipe_hw->ops.setup_rects)
@@ -2486,6 +2498,111 @@ static void sde_plane_rot_atomic_update(struct drm_plane *plane,
msm_framebuffer_cleanup(state->fb, pstate->aspace);
}
+static bool _sde_plane_halt_requests(struct drm_plane *plane,
+ uint32_t xin_id, bool halt_forced_clk, bool enable)
+{
+ struct sde_plane *psde;
+ struct msm_drm_private *priv;
+ struct sde_vbif_set_xin_halt_params halt_params;
+
+ if (!plane || !plane->dev) {
+ SDE_ERROR("invalid arguments\n");
+ return false;
+ }
+
+ psde = to_sde_plane(plane);
+ if (!psde->pipe_hw || !psde->pipe_hw->cap) {
+ SDE_ERROR("invalid pipe reference\n");
+ return false;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid KMS reference\n");
+ return false;
+ }
+
+ memset(&halt_params, 0, sizeof(halt_params));
+ halt_params.vbif_idx = VBIF_RT;
+ halt_params.xin_id = xin_id;
+ halt_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
+ halt_params.forced_on = halt_forced_clk;
+ halt_params.enable = enable;
+
+ return sde_vbif_set_xin_halt(to_sde_kms(priv->kms), &halt_params);
+}
+
+void sde_plane_halt_requests(struct drm_plane *plane, bool enable)
+{
+ struct sde_plane *psde;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ if (!psde->pipe_hw || !psde->pipe_hw->cap) {
+ SDE_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ SDE_EVT32(DRMID(plane), psde->xin_halt_forced_clk, enable);
+
+ psde->xin_halt_forced_clk =
+ _sde_plane_halt_requests(plane, psde->pipe_hw->cap->xin_id,
+ psde->xin_halt_forced_clk, enable);
+}
+
+int sde_plane_reset_rot(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ struct sde_plane_rot_state *rstate;
+ bool halt_ret[MAX_BLOCKS] = {false};
+ signed int i, count;
+
+ if (!plane || !state) {
+ SDE_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(state);
+ rstate = &pstate->rot;
+
+ /* do nothing if not master rotator plane */
+ if (!rstate->out_sbuf || !rstate->rot_hw ||
+ !rstate->rot_hw->caps || (rstate->out_xpos != 0))
+ return 0;
+
+ count = (signed int)rstate->rot_hw->caps->xin_count;
+ if (count > ARRAY_SIZE(halt_ret))
+ count = ARRAY_SIZE(halt_ret);
+
+ SDE_DEBUG_PLANE(psde, "issuing reset for rotator\n");
+ SDE_EVT32(DRMID(plane), count);
+
+ for (i = 0; i < count; i++) {
+ const struct sde_rot_vbif_cfg *cfg =
+ &rstate->rot_hw->caps->vbif_cfg[i];
+
+ halt_ret[i] = _sde_plane_halt_requests(plane, cfg->xin_id,
+ false, true);
+ }
+
+ sde_plane_rot_submit_command(plane, state, SDE_HW_ROT_CMD_RESET);
+
+ for (i = count - 1; i >= 0; --i) {
+ const struct sde_rot_vbif_cfg *cfg =
+ &rstate->rot_hw->caps->vbif_cfg[i];
+
+ _sde_plane_halt_requests(plane, cfg->xin_id,
+ halt_ret[i], false);
+ }
+ return 0;
+}
+
int sde_plane_kickoff_rot(struct drm_plane *plane)
{
struct sde_plane_state *pstate;
@@ -2673,6 +2790,12 @@ void sde_plane_clear_multirect(const struct drm_plane_state *drm_state)
pstate->multirect_mode = SDE_SSPP_MULTIRECT_NONE;
}
+/**
+ * The multirect validate API validates only the R0 and R1 RECTs
+ * passed for each plane. Callers of this API must not pass multiple
+ * planes that do not share the same XIN client. Such calls will fail
+ * even when the kernel client passes a valid multirect configuration.
+ */
int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
{
struct sde_plane_state *pstate[R_MAX];
@@ -2680,37 +2803,44 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
struct sde_rect src[R_MAX], dst[R_MAX];
struct sde_plane *sde_plane[R_MAX];
const struct sde_format *fmt[R_MAX];
+ int xin_id[R_MAX];
bool q16_data = true;
- int i, buffer_lines;
+ int i, j, buffer_lines, width_threshold[R_MAX];
unsigned int max_tile_height = 1;
bool parallel_fetch_qualified = true;
- bool has_tiled_rect = false;
+ enum sde_sspp_multirect_mode mode = SDE_SSPP_MULTIRECT_NONE;
+ const struct msm_format *msm_fmt;
for (i = 0; i < R_MAX; i++) {
- const struct msm_format *msm_fmt;
-
drm_state[i] = i ? plane->r1 : plane->r0;
- msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
- fmt[i] = to_sde_format(msm_fmt);
-
- if (SDE_FORMAT_IS_UBWC(fmt[i])) {
- has_tiled_rect = true;
- if (fmt[i]->tile_height > max_tile_height)
- max_tile_height = fmt[i]->tile_height;
+ if (!drm_state[i]) {
+ SDE_ERROR("drm plane state is NULL\n");
+ return -EINVAL;
}
- }
-
- for (i = 0; i < R_MAX; i++) {
- int width_threshold;
pstate[i] = to_sde_plane_state(drm_state[i]);
sde_plane[i] = to_sde_plane(drm_state[i]->plane);
+ xin_id[i] = sde_plane[i]->pipe_hw->cap->xin_id;
- if (pstate[i] == NULL) {
- SDE_ERROR("SDE plane state of plane id %d is NULL\n",
- drm_state[i]->plane->base.id);
+ for (j = 0; j < i; j++) {
+ if (xin_id[i] != xin_id[j]) {
+ SDE_ERROR_PLANE(sde_plane[i],
+ "invalid multirect validate call base:%d xin_id:%d curr:%d xin:%d\n",
+ j, xin_id[j], i, xin_id[i]);
+ return -EINVAL;
+ }
+ }
+
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ if (!msm_fmt) {
+ SDE_ERROR_PLANE(sde_plane[i], "null fb\n");
return -EINVAL;
}
+ fmt[i] = to_sde_format(msm_fmt);
+
+ if (SDE_FORMAT_IS_UBWC(fmt[i]) &&
+ (fmt[i]->tile_height > max_tile_height))
+ max_tile_height = fmt[i]->tile_height;
POPULATE_RECT(&src[i], drm_state[i]->src_x, drm_state[i]->src_y,
drm_state[i]->src_w, drm_state[i]->src_h, q16_data);
@@ -2737,41 +2867,81 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
* So we cannot support more than half of the supported SSPP
* width for tiled formats.
*/
- width_threshold = sde_plane[i]->pipe_sblk->maxlinewidth;
- if (has_tiled_rect)
- width_threshold /= 2;
+ width_threshold[i] = sde_plane[i]->pipe_sblk->maxlinewidth;
+ if (SDE_FORMAT_IS_UBWC(fmt[i]))
+ width_threshold[i] /= 2;
- if (parallel_fetch_qualified && src[i].w > width_threshold)
+ if (parallel_fetch_qualified && src[i].w > width_threshold[i])
parallel_fetch_qualified = false;
+ if (sde_plane[i]->is_virtual)
+ mode = sde_plane_get_property(pstate[i],
+ PLANE_PROP_MULTIRECT_MODE);
}
- /* Validate RECT's and set the mode */
-
- /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
- if (parallel_fetch_qualified) {
- pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
- pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
-
- goto done;
- }
-
- /* TIME_MX Mode */
buffer_lines = 2 * max_tile_height;
- if ((dst[R1].y >= dst[R0].y + dst[R0].h + buffer_lines) ||
- (dst[R0].y >= dst[R1].y + dst[R1].h + buffer_lines)) {
- pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
- pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
- } else {
- SDE_ERROR(
- "No multirect mode possible for the planes (%d - %d)\n",
- drm_state[R0]->plane->base.id,
- drm_state[R1]->plane->base.id);
- return -EINVAL;
+ /**
+ * fallback to driver mode selection logic if client is using
+ * multirect plane without setting property.
+ *
+ * validate multirect mode configuration based on rectangle
+ */
+ switch (mode) {
+ case SDE_SSPP_MULTIRECT_NONE:
+ if (parallel_fetch_qualified)
+ mode = SDE_SSPP_MULTIRECT_PARALLEL;
+ else if (TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) ||
+ TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines))
+ mode = SDE_SSPP_MULTIRECT_TIME_MX;
+ else
+ SDE_ERROR(
+ "planes(%d - %d) multirect mode selection fail\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ break;
+
+ case SDE_SSPP_MULTIRECT_PARALLEL:
+ if (!parallel_fetch_qualified) {
+ SDE_ERROR("R0 plane:%d width_threshold:%d src_w:%d\n",
+ drm_state[R0]->plane->base.id,
+ width_threshold[R0], src[R0].w);
+ SDE_ERROR("R1 plane:%d width_threshold:%d src_w:%d\n",
+ drm_state[R1]->plane->base.id,
+ width_threshold[R1], src[R1].w);
+ SDE_ERROR("parallel fetch not qualified\n");
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ }
+ break;
+
+ case SDE_SSPP_MULTIRECT_TIME_MX:
+ if (!TIME_MULTIPLEX_RECT(dst[R1], dst[R0], buffer_lines) &&
+ !TIME_MULTIPLEX_RECT(dst[R0], dst[R1], buffer_lines)) {
+ SDE_ERROR(
+ "buffer_lines:%d R0 plane:%d dst_y:%d dst_h:%d\n",
+ buffer_lines, drm_state[R0]->plane->base.id,
+ dst[R0].y, dst[R0].h);
+ SDE_ERROR(
+ "buffer_lines:%d R1 plane:%d dst_y:%d dst_h:%d\n",
+ buffer_lines, drm_state[R1]->plane->base.id,
+ dst[R1].y, dst[R1].h);
+ SDE_ERROR("time multiplexed fetch not qualified\n");
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ }
+ break;
+
+ default:
+ SDE_ERROR("bad mode:%d selection\n", mode);
+ mode = SDE_SSPP_MULTIRECT_NONE;
+ break;
}
-done:
+ for (i = 0; i < R_MAX; i++)
+ pstate[i]->multirect_mode = mode;
+
+ if (mode == SDE_SSPP_MULTIRECT_NONE)
+ return -EINVAL;
+
if (sde_plane[R0]->is_virtual) {
pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
@@ -2784,6 +2954,7 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
SDE_DEBUG_PLANE(sde_plane[R1], "R1: %d - %d\n",
pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+
return 0;
}
@@ -3389,10 +3560,6 @@ void sde_plane_flush(struct drm_plane *plane)
else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
- /* force black color fill during suspend */
- if (sde_kms_is_suspend_state(plane->dev) && suspend_blank)
- _sde_plane_color_fill(psde, 0x0, 0x0);
-
/* flag h/w flush complete */
if (plane->state)
pstate->pending = false;
@@ -3428,6 +3595,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
struct sde_rect src, dst;
const struct sde_rect *crtc_roi;
bool q16_data = true;
+ bool blend_enabled = true;
int idx;
if (!plane) {
@@ -3498,6 +3666,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
case PLANE_PROP_CSC_V1:
pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
break;
+ case PLANE_PROP_MULTIRECT_MODE:
case PLANE_PROP_COLOR_FILL:
/* potentially need to refresh everything */
pstate->dirty = SDE_PLANE_DIRTY_ALL;
@@ -3662,8 +3831,12 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
if (rstate->out_rotation & DRM_REFLECT_Y)
src_flags |= SDE_SSPP_FLIP_UD;
+ blend_enabled = (SDE_DRM_BLEND_OP_OPAQUE !=
+ sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP));
+
/* update format */
- psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
+ psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt,
+ blend_enabled, src_flags,
pstate->multirect_index);
if (psde->pipe_hw->ops.setup_cdp) {
@@ -3845,6 +4018,56 @@ void sde_plane_restore(struct drm_plane *plane)
sde_plane_atomic_update(plane, plane->state);
}
+int sde_plane_helper_reset_custom_properties(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ struct drm_property *drm_prop;
+ enum msm_mdp_plane_property prop_idx;
+
+ if (!plane || !plane_state) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(plane_state);
+
+ for (prop_idx = 0; prop_idx < PLANE_PROP_COUNT; prop_idx++) {
+ uint64_t val = pstate->property_values[prop_idx].value;
+ uint64_t def;
+ int ret;
+
+ drm_prop = msm_property_index_to_drm_property(
+ &psde->property_info, prop_idx);
+ if (!drm_prop) {
+ /* not all props will be installed, based on caps */
+ SDE_DEBUG_PLANE(psde, "invalid property index %d\n",
+ prop_idx);
+ continue;
+ }
+
+ def = msm_property_get_default(&psde->property_info, prop_idx);
+ if (val == def)
+ continue;
+
+ SDE_DEBUG_PLANE(psde, "set prop %s idx %d from %llu to %llu\n",
+ drm_prop->name, prop_idx, val, def);
+
+ ret = drm_atomic_plane_set_property(plane, plane_state,
+ drm_prop, def);
+ if (ret) {
+ SDE_ERROR_PLANE(psde,
+ "set property failed, idx %d ret %d\n",
+ prop_idx, ret);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
/* helper to install properties which are common to planes and crtcs */
static void _sde_plane_install_properties(struct drm_plane *plane,
struct sde_mdss_cfg *catalog, u32 master_plane_id)
@@ -3864,6 +4087,11 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
};
+ static const struct drm_prop_enum_list e_multirect_mode[] = {
+ {SDE_SSPP_MULTIRECT_NONE, "none"},
+ {SDE_SSPP_MULTIRECT_PARALLEL, "parallel"},
+ {SDE_SSPP_MULTIRECT_TIME_MX, "serial"},
+ };
const struct sde_format_extended *format_list;
struct sde_kms_info *info;
struct sde_plane *psde = to_sde_plane(plane);
@@ -4013,6 +4241,10 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
format_list = psde->pipe_sblk->virt_format_list;
sde_kms_info_add_keyint(info, "primary_smart_plane_id",
master_plane_id);
+ msm_property_install_enum(&psde->property_info,
+ "multirect_mode", 0x0, 0, e_multirect_mode,
+ ARRAY_SIZE(e_multirect_mode),
+ PLANE_PROP_MULTIRECT_MODE);
}
if (format_list) {
@@ -4313,15 +4545,6 @@ static int sde_plane_atomic_set_property(struct drm_plane *plane,
return ret;
}
-static int sde_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
-{
- SDE_DEBUG("\n");
-
- return sde_plane_atomic_set_property(plane,
- plane->state, property, val);
-}
-
static int sde_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, uint64_t *val)
@@ -4725,7 +4948,7 @@ static const struct drm_plane_funcs sde_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sde_plane_destroy,
- .set_property = sde_plane_set_property,
+ .set_property = drm_atomic_helper_plane_set_property,
.atomic_set_property = sde_plane_atomic_set_property,
.atomic_get_property = sde_plane_atomic_get_property,
.reset = sde_plane_reset,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index d6c5876..5c1fff1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -228,6 +228,23 @@ void sde_plane_restore(struct drm_plane *plane);
void sde_plane_flush(struct drm_plane *plane);
/**
+ * sde_plane_halt_requests - control halting of vbif transactions for this plane
+ * This function isn't thread safe. Plane halt enable/disable requests
+ * should always be made from the same commit cycle.
+ * @plane: Pointer to drm plane structure
+ * @enable: Whether to enable/disable halting of vbif transactions
+ */
+void sde_plane_halt_requests(struct drm_plane *plane, bool enable);
+
+/**
+ * sde_plane_reset_rot - reset rotator operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to plane state associated with reset request
+ * Returns: Zero on success
+ */
+int sde_plane_reset_rot(struct drm_plane *plane, struct drm_plane_state *state);
+
+/**
* sde_plane_kickoff_rot - final plane rotator operations before commit kickoff
* @plane: Pointer to drm plane structure
* Returns: Zero on success
@@ -294,4 +311,14 @@ int sde_plane_color_fill(struct drm_plane *plane,
*/
void sde_plane_set_revalidate(struct drm_plane *plane, bool enable);
+/**
+ * sde_plane_helper_reset_properties - reset properties to default values in the
+ * given DRM plane state object
+ * @plane: Pointer to DRM plane object
+ * @plane_state: Pointer to DRM plane state object
+ * Returns: 0 on success, negative errno on failure
+ */
+int sde_plane_helper_reset_custom_properties(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
#endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 21fbcb5..c2c1f75 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -175,6 +175,18 @@ void sde_rm_init_hw_iter(
iter->type = type;
}
+enum sde_rm_topology_name sde_rm_get_topology_name(
+ struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return SDE_RM_TOPOLOGY_NONE;
+}
+
static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
@@ -714,7 +726,8 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
static int _sde_rm_reserve_lms(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- struct sde_rm_requirements *reqs)
+ struct sde_rm_requirements *reqs,
+ u8 *_lm_ids)
{
struct sde_rm_hw_blk *lm[MAX_BLOCKS];
@@ -742,6 +755,14 @@ static int _sde_rm_reserve_lms(
lm_count = 0;
lm[lm_count] = iter_i.blk;
+ SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
+ iter_i.blk->id,
+ lm_count,
+ _lm_ids ? _lm_ids[lm_count] : -1);
+
+ if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
+ continue;
+
if (!_sde_rm_check_lm_and_get_connected_blks(
rm, rsvp, reqs, lm[lm_count],
&dspp[lm_count], &ds[lm_count],
@@ -765,6 +786,14 @@ static int _sde_rm_reserve_lms(
continue;
lm[lm_count] = iter_j.blk;
+ SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
+ iter_i.blk->id,
+ lm_count,
+ _lm_ids ? _lm_ids[lm_count] : -1);
+
+ if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
+ continue;
+
++lm_count;
}
}
@@ -818,7 +847,8 @@ static int _sde_rm_reserve_ctls(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct sde_rm_requirements *reqs,
- const struct sde_rm_topology_def *top)
+ const struct sde_rm_topology_def *top,
+ u8 *_ctl_ids)
{
struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
struct sde_rm_hw_iter iter;
@@ -845,7 +875,7 @@ static int _sde_rm_reserve_ctls(
* bypass rest feature checks on finding CTL preferred
* for primary displays.
*/
- if (!primary_pref) {
+ if (!primary_pref && !_ctl_ids) {
if (top->needs_split_display != has_split_display)
continue;
@@ -860,6 +890,14 @@ static int _sde_rm_reserve_ctls(
}
ctls[i] = iter.blk;
+
+ SDE_DEBUG("blk id = %d, _ctl_ids[%d] = %d\n",
+ iter.blk->id, i,
+ _ctl_ids ? _ctl_ids[i] : -1);
+
+ if (_ctl_ids && (ctls[i]->id != _ctl_ids[i]))
+ continue;
+
SDE_DEBUG("ctl %d match\n", iter.blk->id);
if (++i == top->num_ctl)
@@ -880,7 +918,8 @@ static int _sde_rm_reserve_ctls(
static int _sde_rm_reserve_dsc(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
- const struct sde_rm_topology_def *top)
+ const struct sde_rm_topology_def *top,
+ u8 *_dsc_ids)
{
struct sde_rm_hw_iter iter;
int alloc_count = 0;
@@ -895,6 +934,14 @@ static int _sde_rm_reserve_dsc(
if (RESERVED_BY_OTHER(iter.blk, rsvp))
continue;
+ SDE_DEBUG("blk id = %d, _dsc_ids[%d] = %d\n",
+ iter.blk->id,
+ alloc_count,
+ _dsc_ids ? _dsc_ids[alloc_count] : -1);
+
+ if (_dsc_ids && (iter.blk->id != _dsc_ids[alloc_count]))
+ continue;
+
iter.blk->rsvp_nxt = rsvp;
SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
@@ -1043,10 +1090,10 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without DSPPs
* - Only then allow to grab from mixers with DSPP capability
*/
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, NULL);
if (ret && !RM_RQ_DSPP(reqs)) {
reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
- ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, NULL);
}
if (ret) {
@@ -1059,11 +1106,11 @@ static int _sde_rm_make_next_rsvp(
* - Check mixers without Split Display
* - Only then allow to grab from CTLs with split display capability
*/
- _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology, NULL);
if (ret && !reqs->topology->needs_split_display) {
memcpy(&topology, reqs->topology, sizeof(topology));
topology.needs_split_display = true;
- _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology);
+ _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology, NULL);
}
if (ret) {
SDE_ERROR("unable to find appropriate CTL\n");
@@ -1075,7 +1122,104 @@ static int _sde_rm_make_next_rsvp(
if (ret)
return ret;
- ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology);
+ ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _sde_rm_make_next_rsvp_for_cont_splash(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+ struct sde_rm_topology_def topology;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ int i;
+
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
+ for (i = 0; i < sde_kms->splash_data.lm_cnt; i++)
+ SDE_DEBUG("splash_data.lm_ids[%d] = %d\n",
+ i, sde_kms->splash_data.lm_ids[i]);
+
+ if (sde_kms->splash_data.lm_cnt !=
+ reqs->topology->num_lm)
+ SDE_DEBUG("Configured splash screen LMs != needed LM cnt\n");
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs,
+ sde_kms->splash_data.lm_ids);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs,
+ sde_kms->splash_data.lm_ids);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ for (i = 0; i < sde_kms->splash_data.ctl_top_cnt; i++)
+ SDE_DEBUG("splash_data.ctl_ids[%d] = %d\n",
+ i, sde_kms->splash_data.ctl_ids[i]);
+
+	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology,
+ sde_kms->splash_data.ctl_ids);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+		ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology,
+ sde_kms->splash_data.ctl_ids);
+ }
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < sde_kms->splash_data.dsc_cnt; i++)
+ SDE_DEBUG("splash_data.dsc_ids[%d] = %d\n",
+ i, sde_kms->splash_data.dsc_ids[i]);
+
+ ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology,
+ sde_kms->splash_data.dsc_ids);
if (ret)
return ret;
@@ -1319,6 +1463,8 @@ int sde_rm_reserve(
{
struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct sde_rm_requirements reqs;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int ret;
if (!rm || !enc || !crtc_state || !conn_state) {
@@ -1326,8 +1472,20 @@ int sde_rm_reserve(
return -EINVAL;
}
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
/* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ if (!sde_kms->cont_splash_en &&
+ !drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
@@ -1378,8 +1536,14 @@ int sde_rm_reserve(
}
/* Check the proposed reservation, store it in hw's "next" field */
- ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ if (sde_kms->cont_splash_en) {
+ SDE_DEBUG("cont_splash feature enabled\n");
+ ret = _sde_rm_make_next_rsvp_for_cont_splash
+ (rm, enc, crtc_state, conn_state, rsvp_nxt, &reqs);
+ } else {
+ ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
rsvp_nxt, &reqs);
+ }
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 3b9b82f..0545609 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -107,6 +107,15 @@ struct sde_rm_hw_iter {
};
/**
+ * sde_rm_get_topology_name - get the name of the given topology config
+ * @topology: msm_display_topology topology config
+ * @Return: name of the given topology
+ */
+enum sde_rm_topology_name sde_rm_get_topology_name(
+ struct msm_display_topology topology);
+
+
+/**
* sde_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
* @rm: SDE Resource Manager handle
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index 2cdc2f3..0dbc027 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -102,15 +102,6 @@ int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id, u32 clk_ctrl)
"wait failed for pipe halt:xin_id %u, clk_ctrl %u, rc %u\n",
xin_id, clk_ctrl, rc);
SDE_EVT32(xin_id, clk_ctrl, rc, SDE_EVTLOG_ERROR);
- return rc;
- }
-
- status = vbif->ops.get_halt_ctrl(vbif, xin_id);
- if (status == 0) {
- SDE_ERROR("halt failed for pipe xin_id %u halt clk_ctrl %u\n",
- xin_id, clk_ctrl);
- SDE_EVT32(xin_id, clk_ctrl, SDE_EVTLOG_ERROR);
- return -ETIMEDOUT;
}
/* open xin client to enable transactions */
@@ -118,7 +109,7 @@ int sde_vbif_halt_plane_xin(struct sde_kms *sde_kms, u32 xin_id, u32 clk_ctrl)
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, clk_ctrl, false);
- return 0;
+ return rc;
}
/**
@@ -230,13 +221,15 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
if (sde_kms->hw_vbif[i] &&
- sde_kms->hw_vbif[i]->idx == params->vbif_idx)
+ sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
vbif = sde_kms->hw_vbif[i];
+ break;
+ }
}
if (!vbif || !mdp) {
SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
- vbif != 0, mdp != 0);
+ vbif != NULL, mdp != NULL);
return;
}
@@ -275,6 +268,58 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
return;
}
+bool sde_vbif_set_xin_halt(struct sde_kms *sde_kms,
+ struct sde_vbif_set_xin_halt_params *params)
+{
+ struct sde_hw_vbif *vbif = NULL;
+ struct sde_hw_mdp *mdp;
+ bool forced_on = false;
+ int ret, i;
+
+ if (!sde_kms || !params) {
+ SDE_ERROR("invalid arguments\n");
+ return false;
+ }
+ mdp = sde_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+ if (sde_kms->hw_vbif[i] &&
+ sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = sde_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !mdp) {
+ SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
+ vbif != NULL, mdp != NULL);
+ return false;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_halt_ctrl)
+ return false;
+
+ if (params->enable) {
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp,
+ params->clk_ctrl, true);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ SDE_EVT32(vbif->idx, params->xin_id, SDE_EVTLOG_ERROR);
+ } else {
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (params->forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp,
+ params->clk_ctrl, false);
+ }
+
+ return forced_on;
+}
+
void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
struct sde_vbif_set_qos_params *params)
{
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index 30cc416..0edc1a6 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -35,6 +35,23 @@ struct sde_vbif_set_memtype_params {
};
/**
+ * struct sde_vbif_set_xin_halt_params - xin halt parameters
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @forced_on: whether or not previous call to xin halt forced the clocks on,
+ * only applicable to xin halt disable calls
+ * @enable: whether to enable/disable xin halts
+ */
+struct sde_vbif_set_xin_halt_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ bool forced_on;
+ bool enable;
+};
+
+/**
* struct sde_vbif_set_qos_params - QoS remapper parameter
* @vbif_idx: vbif identifier
* @xin_id: client interface identifier
@@ -59,6 +76,16 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
struct sde_vbif_set_ot_params *params);
/**
+ * sde_vbif_set_xin_halt - halt one of the xin ports
+ * This function isn't thread safe.
+ * @sde_kms: SDE handler
+ * @params: Pointer to halt configuration parameters
+ * Returns: Whether or not VBIF clocks were forced on
+ */
+bool sde_vbif_set_xin_halt(struct sde_kms *sde_kms,
+ struct sde_vbif_set_xin_halt_params *params);
+
+/**
* sde_vbif_set_qos_remap - set QoS priority level remap
* @sde_kms: SDE handler
* @params: Pointer to QoS configuration parameters
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 8c0854f..71c8b63 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -352,49 +352,20 @@ int sde_wb_get_mode_info(const struct drm_display_mode *drm_mode,
return 0;
}
-int sde_wb_connector_post_init(struct drm_connector *connector,
- void *info,
- void *display)
+int sde_wb_connector_set_info_blob(struct drm_connector *connector,
+ void *info, void *display, struct msm_mode_info *mode_info)
{
- struct sde_connector *c_conn;
struct sde_wb_device *wb_dev = display;
const struct sde_format_extended *format_list;
- static const struct drm_prop_enum_list e_fb_translation_mode[] = {
- {SDE_DRM_FB_NON_SEC, "non_sec"},
- {SDE_DRM_FB_SEC, "sec"},
- };
if (!connector || !info || !display || !wb_dev->wb_cfg) {
SDE_ERROR("invalid params\n");
return -EINVAL;
}
- c_conn = to_sde_connector(connector);
- wb_dev->connector = connector;
- wb_dev->detect_status = connector_status_connected;
format_list = wb_dev->wb_cfg->format_list;
/*
- * Add extra connector properties
- */
- msm_property_install_range(&c_conn->property_info, "FB_ID",
- 0x0, 0, ~0, ~0, CONNECTOR_PROP_OUT_FB);
- msm_property_install_range(&c_conn->property_info, "DST_X",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
- msm_property_install_range(&c_conn->property_info, "DST_Y",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
- msm_property_install_range(&c_conn->property_info, "DST_W",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
- msm_property_install_range(&c_conn->property_info, "DST_H",
- 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
- msm_property_install_enum(&c_conn->property_info,
- "fb_translation_mode",
- 0x0,
- 0, e_fb_translation_mode,
- ARRAY_SIZE(e_fb_translation_mode),
- CONNECTOR_PROP_FB_TRANSLATION_MODE);
-
- /*
* Populate info buffer
*/
if (format_list) {
@@ -424,6 +395,47 @@ int sde_wb_connector_post_init(struct drm_connector *connector,
return 0;
}
+int sde_wb_connector_post_init(struct drm_connector *connector, void *display)
+{
+ struct sde_connector *c_conn;
+ struct sde_wb_device *wb_dev = display;
+ static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+ {SDE_DRM_FB_NON_SEC, "non_sec"},
+ {SDE_DRM_FB_SEC, "sec"},
+ };
+
+ if (!connector || !display || !wb_dev->wb_cfg) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ wb_dev->connector = connector;
+ wb_dev->detect_status = connector_status_connected;
+
+ /*
+ * Add extra connector properties
+ */
+ msm_property_install_range(&c_conn->property_info, "FB_ID",
+ 0x0, 0, ~0, 0, CONNECTOR_PROP_OUT_FB);
+ msm_property_install_range(&c_conn->property_info, "DST_X",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
+ msm_property_install_range(&c_conn->property_info, "DST_Y",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
+ msm_property_install_range(&c_conn->property_info, "DST_W",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
+ msm_property_install_range(&c_conn->property_info, "DST_H",
+ 0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
+ msm_property_install_enum(&c_conn->property_info,
+ "fb_translation_mode",
+ 0x0,
+ 0, e_fb_translation_mode,
+ ARRAY_SIZE(e_fb_translation_mode),
+ CONNECTOR_PROP_FB_TRANSLATION_MODE);
+
+ return 0;
+}
+
struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
{
struct drm_framebuffer *fb;
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
index c3f9e06..d414bd0 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -131,13 +131,23 @@ int sde_wb_config(struct drm_device *drm_dev, void *data,
/**
* sde_wb_connector_post_init - perform writeback specific initialization
* @connector: Pointer to drm connector structure
- * @info: Pointer to connector info
* @display: Pointer to private display structure
* Returns: Zero on success
*/
-int sde_wb_connector_post_init(struct drm_connector *connector,
+int sde_wb_connector_post_init(struct drm_connector *connector, void *display);
+
+/**
+ * sde_wb_connector_set_info_blob - perform writeback info blob initialization
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to connector info
+ * @display: Pointer to private display structure
+ * @mode_info: Pointer to the mode info structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_set_info_blob(struct drm_connector *connector,
void *info,
- void *display);
+ void *display,
+ struct msm_mode_info *mode_info);
/**
* sde_wb_connector_detect - perform writeback connection status detection
@@ -280,7 +290,8 @@ int sde_wb_config(struct drm_device *drm_dev, void *data,
static inline
int sde_wb_connector_post_init(struct drm_connector *connector,
void *info,
- void *display)
+ void *display,
+ struct msm_mode_info *mode_info)
{
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index c2ce9f0..6b5be3b 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2034,12 +2034,13 @@ static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
/**
* _sde_dbg_enable_power - use callback to turn power on for hw register access
* @enable: whether to turn power on or off
+ * Return: zero if success; error code otherwise
*/
-static inline void _sde_dbg_enable_power(int enable)
+static inline int _sde_dbg_enable_power(int enable)
{
if (!sde_dbg_base.power_ctrl.enable_fn)
- return;
- sde_dbg_base.power_ctrl.enable_fn(
+ return -EINVAL;
+ return sde_dbg_base.power_ctrl.enable_fn(
sde_dbg_base.power_ctrl.handle,
sde_dbg_base.power_ctrl.client,
enable);
@@ -2063,6 +2064,7 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
u32 *dump_addr = NULL;
char *end_addr;
int i;
+ int rc;
if (!len_bytes)
return;
@@ -2103,8 +2105,13 @@ static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
}
}
- if (!from_isr)
- _sde_dbg_enable_power(true);
+ if (!from_isr) {
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
+ }
for (i = 0; i < len_align; i++) {
u32 x0, x4, x8, xc;
@@ -2288,6 +2295,7 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
u32 offset;
void __iomem *mem_base = NULL;
struct sde_dbg_reg_base *reg_base;
+ int rc;
if (!bus || !bus->cmn.entries_size)
return;
@@ -2333,7 +2341,12 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
}
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
+
for (i = 0; i < bus->cmn.entries_size; i++) {
head = bus->entries + i;
writel_relaxed(TEST_MASK(head->block_id, head->test_id),
@@ -2427,6 +2440,7 @@ static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
struct vbif_debug_bus_entry *dbg_bus;
u32 bus_size;
struct sde_dbg_reg_base *reg_base;
+ int rc;
if (!bus || !bus->cmn.entries_size)
return;
@@ -2484,7 +2498,11 @@ static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
}
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ pr_err("failed to enable power %d\n", rc);
+ return;
+ }
value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
@@ -2558,7 +2576,8 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
mutex_lock(&sde_dbg_base.mutex);
- sde_evtlog_dump_all(sde_dbg_base.evtlog);
+ if (dump_all)
+ sde_evtlog_dump_all(sde_dbg_base.evtlog);
if (dump_all || !blk_arr || !len) {
_sde_dump_reg_all();
@@ -2711,7 +2730,7 @@ static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
return -EINVAL;
len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
- SDE_EVTLOG_BUF_MAX);
+ SDE_EVTLOG_BUF_MAX, true);
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
@@ -2968,6 +2987,7 @@ static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
size_t off;
u32 data, cnt;
char buf[24];
+ int rc;
if (!file)
return -EINVAL;
@@ -2998,7 +3018,12 @@ static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
return -EFAULT;
}
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ mutex_unlock(&sde_dbg_base.mutex);
+ pr_err("failed to enable power %d\n", rc);
+ return rc;
+ }
writel_relaxed(data, dbg->base + off);
@@ -3023,6 +3048,7 @@ static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
{
struct sde_dbg_reg_base *dbg;
size_t len;
+ int rc;
if (!file)
return -EINVAL;
@@ -3059,7 +3085,12 @@ static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
ptr = dbg->base + dbg->off;
tot = 0;
- _sde_dbg_enable_power(true);
+ rc = _sde_dbg_enable_power(true);
+ if (rc) {
+ mutex_unlock(&sde_dbg_base.mutex);
+ pr_err("failed to enable power %d\n", rc);
+ return rc;
+ }
for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 786451c3..7b1b4c6 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -17,7 +17,8 @@
#include <linux/debugfs.h>
#include <linux/list.h>
-#define SDE_EVTLOG_DATA_LIMITER (-1)
+/* select an uncommon hex value for the limiter */
+#define SDE_EVTLOG_DATA_LIMITER (0xC0DEBEEF)
#define SDE_EVTLOG_FUNC_ENTRY 0x1111
#define SDE_EVTLOG_FUNC_EXIT 0x2222
#define SDE_EVTLOG_FUNC_CASE1 0x3333
@@ -66,7 +67,7 @@ enum sde_dbg_dump_flag {
* number must be greater than print entry to prevent out of bound evtlog
* entry array access.
*/
-#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 8)
#define SDE_EVTLOG_MAX_DATA 15
#define SDE_EVTLOG_BUF_MAX 512
#define SDE_EVTLOG_BUF_ALIGN 32
@@ -87,12 +88,14 @@ struct sde_dbg_evtlog_log {
};
/**
+ * @last_dump: Index of last entry to be output during evtlog dumps
* @filter_list: Linked list of currently active filter strings
*/
struct sde_dbg_evtlog {
struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
u32 first;
u32 last;
+ u32 last_dump;
u32 curr;
u32 next;
u32 enable;
@@ -197,10 +200,12 @@ bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag);
* @evtlog: pointer to evtlog
* @evtlog_buf: target buffer to print into
* @evtlog_buf_size: size of target buffer
+ * @update_last_entry: whether or not to stop at most recent entry
* Returns: number of bytes written to buffer
*/
ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size);
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry);
/**
* sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
@@ -352,7 +357,8 @@ static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
}
static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size)
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry)
{
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 67c664f..9a75179 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -105,27 +105,32 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
}
/* always dump the last entries which are not dumped yet */
-static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
+static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
+ bool update_last_entry)
{
if (!evtlog)
return false;
evtlog->first = evtlog->next;
- if (evtlog->last == evtlog->first)
+ if (update_last_entry)
+ evtlog->last_dump = evtlog->last;
+
+ if (evtlog->last_dump == evtlog->first)
return false;
- if (evtlog->last < evtlog->first) {
+ if (evtlog->last_dump < evtlog->first) {
evtlog->first %= SDE_EVTLOG_ENTRY;
- if (evtlog->last < evtlog->first)
- evtlog->last += SDE_EVTLOG_ENTRY;
+ if (evtlog->last_dump < evtlog->first)
+ evtlog->last_dump += SDE_EVTLOG_ENTRY;
}
- if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+ if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
pr_info("evtlog skipping %d entries, last=%d\n",
- evtlog->last - evtlog->first - SDE_EVTLOG_PRINT_ENTRY,
- evtlog->last - 1);
- evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
+ evtlog->last_dump - evtlog->first -
+ SDE_EVTLOG_PRINT_ENTRY,
+ evtlog->last_dump - 1);
+ evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
}
evtlog->next = evtlog->first + 1;
@@ -133,7 +138,8 @@ static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog)
}
ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
- char *evtlog_buf, ssize_t evtlog_buf_size)
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry)
{
int i;
ssize_t off = 0;
@@ -146,7 +152,7 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
spin_lock_irqsave(&evtlog->spin_lock, flags);
/* update markers, exit if nothing to print */
- if (!_sde_evtlog_dump_calc_range(evtlog))
+ if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
goto exit;
log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
@@ -179,12 +185,16 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
{
char buf[SDE_EVTLOG_BUF_MAX];
+ bool update_last_entry = true;
if (!evtlog)
return;
- while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf)))
+ while (sde_evtlog_dump_to_buffer(
+ evtlog, buf, sizeof(buf), update_last_entry)) {
pr_info("%s", buf);
+ update_last_entry = false;
+ }
}
struct sde_dbg_evtlog *sde_evtlog_init(void)
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 791a6ca..c2ba3b97 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -566,45 +566,6 @@ int _sde_edid_update_modes(struct drm_connector *connector,
return rc;
}
-u32 sde_get_sink_bpc(void *input)
-{
- struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
- struct edid *edid = edid_ctrl->edid;
-
- if (!edid) {
- SDE_ERROR("invalid edid input\n");
- return 0;
- }
-
- if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL))
- return 0;
-
- if (edid->revision < 4) {
- if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)
- return 8;
- else
- return 0;
- }
-
- switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
- case DRM_EDID_DIGITAL_DEPTH_6:
- return 6;
- case DRM_EDID_DIGITAL_DEPTH_8:
- return 8;
- case DRM_EDID_DIGITAL_DEPTH_10:
- return 10;
- case DRM_EDID_DIGITAL_DEPTH_12:
- return 12;
- case DRM_EDID_DIGITAL_DEPTH_14:
- return 14;
- case DRM_EDID_DIGITAL_DEPTH_16:
- return 16;
- case DRM_EDID_DIGITAL_DEPTH_UNDEF:
- default:
- return 0;
- }
-}
-
u8 sde_get_edid_checksum(void *input)
{
struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 07bdf50..fd56116 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -138,14 +138,6 @@ void sde_free_edid(void **edid_ctrl);
bool sde_detect_hdmi_monitor(void *edid_ctrl);
/**
- * sde_get_sink_bpc() - return the bpc of sink device.
- * @edid_ctrl: Handle to the edid_ctrl structure.
- *
- * Return: bpc supported by the sink.
- */
-u32 sde_get_sink_bpc(void *edid_ctrl);
-
-/**
* sde_get_edid_checksum() - return the checksum of last block of EDID.
* @input: Handle to the edid_ctrl structure.
*
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 05d290b..6c44260 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -42,6 +42,10 @@ enum sde_hdcp_states {
struct sde_hdcp_init_data {
struct dss_io_data *core_io;
+ struct dss_io_data *dp_ahb;
+ struct dss_io_data *dp_aux;
+ struct dss_io_data *dp_link;
+ struct dss_io_data *dp_p0;
struct dss_io_data *qfprom_io;
struct dss_io_data *hdcp_io;
struct drm_dp_aux *drm_aux;
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 3673d125..c012f9d 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -256,12 +256,15 @@ static int sde_hdcp_1x_load_keys(void *input)
u32 ksv_lsb_addr, ksv_msb_addr;
u32 aksv_lsb, aksv_msb;
u8 aksv[5];
- struct dss_io_data *io;
+ struct dss_io_data *dp_ahb;
+ struct dss_io_data *dp_aux;
+ struct dss_io_data *dp_link;
struct dss_io_data *qfprom_io;
struct sde_hdcp_1x *hdcp = input;
struct sde_hdcp_reg_set *reg_set;
- if (!hdcp || !hdcp->init_data.core_io ||
+ if (!hdcp || !hdcp->init_data.dp_ahb ||
+ !hdcp->init_data.dp_aux ||
!hdcp->init_data.qfprom_io) {
pr_err("invalid input\n");
rc = -EINVAL;
@@ -276,7 +279,9 @@ static int sde_hdcp_1x_load_keys(void *input)
goto end;
}
- io = hdcp->init_data.core_io;
+ dp_ahb = hdcp->init_data.dp_ahb;
+ dp_aux = hdcp->init_data.dp_aux;
+ dp_link = hdcp->init_data.dp_link;
qfprom_io = hdcp->init_data.qfprom_io;
reg_set = &hdcp->reg_set;
@@ -327,18 +332,18 @@ static int sde_hdcp_1x_load_keys(void *input)
goto end;
}
- DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb);
- DSS_REG_W(io, reg_set->aksv_msb, aksv_msb);
+ DSS_REG_W(dp_aux, reg_set->aksv_lsb, aksv_lsb);
+ DSS_REG_W(dp_aux, reg_set->aksv_msb, aksv_msb);
/* Setup seed values for random number An */
- DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF);
- DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE);
+ DSS_REG_W(dp_link, reg_set->entropy_ctrl0, 0xB1FFB0FF);
+ DSS_REG_W(dp_link, reg_set->entropy_ctrl1, 0xF00DFACE);
/* make sure hw is programmed */
wmb();
/* enable hdcp engine */
- DSS_REG_W(io, reg_set->ctrl, 0x1);
+ DSS_REG_W(dp_ahb, reg_set->ctrl, 0x1);
hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
end:
@@ -415,7 +420,7 @@ static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp)
struct dss_io_data *io;
struct sde_hdcp_int_set *isr;
- io = hdcp->init_data.core_io;
+ io = hdcp->init_data.dp_ahb;
isr = &hdcp->int_set;
intr_reg = DSS_REG_R(io, isr->int_reg);
@@ -462,7 +467,8 @@ static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
int rc;
u32 link0_status;
struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
- struct dss_io_data *io = hdcp->init_data.core_io;
+ struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
+ struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
pr_err("invalid state\n");
@@ -470,7 +476,7 @@ static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
}
/* Wait for HDCP keys to be checked and validated */
- rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+ rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
((link0_status >> reg_set->keys_offset) & 0x7)
== HDCP_KEYS_STATE_VALID ||
!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
@@ -484,10 +490,10 @@ static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
* 1.1_Features turned off by default.
* No need to write AInfo since 1.1_Features is disabled.
*/
- DSS_REG_W(io, reg_set->data4, 0);
+ DSS_REG_W(dp_aux, reg_set->data4, 0);
/* Wait for An0 and An1 bit to be ready */
- rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+ rc = readl_poll_timeout(dp_ahb->base + reg_set->status, link0_status,
(link0_status & (BIT(8) | BIT(9))) ||
!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
@@ -554,7 +560,8 @@ static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp)
static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
{
- struct dss_io_data *io = hdcp->init_data.core_io;
+ struct dss_io_data *dp_ahb = hdcp->init_data.dp_ahb;
+ struct dss_io_data *dp_aux = hdcp->init_data.dp_aux;
struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
@@ -562,21 +569,21 @@ static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
return -EINVAL;
}
- hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+ hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
udelay(1);
- hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+ hdcp->an_0 = DSS_REG_R(dp_ahb, reg_set->data5);
}
- hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+ hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
udelay(1);
- hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+ hdcp->an_1 = DSS_REG_R(dp_ahb, reg_set->data6);
}
/* Read AKSV */
- hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3);
- hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4);
+ hdcp->aksv_0 = DSS_REG_R(dp_aux, reg_set->data3);
+ hdcp->aksv_1 = DSS_REG_R(dp_aux, reg_set->data4);
return 0;
}
@@ -649,7 +656,7 @@ static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp)
u32 const r0_read_delay_us = 1;
u32 const r0_read_timeout_us = r0_read_delay_us * 10;
struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
- struct dss_io_data *io = hdcp->init_data.core_io;
+ struct dss_io_data *io = hdcp->init_data.dp_ahb;
if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
pr_err("invalid state\n");
@@ -910,7 +917,7 @@ static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp)
int i, rc = 0;
u8 *ksv_fifo = hdcp->current_tp.ksv_list;
u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
- struct dss_io_data *io = hdcp->init_data.core_io;
+ struct dss_io_data *io = hdcp->init_data.dp_ahb;
struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
u32 sha_status = 0, status;
@@ -1087,7 +1094,8 @@ static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp)
{
- if (!hdcp || !hdcp->init_data.core_io) {
+ if (!hdcp || !hdcp->init_data.dp_ahb || !hdcp->init_data.dp_aux ||
+ !hdcp->init_data.dp_link || !hdcp->init_data.dp_p0) {
pr_err("invalid input\n");
return;
}
@@ -1146,6 +1154,7 @@ static void sde_hdcp_1x_auth_work(struct work_struct *work)
DSS_REG_W_ND(io, REG_HDMI_DDC_ARBITRATION, DSS_REG_R(io,
REG_HDMI_DDC_ARBITRATION) & ~(BIT(4)));
else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ io = hdcp->init_data.dp_aux;
DSS_REG_W(io, DP_DP_HPD_REFTIMER, 0x10013);
}
@@ -1224,12 +1233,12 @@ static int sde_hdcp_1x_reauthenticate(void *input)
struct sde_hdcp_int_set *isr;
u32 ret = 0, reg;
- if (!hdcp || !hdcp->init_data.core_io) {
+ if (!hdcp || !hdcp->init_data.dp_ahb) {
pr_err("invalid input\n");
return -EINVAL;
}
- io = hdcp->init_data.core_io;
+ io = hdcp->init_data.dp_ahb;
reg_set = &hdcp->reg_set;
isr = &hdcp->int_set;
@@ -1264,12 +1273,12 @@ static void sde_hdcp_1x_off(void *input)
int rc = 0;
u32 reg;
- if (!hdcp || !hdcp->init_data.core_io) {
+ if (!hdcp || !hdcp->init_data.dp_ahb) {
pr_err("invalid input\n");
return;
}
- io = hdcp->init_data.core_io;
+ io = hdcp->init_data.dp_ahb;
reg_set = &hdcp->reg_set;
isr = &hdcp->int_set;
@@ -1327,13 +1336,13 @@ static int sde_hdcp_1x_isr(void *input)
struct sde_hdcp_reg_set *reg_set;
struct sde_hdcp_int_set *isr;
- if (!hdcp || !hdcp->init_data.core_io) {
+ if (!hdcp || !hdcp->init_data.dp_ahb) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
}
- io = hdcp->init_data.core_io;
+ io = hdcp->init_data.dp_ahb;
reg_set = &hdcp->reg_set;
isr = &hdcp->int_set;
@@ -1531,8 +1540,7 @@ void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
.off = sde_hdcp_1x_off
};
- if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
- !init_data->mutex || !init_data->notify_status ||
+ if (!init_data || !init_data->mutex || !init_data->notify_status ||
!init_data->workq || !init_data->cb_data) {
pr_err("invalid input\n");
goto error;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 7a0da3d..34a826d 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -360,7 +360,7 @@ static int _sde_power_data_bus_set_quota(
ab_quota_nrt = max_t(u64, ab_quota_nrt,
SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
ib_quota_nrt = max_t(u64, ib_quota_nrt,
- SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+ SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA);
} else {
ab_quota_rt = min_t(u64, ab_quota_rt,
SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
@@ -379,30 +379,30 @@ static int _sde_power_data_bus_set_quota(
struct msm_bus_vectors *vect = NULL;
struct msm_bus_scale_pdata *bw_table =
pdbus->data_bus_scale_table;
- u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
- u32 total_axi_port_cnt = pdbus->axi_port_cnt;
- u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+ u32 nrt_data_paths_cnt = pdbus->nrt_data_paths_cnt;
+ u32 total_data_paths_cnt = pdbus->data_paths_cnt;
+ u32 rt_data_paths_cnt = total_data_paths_cnt -
+ nrt_data_paths_cnt;
- if (!bw_table || !total_axi_port_cnt ||
- total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+ if (!bw_table || !total_data_paths_cnt ||
+ total_data_paths_cnt > MAX_AXI_PORT_COUNT) {
pr_err("invalid input\n");
return -EINVAL;
}
- if (pdbus->bus_channels) {
+ if (nrt_data_paths_cnt) {
+
+ ab_quota_rt = div_u64(ab_quota_rt, rt_data_paths_cnt);
+ ab_quota_nrt = div_u64(ab_quota_nrt,
+ nrt_data_paths_cnt);
+
ib_quota_rt = div_u64(ib_quota_rt,
- pdbus->bus_channels);
+ rt_data_paths_cnt);
ib_quota_nrt = div_u64(ib_quota_nrt,
- pdbus->bus_channels);
- }
+ nrt_data_paths_cnt);
- if (nrt_axi_port_cnt) {
-
- ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
- ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
-
- for (i = 0; i < total_axi_port_cnt; i++) {
- if (i < rt_axi_port_cnt) {
+ for (i = 0; i < total_data_paths_cnt; i++) {
+ if (i < rt_data_paths_cnt) {
ab_quota[i] = ab_quota_rt;
ib_quota[i] = ib_quota_rt;
} else {
@@ -412,10 +412,11 @@ static int _sde_power_data_bus_set_quota(
}
} else {
ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
- total_axi_port_cnt);
- ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+ total_data_paths_cnt);
+ ib_quota[0] = div_u64(ib_quota_rt + ib_quota_nrt,
+ total_data_paths_cnt);
- for (i = 1; i < total_axi_port_cnt; i++) {
+ for (i = 1; i < total_data_paths_cnt; i++) {
ab_quota[i] = ab_quota[0];
ib_quota[i] = ib_quota[0];
}
@@ -424,7 +425,7 @@ static int _sde_power_data_bus_set_quota(
new_uc_idx = (pdbus->curr_bw_uc_idx %
(bw_table->num_usecases - 1)) + 1;
- for (i = 0; i < total_axi_port_cnt; i++) {
+ for (i = 0; i < total_data_paths_cnt; i++) {
vect = &bw_table->usecase[new_uc_idx].vectors[i];
vect->ab = ab_quota[i];
vect->ib = ib_quota[i];
@@ -432,8 +433,8 @@ static int _sde_power_data_bus_set_quota(
pr_debug(
"%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
bw_table->name,
- new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
- , i, vect->ab, vect->ib);
+ new_uc_idx, (i < rt_data_paths_cnt) ?
+ "rt" : "nrt", i, vect->ab, vect->ib);
}
}
pdbus->curr_bw_uc_idx = new_uc_idx;
@@ -518,10 +519,10 @@ static int sde_power_data_bus_parse(struct platform_device *pdev,
rc = 0;
}
- pdbus->nrt_axi_port_cnt = 0;
+ pdbus->nrt_data_paths_cnt = 0;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,sde-num-nrt-paths",
- &pdbus->nrt_axi_port_cnt);
+ &pdbus->nrt_data_paths_cnt);
if (rc) {
pr_debug("number of axi port property not specified\n");
rc = 0;
@@ -535,7 +536,7 @@ static int sde_power_data_bus_parse(struct platform_device *pdev,
pr_err("Error. qcom,msm-bus,num-paths not found\n");
return rc;
}
- pdbus->axi_port_cnt = paths;
+ pdbus->data_paths_cnt = paths;
pdbus->data_bus_scale_table =
msm_bus_pdata_from_node(pdev, node);
@@ -982,6 +983,16 @@ int sde_power_resource_enable(struct sde_power_handle *phandle,
return rc;
}
+int sde_power_resource_is_enabled(struct sde_power_handle *phandle)
+{
+ if (!phandle) {
+ pr_err("invalid input argument\n");
+ return false;
+ }
+
+ return phandle->current_usecase_ndx != VOTE_INDEX_DISABLE;
+}
+
int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
u64 rate)
{
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 18777fd..72975e7 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -18,7 +18,8 @@
#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
#define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 400000000
+#define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA 0
#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
#include <linux/sde_io_util.h>
@@ -101,8 +102,8 @@ struct sde_power_client {
* struct sde_power_data_handle: power handle struct for data bus
* @data_bus_scale_table: pointer to bus scaling table
* @data_bus_hdl: current data bus handle
- * @axi_port_cnt: number of rt axi ports
- * @nrt_axi_port_cnt: number of nrt axi ports
+ * @data_paths_cnt: number of rt data path ports
+ * @nrt_data_paths_cnt: number of nrt data path ports
* @bus_channels: number of memory bus channels
* @curr_bw_uc_idx: current use case index of data bus
* @ao_bw_uc_idx: active only use case index of data bus
@@ -115,8 +116,8 @@ struct sde_power_client {
struct sde_power_data_bus_handle {
struct msm_bus_scale_pdata *data_bus_scale_table;
u32 data_bus_hdl;
- u32 axi_port_cnt;
- u32 nrt_axi_port_cnt;
+ u32 data_paths_cnt;
+ u32 nrt_data_paths_cnt;
u32 bus_channels;
u32 curr_bw_uc_idx;
u32 ao_bw_uc_idx;
@@ -224,6 +225,14 @@ int sde_power_resource_enable(struct sde_power_handle *pdata,
struct sde_power_client *pclient, bool enable);
/**
+ * sde_power_resource_is_enabled() - return true if power resource is enabled
+ * @pdata: power handle containing the resources
+ *
+ * Return: true if enabled; false otherwise
+ */
+int sde_power_resource_is_enabled(struct sde_power_handle *pdata);
+
+/**
* sde_power_data_bus_state_update() - update data bus state
* @pdata: power handle containing the resources
* @enable: take enable vs disable path
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index e957779..a0d1245 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -187,32 +187,32 @@ static int rsc_hw_seq_memory_init(struct sde_rsc_priv *rsc)
0x39e038a8, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
0x888babec, rsc->debug_mode);
- dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
- 0xa806a020, rsc->debug_mode);
/* Mode - 2 sequence */
+ dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
+ 0xaaa8a020, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
- 0xa138ebaa, rsc->debug_mode);
+ 0xe1a138eb, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
- 0xaca581e1, rsc->debug_mode);
+ 0xe0aca581, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
- 0xe2a2ede0, rsc->debug_mode);
+ 0x82e2a2ed, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
- 0xea8a3982, rsc->debug_mode);
+ 0x8cea8a39, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
- 0xa920888c, rsc->debug_mode);
+ 0xe9a92088, rsc->debug_mode);
- /* tcs sleep sequence */
+ /* tcs sleep & wake sequence */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
- 0x89e6a6e9, rsc->debug_mode);
+ 0x89e686a6, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
0xa7e9a920, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
- 0x002089e7, rsc->debug_mode);
+ 0x2089e787, rsc->debug_mode);
/* branch address */
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
- 0x2b, rsc->debug_mode);
+ 0x2a, rsc->debug_mode);
dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
0x31, rsc->debug_mode);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a..44e116f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
return nvkm_xtensa_new_(&g84_bsp, device, index,
- true, 0x103000, pengine);
+ device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 6584d50..133f896 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1129,7 +1129,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
if (trap & 0x00000008) {
u32 stat = nvkm_rd32(device, 0x408030);
- nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
+ nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
stat & 0x3fffffff);
nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
nvkm_wr32(device, 0x408030, 0xc0000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 5df9669..240872a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -240,6 +240,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
+ mmu->func->flush(vm);
+
nvkm_memory_del(&pgt);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 3b21ca5..82b0112 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1674,7 +1674,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (freeze && rdev->family >= CHIP_CEDAR) {
+ if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
rdev->asic->asic_reset(rdev, true);
pci_restore_state(dev->pdev);
} else if (suspend) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index c3b2186..1feec34 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -47,6 +47,13 @@ static void sun4i_drv_disable_vblank(struct drm_device *drm, unsigned int pipe)
sun4i_tcon_enable_vblank(tcon, false);
}
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+ struct sun4i_drv *drv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
static const struct file_operations sun4i_drv_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -65,6 +72,7 @@ static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
+ .lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 36005bd..29abd28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 728e897..ee696e2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -68,6 +68,7 @@
#define A6XX_CP_MEM_POOL_SIZE 0x8C3
#define A6XX_CP_CHICKEN_DBG 0x841
#define A6XX_CP_ADDR_MODE_CNTL 0x842
+#define A6XX_CP_DBG_ECO_CNTL 0x843
#define A6XX_CP_PROTECT_CNTL 0x84F
#define A6XX_CP_PROTECT_REG 0x850
#define A6XX_CP_CONTEXT_SWITCH_CNTL 0x8A0
@@ -676,6 +677,7 @@
#define A6XX_UCHE_PERFCTR_UCHE_SEL_9 0xE25
#define A6XX_UCHE_PERFCTR_UCHE_SEL_10 0xE26
#define A6XX_UCHE_PERFCTR_UCHE_SEL_11 0xE27
+#define A6XX_UCHE_GBIF_GX_CONFIG 0xE3A
/* SP registers */
#define A6XX_SP_ADDR_MODE_CNTL 0xAE01
@@ -764,6 +766,12 @@
#define A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x3119
#define A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x311a
+/* GBIF countables */
+#define GBIF_AXI0_READ_DATA_TOTAL_BEATS 34
+#define GBIF_AXI1_READ_DATA_TOTAL_BEATS 35
+#define GBIF_AXI0_WRITE_DATA_TOTAL_BEATS 46
+#define GBIF_AXI1_WRITE_DATA_TOTAL_BEATS 47
+
/* GBIF registers */
#define A6XX_GBIF_HALT 0x3c45
#define A6XX_GBIF_HALT_ACK 0x3c46
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index a56a593..770cf3b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -346,8 +346,9 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.major = 3,
.minor = 0,
.patchid = ANY_ID,
- .features = ADRENO_64BIT | ADRENO_RPMH |
- ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM,
+ .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_IFPC |
+ ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | ADRENO_LM |
+ ADRENO_IOCOHERENT,
.sqefw_name = "a630_sqe.fw",
.zap_name = "a630_zap",
.gpudev = &adreno_a6xx_gpudev,
@@ -355,8 +356,8 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.num_protected_regs = 0x20,
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
- .gpmu_major = 0x0,
- .gpmu_minor = 0x005,
+ .gpmu_major = 0x1,
+ .gpmu_minor = 0x001,
.gpmu_tsens = 0x000C000D,
.max_power = 5448,
},
@@ -375,7 +376,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.num_protected_regs = 0x20,
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a630_gmu.bin",
- .gpmu_major = 0x0,
- .gpmu_minor = 0x005,
+ .gpmu_major = 0x1,
+ .gpmu_minor = 0x001,
},
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ae5a78d..46256b4 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -17,6 +17,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/input.h>
+#include <linux/io.h>
#include <soc/qcom/scm.h>
#include <linux/msm-bus-board.h>
@@ -36,6 +37,7 @@
#include "adreno_trace.h"
#include "a3xx_reg.h"
+#include "a6xx_reg.h"
#include "adreno_snapshot.h"
/* Include the master list of GPU cores that are supported */
@@ -610,7 +612,8 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_irq *irq_params = gpudev->irq;
irqreturn_t ret = IRQ_NONE;
- unsigned int status = 0, fence = 0, tmp, int_bit;
+ unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
+ unsigned int status_retries = 0;
int i;
atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -627,18 +630,55 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
/*
* If the AHB fence is not in ALLOW mode when we receive an RBBM
- * interrupt, something went wrong. Set a fault and change the
- * fence to ALLOW so we can clear the interrupt.
+ * interrupt, something went wrong. This means that we cannot proceed
+ * since the IRQ status and clear registers are not accessible.
+ * This is usually harmless because the GMU will abort power collapse
+ * and change the fence back to ALLOW. Poll so that this can happen.
*/
- adreno_readreg(adreno_dev, ADRENO_REG_GMU_AO_AHB_FENCE_CTRL, &fence);
- if (fence != 0) {
- KGSL_DRV_CRIT_RATELIMIT(device, "AHB fence is stuck in ISR\n");
- return ret;
+ if (kgsl_gmu_isenabled(device)) {
+ do {
+ adreno_readreg(adreno_dev,
+ ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
+ &fence);
+
+ if (fence_retries == FENCE_RETRY_MAX) {
+ KGSL_DRV_CRIT_RATELIMIT(device,
+ "AHB fence stuck in ISR\n");
+ return ret;
+ }
+ fence_retries++;
+ } while (fence != 0);
}
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
/*
+ * Read status again to make sure the bits aren't transitory.
+ * Transitory bits mean that they are spurious interrupts and are
+ * seen while preemption is on going. Empirical experiments have
+ * shown that the transitory bits are a timing thing and they
+ * go away in the small time window between two or three consecutive
+ * reads. If they don't go away, log the message and return.
+ */
+ while (status_retries < STATUS_RETRY_MAX) {
+ unsigned int new_status;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS,
+ &new_status);
+
+ if (status == new_status)
+ break;
+
+ status = new_status;
+ status_retries++;
+ }
+
+ if (status_retries == STATUS_RETRY_MAX) {
+ KGSL_DRV_CRIT_RATELIMIT(device, "STATUS bits are not stable\n");
+ return ret;
+ }
+
+ /*
* Clear all the interrupt bits but ADRENO_INT_RBBM_AHB_ERROR. Because
* even if we clear it here, it will stay high until it is cleared
* in its respective handler. Otherwise, the interrupt handler will
@@ -1022,6 +1062,28 @@ adreno_ocmem_free(struct adreno_device *adreno_dev)
}
#endif
+static void adreno_cx_dbgc_probe(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct resource *res;
+
+ res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+ "kgsl_3d0_cx_dbgc_memory");
+
+ if (res == NULL)
+ return;
+
+ adreno_dev->cx_dbgc_base = res->start - device->reg_phys;
+ adreno_dev->cx_dbgc_len = resource_size(res);
+ adreno_dev->cx_dbgc_virt = devm_ioremap(device->dev,
+ device->reg_phys +
+ adreno_dev->cx_dbgc_base,
+ adreno_dev->cx_dbgc_len);
+
+ if (adreno_dev->cx_dbgc_virt == NULL)
+ KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
+}
+
static int adreno_probe(struct platform_device *pdev)
{
struct kgsl_device *device;
@@ -1072,6 +1134,9 @@ static int adreno_probe(struct platform_device *pdev)
return status;
}
+ /* Probe for the optional CX_DBGC block */
+ adreno_cx_dbgc_probe(device);
+
/*
* qcom,iommu-secure-id is used to identify MMUs that can handle secure
* content but that is only part of the story - the GPU also has to be
@@ -1083,6 +1148,9 @@ static int adreno_probe(struct platform_device *pdev)
if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
device->mmu.secured = false;
+ if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
+ device->mmu.features |= KGSL_MMU_IO_COHERENT;
+
status = adreno_ringbuffer_probe(adreno_dev, nopreempt);
if (status)
goto out;
@@ -1589,26 +1657,104 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
- /* VBIF DDR cycles */
- if (adreno_dev->ram_cycles_lo == 0) {
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_VBIF,
- VBIF_AXI_TOTAL_BEATS,
- &adreno_dev->ram_cycles_lo, NULL,
- PERFCOUNTER_FLAG_KERNEL);
+ if (adreno_has_gbif(adreno_dev)) {
+ if (adreno_dev->starved_ram_lo_ch1 == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 1,
+ &adreno_dev->starved_ram_lo_ch1, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Unable to get perf counters for bus DCVS\n");
- adreno_dev->ram_cycles_lo = 0;
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->starved_ram_lo_ch1 = 0;
+ }
+ }
+
+ if (adreno_dev->ram_cycles_lo == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF,
+ GBIF_AXI0_READ_DATA_TOTAL_BEATS,
+ &adreno_dev->ram_cycles_lo, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->ram_cycles_lo = 0;
+ }
+ }
+
+ if (adreno_dev->ram_cycles_lo_ch1_read == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF,
+ GBIF_AXI1_READ_DATA_TOTAL_BEATS,
+ &adreno_dev->ram_cycles_lo_ch1_read,
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->ram_cycles_lo_ch1_read = 0;
+ }
+ }
+
+ if (adreno_dev->ram_cycles_lo_ch0_write == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF,
+ GBIF_AXI0_WRITE_DATA_TOTAL_BEATS,
+ &adreno_dev->ram_cycles_lo_ch0_write,
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->ram_cycles_lo_ch0_write = 0;
+ }
+ }
+
+ if (adreno_dev->ram_cycles_lo_ch1_write == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF,
+ GBIF_AXI1_WRITE_DATA_TOTAL_BEATS,
+ &adreno_dev->ram_cycles_lo_ch1_write,
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->ram_cycles_lo_ch1_write = 0;
+ }
+ }
+ } else {
+ /* VBIF DDR cycles */
+ if (adreno_dev->ram_cycles_lo == 0) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_VBIF,
+ VBIF_AXI_TOTAL_BEATS,
+ &adreno_dev->ram_cycles_lo, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to get perf counters for bus DCVS\n");
+ adreno_dev->ram_cycles_lo = 0;
+ }
}
}
}
/* Clear the busy_data stats - we're starting over from scratch */
adreno_dev->busy_data.gpu_busy = 0;
- adreno_dev->busy_data.vbif_ram_cycles = 0;
- adreno_dev->busy_data.vbif_starved_ram = 0;
+ adreno_dev->busy_data.bif_ram_cycles = 0;
+ adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
+ adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
+ adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
+ adreno_dev->busy_data.bif_starved_ram = 0;
+ adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
/* Restore performance counter registers with saved values */
adreno_perfcounter_restore(adreno_dev);
@@ -1761,8 +1907,19 @@ static int adreno_stop(struct kgsl_device *device)
* because some idle level transitions require VBIF and MMU.
*/
if (gpudev->wait_for_lowest_idle &&
- gpudev->wait_for_lowest_idle(adreno_dev))
- return -EINVAL;
+ gpudev->wait_for_lowest_idle(adreno_dev)) {
+ struct gmu_device *gmu = &device->gmu;
+
+ set_bit(GMU_FAULT, &gmu->flags);
+ gmu_snapshot(device);
+ /*
+ * Assume GMU hang after 10ms without responding.
+ * It shall be relative safe to clear vbif and stop
+ * MMU later. Early return in adreno_stop function
+ * will result in kernel panic in adreno_start
+ */
+ error = -EINVAL;
+ }
adreno_vbif_clear_pending_transactions(device);
@@ -1776,7 +1933,7 @@ static int adreno_stop(struct kgsl_device *device)
clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
- return 0;
+ return error;
}
static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
@@ -2431,8 +2588,12 @@ int adreno_soft_reset(struct kgsl_device *device)
/* Clear the busy_data stats - we're starting over from scratch */
adreno_dev->busy_data.gpu_busy = 0;
- adreno_dev->busy_data.vbif_ram_cycles = 0;
- adreno_dev->busy_data.vbif_starved_ram = 0;
+ adreno_dev->busy_data.bif_ram_cycles = 0;
+ adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
+ adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
+ adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
+ adreno_dev->busy_data.bif_starved_ram = 0;
+ adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
/* Set the page table back to the default page table */
adreno_ringbuffer_set_global(adreno_dev, 0);
@@ -2770,6 +2931,56 @@ static void adreno_gmu_regread(struct kgsl_device *device,
rmb();
}
+bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
+ unsigned int offsetwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ return adreno_dev->cx_dbgc_virt &&
+ (offsetwords >= (adreno_dev->cx_dbgc_base >> 2)) &&
+ (offsetwords < (adreno_dev->cx_dbgc_base +
+ adreno_dev->cx_dbgc_len) >> 2);
+}
+
+void adreno_cx_dbgc_regread(struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int *value)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int cx_dbgc_offset;
+
+ if (!adreno_is_cx_dbgc_register(device, offsetwords))
+ return;
+
+ cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
+ *value = __raw_readl(adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
+
+ /*
+ * ensure this read finishes before the next one.
+ * i.e. act like normal readl()
+ */
+ rmb();
+}
+
+void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int value)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int cx_dbgc_offset;
+
+ if (!adreno_is_cx_dbgc_register(device, offsetwords))
+ return;
+
+ cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
+ trace_kgsl_regwrite(device, offsetwords, value);
+
+ /*
+ * ensure previous writes post before this one,
+ * i.e. act like normal writel()
+ */
+ wmb();
+ __raw_writel(value, adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
+}
+
/**
* adreno_waittimestamp - sleep while waiting for the specified timestamp
* @device - pointer to a KGSL device structure
@@ -2948,7 +3159,8 @@ static void adreno_power_stats(struct kgsl_device *device,
if (adreno_is_a6xx(adreno_dev)) {
/* clock sourced from XO */
- stats->busy_time = gpu_busy * 10 / 192;
+ stats->busy_time = gpu_busy * 10;
+ do_div(stats->busy_time, 192);
} else {
/* clock sourced from GFX3D */
stats->busy_time = adreno_ticks_to_us(gpu_busy,
@@ -2962,12 +3174,36 @@ static void adreno_power_stats(struct kgsl_device *device,
if (adreno_dev->ram_cycles_lo != 0)
ram_cycles = counter_delta(device,
adreno_dev->ram_cycles_lo,
- &busy->vbif_ram_cycles);
+ &busy->bif_ram_cycles);
+
+ if (adreno_has_gbif(adreno_dev)) {
+ if (adreno_dev->ram_cycles_lo_ch1_read != 0)
+ ram_cycles += counter_delta(device,
+ adreno_dev->ram_cycles_lo_ch1_read,
+ &busy->bif_ram_cycles_read_ch1);
+
+ if (adreno_dev->ram_cycles_lo_ch0_write != 0)
+ ram_cycles += counter_delta(device,
+ adreno_dev->ram_cycles_lo_ch0_write,
+ &busy->bif_ram_cycles_write_ch0);
+
+ if (adreno_dev->ram_cycles_lo_ch1_write != 0)
+ ram_cycles += counter_delta(device,
+ adreno_dev->ram_cycles_lo_ch1_write,
+ &busy->bif_ram_cycles_write_ch1);
+ }
if (adreno_dev->starved_ram_lo != 0)
starved_ram = counter_delta(device,
adreno_dev->starved_ram_lo,
- &busy->vbif_starved_ram);
+ &busy->bif_starved_ram);
+
+ if (adreno_has_gbif(adreno_dev)) {
+ if (adreno_dev->starved_ram_lo_ch1 != 0)
+ starved_ram += counter_delta(device,
+ adreno_dev->starved_ram_lo_ch1,
+ &busy->bif_starved_ram_ch1);
+ }
stats->ram_time = ram_cycles;
stats->ram_wait = starved_ram;
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d6cba9d..0b4e1df 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -121,6 +121,8 @@
#define ADRENO_HW_NAP BIT(14)
/* The GMU supports min voltage*/
#define ADRENO_MIN_VOLT BIT(15)
+/* The core supports IO-coherent memory */
+#define ADRENO_IOCOHERENT BIT(16)
/*
* Adreno GPU quirks - control bits for various workarounds
@@ -164,6 +166,12 @@
/* Number of times to try hard reset */
#define NUM_TIMES_RESET_RETRY 5
+/* Number of times to poll the AHB fence in ISR */
+#define FENCE_RETRY_MAX 100
+
+/* Number of times to see if INT_0_STATUS changed or not */
+#define STATUS_RETRY_MAX 3
+
/* One cannot wait forever for the core to idle, so set an upper limit to the
* amount of time to wait for the core to go idle
*/
@@ -264,6 +272,7 @@ enum adreno_preempt_states {
* preempt_level: The level of preemption (for 6XX)
* skipsaverestore: To skip saverestore during L1 preemption (for 6XX)
* usesgmem: enable GMEM save/restore across preemption (for 6XX)
+ * count: Track the number of preemptions triggered
*/
struct adreno_preemption {
atomic_t state;
@@ -274,13 +283,18 @@ struct adreno_preemption {
unsigned int preempt_level;
bool skipsaverestore;
bool usesgmem;
+ unsigned int count;
};
struct adreno_busy_data {
unsigned int gpu_busy;
- unsigned int vbif_ram_cycles;
- unsigned int vbif_starved_ram;
+ unsigned int bif_ram_cycles;
+ unsigned int bif_ram_cycles_read_ch1;
+ unsigned int bif_ram_cycles_write_ch0;
+ unsigned int bif_ram_cycles_write_ch1;
+ unsigned int bif_starved_ram;
+ unsigned int bif_starved_ram_ch1;
unsigned int throttle_cycles[ADRENO_GPMU_THROTTLE_COUNTERS];
};
@@ -397,8 +411,18 @@ struct adreno_gpu_core {
* @pwron_fixup_dwords: Number of dwords in the command buffer
* @input_work: Work struct for turning on the GPU after a touch event
* @busy_data: Struct holding GPU VBIF busy stats
- * @ram_cycles_lo: Number of DDR clock cycles for the monitor session
- * @perfctr_pwr_lo: Number of cycles VBIF is stalled by DDR
+ * @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only
+ * DDR channel 0 read cycles in case of GBIF)
+ * @ram_cycles_lo_ch1_read: Number of DDR channel 1 Read clock cycles for
+ * the monitor session
+ * @ram_cycles_lo_ch0_write: Number of DDR channel 0 Write clock cycles for
+ * the monitor session
+ * @ram_cycles_lo_ch1_write: Number of DDR channel 1 Write clock cycles for
+ * the monitor session
+ * @starved_ram_lo: Number of cycles VBIF/GBIF is stalled by DDR (Only channel 0
+ * stall cycles in case of GBIF)
+ * @starved_ram_lo_ch1: Number of cycles GBIF is stalled by DDR channel 1
+ * @perfctr_pwr_lo: GPU busy cycles
* @halt: Atomic variable to check whether the GPU is currently halted
* @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers
* @ctx_d_debugfs: Context debugfs node
@@ -434,6 +458,9 @@ struct adreno_device {
unsigned int chipid;
unsigned long gmem_base;
unsigned long gmem_size;
+ unsigned long cx_dbgc_base;
+ unsigned int cx_dbgc_len;
+ void __iomem *cx_dbgc_virt;
const struct adreno_gpu_core *gpucore;
struct adreno_firmware fw[2];
size_t gpmu_cmds_size;
@@ -455,7 +482,11 @@ struct adreno_device {
struct work_struct input_work;
struct adreno_busy_data busy_data;
unsigned int ram_cycles_lo;
+ unsigned int ram_cycles_lo_ch1_read;
+ unsigned int ram_cycles_lo_ch0_write;
+ unsigned int ram_cycles_lo_ch1_write;
unsigned int starved_ram_lo;
+ unsigned int starved_ram_lo_ch1;
unsigned int perfctr_pwr_lo;
atomic_t halt;
atomic_t pending_irq_refcnt;
@@ -601,6 +632,12 @@ enum adreno_regs {
ADRENO_REG_CP_PROTECT_REG_0,
ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
ADRENO_REG_RBBM_STATUS,
ADRENO_REG_RBBM_STATUS3,
ADRENO_REG_RBBM_PERFCTR_CTL,
@@ -820,6 +857,13 @@ struct adreno_snapshot_data {
struct adreno_snapshot_sizes *sect_sizes;
};
+enum adreno_cp_marker_type {
+ IFPC_DISABLE,
+ IFPC_ENABLE,
+ IB1LIST_START,
+ IB1LIST_END,
+};
+
struct adreno_gpudev {
/*
* These registers are in a different location on different devices,
@@ -867,7 +911,8 @@ struct adreno_gpudev {
unsigned int *cmds,
struct kgsl_context *context);
int (*preemption_yield_enable)(unsigned int *);
- unsigned int (*set_marker)(unsigned int *cmds, int start);
+ unsigned int (*set_marker)(unsigned int *cmds,
+ enum adreno_cp_marker_type type);
unsigned int (*preemption_post_ibsubmit)(
struct adreno_device *adreno_dev,
unsigned int *cmds);
@@ -902,6 +947,9 @@ struct adreno_gpudev {
bool (*sptprac_is_on)(struct adreno_device *);
unsigned int (*ccu_invalidate)(struct adreno_device *adreno_dev,
unsigned int *cmds);
+ int (*perfcounter_update)(struct adreno_device *adreno_dev,
+ struct adreno_perfcount_register *reg,
+ bool update_reg);
};
/**
@@ -1060,6 +1108,13 @@ int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset,
unsigned int *val);
void adreno_efuse_unmap(struct adreno_device *adreno_dev);
+bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
+ unsigned int offset);
+void adreno_cx_dbgc_regread(struct kgsl_device *adreno_device,
+ unsigned int offsetwords, unsigned int *value);
+void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int value);
+
#define ADRENO_TARGET(_name, _id) \
static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
{ \
@@ -1880,4 +1935,7 @@ static inline int adreno_vbif_clear_pending_transactions(
return ret;
}
+void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+ enum adreno_regs offset, unsigned int val,
+ unsigned int fence_mask);
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index f3e8650..768a4bb 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -193,6 +193,8 @@ static void a5xx_critical_packet_destroy(struct adreno_device *adreno_dev)
kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf2);
kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf3);
+ kgsl_iommu_unmap_global_secure_pt_entry(KGSL_DEVICE(adreno_dev),
+ &crit_pkts_refbuf0);
kgsl_sharedmem_free(&crit_pkts_refbuf0);
}
@@ -231,8 +233,10 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
if (ret)
return ret;
- kgsl_add_global_secure_entry(&adreno_dev->dev,
+ ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
&crit_pkts_refbuf0);
+ if (ret)
+ return ret;
ret = kgsl_allocate_global(&adreno_dev->dev,
&crit_pkts_refbuf1,
@@ -293,8 +297,13 @@ static void a5xx_init(struct adreno_device *adreno_dev)
INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);
- if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
- a5xx_critical_packet_construct(adreno_dev);
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
+ int ret;
+
+ ret = a5xx_critical_packet_construct(adreno_dev);
+ if (ret)
+ a5xx_critical_packet_destroy(adreno_dev);
+ }
a5xx_crashdump_init(adreno_dev);
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 6dc62866..d1a6005 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -621,7 +621,8 @@ static size_t a5xx_snapshot_shader_memory(struct kgsl_device *device,
header->index = info->bank;
header->size = block->sz;
- memcpy(data, registers.hostptr + info->offset, block->sz);
+ memcpy(data, registers.hostptr + info->offset,
+ block->sz * sizeof(unsigned int));
return SHADER_SECTION_SZ(block->sz);
}
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 8cbc75e..1d065f4 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -13,6 +13,7 @@
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>
+#include <linux/jiffies.h>
#include "adreno.h"
#include "a6xx_reg.h"
@@ -52,6 +53,7 @@ static const struct adreno_vbif_data a630_vbif[] = {
static const struct adreno_vbif_data a615_gbif[] = {
{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+ {A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9},
{0, 0},
};
@@ -73,14 +75,14 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
- {A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_DELAY_SP3, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_HYST_SP0, 0x00000080},
- {A6XX_RBBM_CLOCK_HYST_SP1, 0x00000080},
- {A6XX_RBBM_CLOCK_HYST_SP2, 0x00000080},
- {A6XX_RBBM_CLOCK_HYST_SP3, 0x00000080},
+ {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {A6XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
@@ -105,10 +107,10 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
- {A6XX_RBBM_CLOCK_HYST3_TP0, 0x07777777},
- {A6XX_RBBM_CLOCK_HYST3_TP1, 0x07777777},
- {A6XX_RBBM_CLOCK_HYST3_TP2, 0x07777777},
- {A6XX_RBBM_CLOCK_HYST3_TP3, 0x07777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
@@ -143,20 +145,20 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
- {A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00000000},
- {A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00000000},
- {A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00000000},
- {A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00000000},
+ {A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
- {A6XX_RBBM_CLOCK_CNTL_RAC, 0x00022022},
- {A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005550},
- {A6XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
- {A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222220},
- {A6XX_RBBM_CLOCK_MODE_GPC, 0x00202222},
+ {A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
@@ -173,12 +175,12 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
};
static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
- {A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
- {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
+ {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
- {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
- {A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
@@ -222,7 +224,7 @@ static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
- {A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
@@ -266,7 +268,8 @@ static struct a6xx_protected_regs {
{ 0x0, 0x4F9, 0 },
{ 0x501, 0xA, 0 },
{ 0x511, 0x44, 0 },
- { 0xE00, 0xE, 1 },
+ { 0xE00, 0x1, 1 },
+ { 0xE03, 0xB, 1 },
{ 0x8E00, 0x0, 1 },
{ 0x8E50, 0xF, 1 },
{ 0xBE02, 0x0, 1 },
@@ -281,6 +284,7 @@ static struct a6xx_protected_regs {
{ 0xA630, 0x0, 1 },
};
+/* IFPC & Preemption static powerup restore list */
static struct reg_list_pair {
uint32_t offset;
uint32_t val;
@@ -315,6 +319,48 @@ static struct reg_list_pair {
{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
};
+/* IFPC only static powerup restore list */
+static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
+ { A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
+ { A6XX_CP_CHICKEN_DBG, 0x0 },
+ { A6XX_CP_ADDR_MODE_CNTL, 0x0 },
+ { A6XX_CP_DBG_ECO_CNTL, 0x0 },
+ { A6XX_CP_PROTECT_CNTL, 0x0 },
+ { A6XX_CP_PROTECT_REG, 0x0 },
+ { A6XX_CP_PROTECT_REG+1, 0x0 },
+ { A6XX_CP_PROTECT_REG+2, 0x0 },
+ { A6XX_CP_PROTECT_REG+3, 0x0 },
+ { A6XX_CP_PROTECT_REG+4, 0x0 },
+ { A6XX_CP_PROTECT_REG+5, 0x0 },
+ { A6XX_CP_PROTECT_REG+6, 0x0 },
+ { A6XX_CP_PROTECT_REG+7, 0x0 },
+ { A6XX_CP_PROTECT_REG+8, 0x0 },
+ { A6XX_CP_PROTECT_REG+9, 0x0 },
+ { A6XX_CP_PROTECT_REG+10, 0x0 },
+ { A6XX_CP_PROTECT_REG+11, 0x0 },
+ { A6XX_CP_PROTECT_REG+12, 0x0 },
+ { A6XX_CP_PROTECT_REG+13, 0x0 },
+ { A6XX_CP_PROTECT_REG+14, 0x0 },
+ { A6XX_CP_PROTECT_REG+15, 0x0 },
+ { A6XX_CP_PROTECT_REG+16, 0x0 },
+ { A6XX_CP_PROTECT_REG+17, 0x0 },
+ { A6XX_CP_PROTECT_REG+18, 0x0 },
+ { A6XX_CP_PROTECT_REG+19, 0x0 },
+ { A6XX_CP_PROTECT_REG+20, 0x0 },
+ { A6XX_CP_PROTECT_REG+21, 0x0 },
+ { A6XX_CP_PROTECT_REG+22, 0x0 },
+ { A6XX_CP_PROTECT_REG+23, 0x0 },
+ { A6XX_CP_PROTECT_REG+24, 0x0 },
+ { A6XX_CP_PROTECT_REG+25, 0x0 },
+ { A6XX_CP_PROTECT_REG+26, 0x0 },
+ { A6XX_CP_PROTECT_REG+27, 0x0 },
+ { A6XX_CP_PROTECT_REG+28, 0x0 },
+ { A6XX_CP_PROTECT_REG+29, 0x0 },
+ { A6XX_CP_PROTECT_REG+30, 0x0 },
+ { A6XX_CP_PROTECT_REG+31, 0x0 },
+ { A6XX_CP_AHB_CNTL, 0x0 },
+};
+
static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -331,7 +377,7 @@ static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
- PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0,
+ PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
"powerup_register_list")) {
adreno_dev->pwrup_reglist.gpuaddr = 0;
return;
@@ -428,7 +474,41 @@ static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-#define RBBM_CLOCK_CNTL_ON 0x8AA8AA02
+static inline unsigned int
+__get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x8AA8AA82;
+ else
+ return 0x8AA8AA02;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000222;
+ else
+ return 0x00020222;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000111;
+ else
+ return 0x00010111;
+}
+
+static inline unsigned int
+__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
+{
+ if (adreno_is_a615(adreno_dev))
+ return 0x00000555;
+ else
+ return 0x00005555;
+}
static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
@@ -442,16 +522,16 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
if (kgsl_gmu_isenabled(device)) {
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
- on ? 0x00020222 : 0);
+ on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
- on ? 0x00010111 : 0);
+ on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
- on ? 0x00050555 : 0);
+ on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
}
kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);
- if (value == RBBM_CLOCK_CNTL_ON && on)
+ if (value == __get_rbbm_clock_cntl_on(adreno_dev) && on)
return;
if (value == 0 && !on)
@@ -478,7 +558,7 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
/* enable top level HWCG */
kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
- on ? RBBM_CLOCK_CNTL_ON : 0);
+ on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}
#define LM_DEFAULT_LIMIT 6000
@@ -500,17 +580,46 @@ static uint32_t lm_limit(struct adreno_device *adreno_dev)
static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
uint32_t i;
+ struct cpu_gpu_lock *lock;
+ struct reg_list_pair *r;
/* Set up the register values */
- for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
- struct reg_list_pair *r = &a6xx_pwrup_reglist[i];
-
+ for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
+ r = &a6xx_ifpc_pwrup_reglist[i];
kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
}
- /* Copy Preemption register/data pairs */
- memcpy(adreno_dev->pwrup_reglist.hostptr, &a6xx_pwrup_reglist,
- sizeof(a6xx_pwrup_reglist));
+ for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
+ r = &a6xx_pwrup_reglist[i];
+ kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
+ }
+
+ lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
+ lock->flag_ucode = 0;
+ lock->flag_kmd = 0;
+ lock->turn = 0;
+
+ /*
+ * The overall register list is composed of
+ * 1. Static IFPC-only registers
+ * 2. Static IFPC + preemption registers
+	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+ *
+ * The CP views the second and third entries as one dynamic list
+ * starting from list_offset. Thus, list_length should be the sum
+ * of all three lists above (of which the third list will start off
+ * empty). And list_offset should be specified as the size in dwords
+ * of the static IFPC-only register list.
+ */
+ lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
+ sizeof(a6xx_pwrup_reglist)) >> 2;
+ lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
+
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
+ a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
+ memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
+ + sizeof(a6xx_ifpc_pwrup_reglist),
+ a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
}
/*
@@ -717,13 +826,16 @@ static int a6xx_microcode_load(struct adreno_device *adreno_dev)
/* Register initialization list */
#define CP_INIT_REGISTER_INIT_LIST BIT(7)
+/* Register initialization list with spinlock */
+#define CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK BIT(8)
+
#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
CP_INIT_ERROR_DETECTION_CONTROL | \
CP_INIT_HEADER_DUMP | \
CP_INIT_DEFAULT_RESET_STATE | \
CP_INIT_UCODE_WORKAROUND_MASK | \
CP_INIT_OPERATION_MODE_MASK | \
- CP_INIT_REGISTER_INIT_LIST)
+ CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK)
static void _set_ordinals(struct adreno_device *adreno_dev,
unsigned int *cmds, unsigned int count)
@@ -759,13 +871,21 @@ static void _set_ordinals(struct adreno_device *adreno_dev,
if (CP_INIT_MASK & CP_INIT_OPERATION_MODE_MASK)
*cmds++ = 0x00000002;
- if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
+ if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK) {
+ uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
+
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+ *cmds++ = 0;
+
+ } else if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
*cmds++ = lower_32_bits(gpuaddr);
*cmds++ = upper_32_bits(gpuaddr);
/* Size is in dwords */
- *cmds++ = 0;
+ *cmds++ = (sizeof(a6xx_ifpc_pwrup_reglist) +
+ sizeof(a6xx_pwrup_reglist)) >> 2;
}
/* Pad rest of the cmds with 0's */
@@ -822,7 +942,8 @@ static int _preemption_init(struct adreno_device *adreno_dev,
rb->preemption_desc.gpuaddr);
*cmds++ = 2;
- cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->secure_preemption_desc.gpuaddr);
/* Turn CP protection ON */
*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
@@ -913,6 +1034,38 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev,
return a6xx_post_start(adreno_dev);
}
+unsigned int a6xx_set_marker(
+ unsigned int *cmds, enum adreno_cp_marker_type type)
+{
+ unsigned int cmd = 0;
+
+ *cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
+
+ /*
+ * Indicate the beginning and end of the IB1 list with a SET_MARKER.
+ * Among other things, this will implicitly enable and disable
+ * preemption respectively. IFPC can also be disabled and enabled
+ * with a SET_MARKER. Bit 8 tells the CP the marker is for IFPC.
+ */
+ switch (type) {
+ case IFPC_DISABLE:
+ cmd = 0x101;
+ break;
+ case IFPC_ENABLE:
+ cmd = 0x100;
+ break;
+ case IB1LIST_START:
+ cmd = 0xD;
+ break;
+ case IB1LIST_END:
+ cmd = 0xE;
+ break;
+ }
+
+ *cmds++ = cmd;
+ return 2;
+}
+
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
struct adreno_firmware *firmware)
{
@@ -1079,7 +1232,7 @@ static int timed_poll_check(struct kgsl_device *device,
if ((value & mask) == expected_ret)
return 0;
/* Wait 100us to reduce unnecessary AHB bus traffic */
- udelay(100);
+ usleep_range(10, 100);
} while (!time_after(jiffies, t));
/* Double check one last time */
@@ -1225,7 +1378,6 @@ static int a6xx_oob_set(struct adreno_device *adreno_dev,
unsigned int clear_mask)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct gmu_device *gmu = &device->gmu;
int ret = 0;
if (!kgsl_gmu_isenabled(device))
@@ -1239,9 +1391,7 @@ static int a6xx_oob_set(struct adreno_device *adreno_dev,
GPU_START_TIMEOUT,
check_mask)) {
ret = -ETIMEDOUT;
- dev_err(&gmu->pdev->dev,
- "OOB set timed out, mask %x\n", set_mask);
- WARN_ON(true);
+ WARN(1, "OOB set timed out, mask %x\n", set_mask);
}
kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1844,7 +1994,7 @@ static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
}
/* Wait 100us to reduce unnecessary AHB bus traffic */
- udelay(100);
+ usleep_range(10, 100);
}
/* Check one last time */
@@ -1858,8 +2008,7 @@ static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
return 0;
}
- dev_err(&gmu->pdev->dev,
- "Timeout waiting for lowest idle level: %d\n", reg);
+ WARN(1, "Timeout waiting for lowest idle level: %d\n", reg);
return -ETIMEDOUT;
}
@@ -2898,8 +3047,16 @@ static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
};
+/*
+ * ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
+ * because most of the perfcounter groups need to be restored
+ * as part of preemption and IFPC. Perfcounter groups that are
+ * not restored as part of preemption and IFPC should be defined
+ * using A6XX_PERFCOUNTER_GROUP_FLAGS macro
+ */
#define A6XX_PERFCOUNTER_GROUP(offset, name) \
- ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
+ ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
+ ADRENO_PERFCOUNTER_GROUP_RESTORE)
#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
@@ -2910,7 +3067,7 @@ static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
static struct adreno_perfcount_group a6xx_perfcounter_groups
[KGSL_PERFCOUNTER_GROUP_MAX] = {
A6XX_PERFCOUNTER_GROUP(CP, cp),
- A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
+ A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0),
A6XX_PERFCOUNTER_GROUP(PC, pc),
A6XX_PERFCOUNTER_GROUP(VFD, vfd),
A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
@@ -2925,7 +3082,7 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
A6XX_PERFCOUNTER_GROUP(SP, sp),
A6XX_PERFCOUNTER_GROUP(RB, rb),
A6XX_PERFCOUNTER_GROUP(VSC, vsc),
- A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
+ A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
@@ -2964,6 +3121,41 @@ static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
return 0;
}
+static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
+{
+ unsigned int val;
+ unsigned int speed_bin[3];
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (of_property_read_u32_array(device->pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
+
+ adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+static const struct {
+ int (*check)(struct adreno_device *adreno_dev);
+ void (*func)(struct adreno_device *adreno_dev);
+} a6xx_efuse_funcs[] = {
+ { adreno_is_a615, a6xx_efuse_speed_bin },
+};
+
+static void a6xx_check_features(struct adreno_device *adreno_dev)
+{
+ unsigned int i;
+
+ if (adreno_efuse_map(adreno_dev))
+ return;
+ for (i = 0; i < ARRAY_SIZE(a6xx_efuse_funcs); i++) {
+ if (a6xx_efuse_funcs[i].check(adreno_dev))
+ a6xx_efuse_funcs[i].func(adreno_dev);
+ }
+
+ adreno_efuse_unmap(adreno_dev);
+}
static void a6xx_platform_setup(struct adreno_device *adreno_dev)
{
uint64_t addr;
@@ -2982,7 +3174,8 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
a6xx_perfcounters_gbif_pwr;
- a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
+ a6xx_perfcounter_groups[
+ KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
= ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
gpudev->vbif_xin_halt_ctrl0_mask =
@@ -2990,6 +3183,9 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
} else
gpudev->vbif_xin_halt_ctrl0_mask =
A6XX_VBIF_XIN_HALT_CTRL0_MASK;
+
+	/* Check efuse bits for various capabilities */
+ a6xx_check_features(adreno_dev);
}
@@ -3035,6 +3231,22 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
@@ -3127,6 +3339,69 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
.offset_0 = ADRENO_REG_REGISTER_MAX,
};
+static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
+ struct adreno_perfcount_register *reg, bool update_reg)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
+ struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
+ unsigned int i;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ int ret = 0;
+
+ lock->flag_kmd = 1;
+ /* Write flag_kmd before turn */
+ wmb();
+ lock->turn = 0;
+ /* Write these fields before looping */
+ mb();
+
+ /*
+ * Spin here while GPU ucode holds the lock, lock->flag_ucode will
+ * be set to 0 after GPU ucode releases the lock. Minimum wait time
+ * is 1 second and this should be enough for GPU to release the lock
+ */
+ while (lock->flag_ucode == 1 && lock->turn == 0) {
+ cpu_relax();
+ /* Get the latest updates from GPU */
+ rmb();
+ /*
+ * Make sure we wait at least 1sec for the lock,
+ * if we did not get it after 1sec return an error.
+ */
+ if (time_after(jiffies, timeout) &&
+ (lock->flag_ucode == 1 && lock->turn == 0)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ }
+
+ /* Read flag_ucode and turn before list_length */
+ rmb();
+ /*
+ * If the perfcounter select register is already present in reglist
+ * update it, otherwise append the <select register, value> pair to
+ * the end of the list.
+ */
+ for (i = 0; i < lock->list_length >> 1; i++)
+ if (reg_pair[i].offset == reg->select)
+ break;
+
+ reg_pair[i].offset = reg->select;
+ reg_pair[i].val = reg->countable;
+ if (i == lock->list_length >> 1)
+ lock->list_length += 2;
+
+ if (update_reg)
+ kgsl_regwrite(device, reg->select, reg->countable);
+
+unlock:
+ /* All writes done before releasing the lock */
+ wmb();
+ lock->flag_kmd = 0;
+ return ret;
+}
+
struct adreno_gpudev adreno_a6xx_gpudev = {
.reg_offsets = &a6xx_reg_offsets,
.start = a6xx_start,
@@ -3169,4 +3444,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.gx_is_on = a6xx_gx_is_on,
.sptprac_is_on = a6xx_sptprac_is_on,
.ccu_invalidate = a6xx_ccu_invalidate,
+ .perfcounter_update = a6xx_perfcounter_update,
};
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index dd8af80..bf1111c 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -75,6 +75,24 @@ struct a6xx_cp_smmu_info {
#define A6XX_CP_SMMU_INFO_MAGIC_REF 0x241350D5UL
+/**
+ * struct cpu_gpu_lock - CP spinlock structure for power up list
+ * @flag_ucode: flag value set by CP
+ * @flag_kmd: flag value set by KMD
+ * @turn: turn variable set by both CP and KMD
+ * @list_length: this tells CP the last dword in the list:
+ * 16 + (4 * (List_Length - 1))
+ * @list_offset: this tells CP the start of preemption only list:
+ * 16 + (4 * List_Offset)
+ */
+struct cpu_gpu_lock {
+ uint32_t flag_ucode;
+ uint32_t flag_kmd;
+ uint32_t turn;
+ uint16_t list_length;
+ uint16_t list_offset;
+};
+
#define A6XX_CP_CTXRECORD_MAGIC_REF 0xAE399D6EUL
/* Size of each CP preemption record */
#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES (2112 * 1024)
@@ -100,7 +118,8 @@ unsigned int a6xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb,
unsigned int *cmds, struct kgsl_context *context);
-unsigned int a6xx_set_marker(unsigned int *cmds, int start);
+unsigned int a6xx_set_marker(unsigned int *cmds,
+ enum adreno_cp_marker_type type);
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit);
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 1eec381..d92d1e0 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -35,6 +35,25 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
unsigned int wptr;
unsigned long flags;
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+ /*
+ * Need to make sure GPU is up before we read the
+ * WPTR as fence doesn't wake GPU on read operation.
+ */
+ if (in_interrupt() == 0) {
+ int status;
+
+ if (gpudev->oob_set) {
+ status = gpudev->oob_set(adreno_dev,
+ OOB_PREEMPTION_SET_MASK,
+ OOB_PREEMPTION_CHECK_MASK,
+ OOB_PREEMPTION_CLEAR_MASK);
+ if (status)
+ return;
+ }
+ }
+
spin_lock_irqsave(&rb->preempt_lock, flags);
@@ -55,6 +74,12 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
msecs_to_jiffies(adreno_drawobj_timeout);
spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+ if (in_interrupt() == 0) {
+ if (gpudev->oob_clear)
+ gpudev->oob_clear(adreno_dev,
+ OOB_PREEMPTION_CLEAR_MASK);
+ }
}
static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
@@ -204,7 +229,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_ringbuffer *next;
- uint64_t ttbr0;
+ uint64_t ttbr0, gpuaddr;
unsigned int contextidr;
unsigned long flags;
uint32_t preempt_level, usesgmem, skipsaverestore;
@@ -267,6 +292,8 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
kgsl_sharedmem_writel(device, &next->preemption_desc,
PREEMPT_RECORD(wptr), next->wptr);
+ preempt->count++;
+
spin_unlock_irqrestore(&next->preempt_lock, flags);
/* And write it to the smmu info */
@@ -275,24 +302,57 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
kgsl_sharedmem_writel(device, &iommu->smmu_info,
PREEMPT_SMMU_RECORD(context_idr), contextidr);
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
- lower_32_bits(next->preemption_desc.gpuaddr));
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
- upper_32_bits(next->preemption_desc.gpuaddr));
+ kgsl_sharedmem_readq(&device->scratch, &gpuaddr,
+ SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(next->id));
- if (next->drawctxt_active) {
- struct kgsl_context *context = &next->drawctxt_active->base;
- uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
+ /*
+ * Set a keepalive bit before the first preemption register write.
+ * This is required since while each individual write to the context
+ * switch registers will wake the GPU from collapse, it will not in
+ * itself cause GPU activity. Thus, the GPU could technically be
+ * re-collapsed between subsequent register writes leading to a
+ * prolonged preemption sequence. The keepalive bit prevents any
+ * further power collapse while it is set.
+ * It is more efficient to use a keepalive+wake-on-fence approach here
+ * rather than an OOB. Both keepalive and the fence are effectively
+ * free when the GPU is already powered on, whereas an OOB requires an
+ * unconditional handshake with the GMU.
+ */
+ kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
- lower_32_bits(gpuaddr));
- kgsl_regwrite(device,
- A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
- upper_32_bits(gpuaddr));
- }
+ /*
+ * Fenced writes on this path will make sure the GPU is woken up
+ * in case it was power collapsed by the GMU.
+ */
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
+ lower_32_bits(next->preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
+ upper_32_bits(next->preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
+ lower_32_bits(next->secure_preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
+ upper_32_bits(next->secure_preemption_desc.gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
+ lower_32_bits(gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
+ upper_32_bits(gpuaddr),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
adreno_dev->next_rb = next;
@@ -305,10 +365,20 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
/* Trigger the preemption */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT,
- ((preempt_level << 6) & 0xC0) |
- ((skipsaverestore << 9) & 0x200) |
- ((usesgmem << 8) & 0x100) | 0x1);
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_PREEMPT,
+ (((preempt_level << 6) & 0xC0) |
+ ((skipsaverestore << 9) & 0x200) |
+ ((usesgmem << 8) & 0x100) | 0x1),
+ FENCE_STATUS_WRITEDROPPED1_MASK);
+
+ /*
+ * Once preemption has been requested with the final register write,
+ * the preemption process starts and the GPU is considered busy.
+ * We can now safely clear the preemption keepalive bit, allowing
+ * power collapse to resume its regular activity.
+ */
+ kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x2, 0x0);
}
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
@@ -374,34 +444,20 @@ void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
mutex_unlock(&device->mutex);
}
-unsigned int a6xx_set_marker(unsigned int *cmds, int start)
-{
- *cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
-
- /*
- * Indicate the beginning and end of the IB1 list with a SET_MARKER.
- * Among other things, this will implicitly enable and disable
- * preemption respectively.
- */
- if (start)
- *cmds++ = 0xD;
- else
- *cmds++ = 0xE;
-
- return 2;
-}
-
unsigned int a6xx_preemption_pre_ibsubmit(
struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb,
unsigned int *cmds, struct kgsl_context *context)
{
unsigned int *cmds_orig = cmds;
+ uint64_t gpuaddr = 0;
- if (context)
+ if (context) {
+ gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
- else
+ } else {
*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);
+ }
/* NULL SMMU_INFO buffer - we track in KMD */
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
@@ -411,10 +467,10 @@ unsigned int a6xx_preemption_pre_ibsubmit(
cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
- cmds += cp_gpuaddr(adreno_dev, cmds, 0);
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ rb->secure_preemption_desc.gpuaddr);
if (context) {
- uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
@@ -431,6 +487,20 @@ unsigned int a6xx_preemption_pre_ibsubmit(
cmds += cp_gpuaddr(adreno_dev, cmds,
rb->perfcounter_save_restore_desc.gpuaddr);
+ if (context) {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_ringbuffer *rb = drawctxt->rb;
+ uint64_t dest =
+ SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
+ rb->id);
+
+ *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
+ cmds += cp_gpuaddr(adreno_dev, cmds, dest);
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+ }
+
return (unsigned int) (cmds - cmds_orig);
}
@@ -438,6 +508,18 @@ unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
unsigned int *cmds)
{
unsigned int *cmds_orig = cmds;
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+
+ if (rb) {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ uint64_t dest = SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
+ rb->id);
+
+ *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
+ cmds += cp_gpuaddr(adreno_dev, cmds, dest);
+ *cmds++ = 0;
+ *cmds++ = 0;
+ }
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
@@ -505,6 +587,17 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
if (ret)
return ret;
+ ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
+ A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
+ KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
+ if (ret)
+ return ret;
+
+ ret = kgsl_iommu_map_global_secure_pt_entry(device,
+ &rb->secure_preemption_desc);
+ if (ret)
+ return ret;
+
ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
@@ -578,6 +671,9 @@ static void a6xx_preemption_close(struct kgsl_device *device)
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
kgsl_free_global(device, &rb->preemption_desc);
kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
+ kgsl_iommu_unmap_global_secure_pt_entry(device,
+ &rb->secure_preemption_desc);
+ kgsl_sharedmem_free(&rb->secure_preemption_desc);
}
}
@@ -645,16 +741,20 @@ int a6xx_preemption_context_init(struct kgsl_context *context)
{
struct kgsl_device *device = context->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ uint64_t flags = 0;
if (!adreno_is_preemption_setup_enabled(adreno_dev))
return 0;
+ if (context->flags & KGSL_CONTEXT_SECURE)
+ flags |= KGSL_MEMFLAGS_SECURE;
+
/*
* gpumem_alloc_entry takes an extra refcount. Put it only when
* destroying the context to keep the context record valid
*/
context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
- A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0);
+ A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, flags);
if (IS_ERR(context->user_ctxt_record)) {
int ret = PTR_ERR(context->user_ctxt_record);
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 4357518..b9a2f8d 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -195,35 +195,6 @@ struct a6xx_cluster_dbgahb_regs_info {
unsigned int ctxt_id;
};
-static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
- 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
- 0xBE20, 0xBE23,
-};
-
-static const unsigned int a6xx_sp_non_ctx_registers[] = {
- 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
- 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
-};
-
-static const unsigned int a6xx_tp_non_ctx_registers[] = {
- 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
-};
-
-static struct a6xx_non_ctx_dbgahb_registers {
- unsigned int regbase;
- unsigned int statetype;
- const unsigned int *regs;
- unsigned int num_sets;
- unsigned int offset;
-} a6xx_non_ctx_dbgahb[] = {
- { 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
- ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
- { 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
- ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
- { 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
- ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
-};
-
static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
/* VBIF */
0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
@@ -332,6 +303,15 @@ static const unsigned int a6xx_registers[] = {
/* VFD */
0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
0xA630, 0xA630,
+ /* SP */
+ 0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
+ 0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
+ /* TP */
+ 0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
+ /* HLSQ */
+ 0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
+ 0xBE20, 0xBE23,
+
};
/*
@@ -437,7 +417,6 @@ static const struct adreno_debugbus_block a6xx_vbif_debugbus_blocks = {
A6XX_DBGBUS_VBIF, 0x100,
};
-static void __iomem *a6xx_cx_dbgc;
static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
{ A6XX_DBGBUS_GMU_CX, 0x100, },
{ A6XX_DBGBUS_CX, 0x100, },
@@ -661,7 +640,7 @@ static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
header->size = block->sz;
memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
- block->sz);
+ block->sz * sizeof(unsigned int));
return SHADER_SECTION_SZ(block->sz);
}
@@ -848,106 +827,6 @@ static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
return data_size + sizeof(*header);
}
-static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
- u8 *buf, size_t remain, void *priv)
-{
- struct kgsl_snapshot_regs *header =
- (struct kgsl_snapshot_regs *)buf;
- struct a6xx_non_ctx_dbgahb_registers *regs =
- (struct a6xx_non_ctx_dbgahb_registers *)priv;
- unsigned int *data = (unsigned int *)(buf + sizeof(*header));
- int count = 0;
- unsigned int read_sel;
- int i, j;
-
- if (!device->snapshot_legacy)
- return 0;
-
- /* Figure out how many registers we are going to dump */
- for (i = 0; i < regs->num_sets; i++) {
- int start = regs->regs[i * 2];
- int end = regs->regs[i * 2 + 1];
-
- count += (end - start + 1);
- }
-
- if (remain < (count * 8) + sizeof(*header)) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- return 0;
- }
-
- header->count = count;
-
- read_sel = (regs->statetype & 0xff) << 8;
- kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
-
- for (i = 0; i < regs->num_sets; i++) {
- unsigned int start = regs->regs[2 * i];
- unsigned int end = regs->regs[2 * i + 1];
-
- for (j = start; j <= end; j++) {
- unsigned int val;
-
- val = a6xx_read_dbgahb(device, regs->regbase, j);
- *data++ = j;
- *data++ = val;
-
- }
- }
- return (count * 8) + sizeof(*header);
-}
-
-static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
- size_t remain, void *priv)
-{
- struct kgsl_snapshot_regs *header =
- (struct kgsl_snapshot_regs *)buf;
- struct a6xx_non_ctx_dbgahb_registers *regs =
- (struct a6xx_non_ctx_dbgahb_registers *)priv;
- unsigned int count = 0;
- unsigned int *data = (unsigned int *)(buf + sizeof(*header));
- unsigned int i, k;
- unsigned int *src;
-
- if (crash_dump_valid == false)
- return a6xx_legacy_snapshot_non_ctx_dbgahb(device, buf, remain,
- regs);
-
- if (remain < sizeof(*header)) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- return 0;
- }
-
- remain -= sizeof(*header);
-
- src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
-
- for (i = 0; i < regs->num_sets; i++) {
- unsigned int start;
- unsigned int end;
-
- start = regs->regs[2 * i];
- end = regs->regs[(2 * i) + 1];
-
- if (remain < (end - start + 1) * 8) {
- SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
- goto out;
- }
-
- remain -= ((end - start) + 1) * 8;
-
- for (k = start; k <= end; k++, count++) {
- *data++ = k;
- *data++ = *src++;
- }
- }
-out:
- header->count = count;
-
- /* Return the size of the section */
- return (count * 8) + sizeof(*header);
-}
-
static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
@@ -967,12 +846,6 @@ static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
a6xx_snapshot_cluster_dbgahb, &info);
}
}
-
- for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
- kgsl_snapshot_add_section(device,
- KGSL_SNAPSHOT_SECTION_REGS, snapshot,
- a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
- }
}
static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
@@ -1254,46 +1127,6 @@ static size_t a6xx_snapshot_vbif_debugbus_block(struct kgsl_device *device,
return size;
}
-static void _cx_dbgc_regread(unsigned int offsetwords, unsigned int *value)
-{
- void __iomem *reg;
-
- if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
- (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
- "Read beyond CX_DBGC block: 0x%x\n", offsetwords))
- return;
-
- reg = a6xx_cx_dbgc +
- ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
- *value = __raw_readl(reg);
-
- /*
- * ensure this read finishes before the next one.
- * i.e. act like normal readl()
- */
- rmb();
-}
-
-static void _cx_dbgc_regwrite(unsigned int offsetwords, unsigned int value)
-{
- void __iomem *reg;
-
- if (WARN((offsetwords < A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) ||
- (offsetwords > A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2),
- "Write beyond CX_DBGC block: 0x%x\n", offsetwords))
- return;
-
- reg = a6xx_cx_dbgc +
- ((offsetwords - A6XX_CX_DBGC_CFG_DBGBUS_SEL_A) << 2);
-
- /*
- * ensure previous writes post before this one,
- * i.e. act like normal writel()
- */
- wmb();
- __raw_writel(value, reg);
-}
-
/* a6xx_cx_dbgc_debug_bus_read() - Read data from trace bus */
static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
unsigned int block_id, unsigned int index, unsigned int *val)
@@ -1303,10 +1136,10 @@ static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
reg = (block_id << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
(index << A6XX_CX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
/*
* There needs to be a delay of 1 us to ensure enough time for correct
@@ -1314,9 +1147,9 @@ static void a6xx_cx_debug_bus_read(struct kgsl_device *device,
*/
udelay(1);
- _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+ adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
val++;
- _cx_dbgc_regread(A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+ adreno_cx_dbgc_regread(device, A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
}
/*
@@ -1398,50 +1231,42 @@ static void a6xx_snapshot_debugbus(struct kgsl_device *device,
kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
- a6xx_cx_dbgc = ioremap(device->reg_phys +
- (A6XX_CX_DBGC_CFG_DBGBUS_SEL_A << 2),
- (A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 -
- A6XX_CX_DBGC_CFG_DBGBUS_SEL_A + 1) << 2);
-
- if (a6xx_cx_dbgc) {
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
(0x0 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
- 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+ 0xf << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE_SHIFT);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
- (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
- (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
- (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
- (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
- (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
- (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
- (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
- (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
- (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
- (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
- (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
- (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
- (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
- (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
- (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
- (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+ (0 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+ (1 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+ (2 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+ (3 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+ (4 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+ (5 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+ (6 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+ (7 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+ (8 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+ (9 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+ (10 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+ (11 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+ (12 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+ (13 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+ (14 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+ (15 << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
- _cx_dbgc_regwrite(A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
- } else
- KGSL_DRV_ERR(device, "Unable to ioremap CX_DBGC_CFG block\n");
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ adreno_cx_dbgc_regwrite(device, A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
kgsl_snapshot_add_section(device,
@@ -1457,14 +1282,14 @@ static void a6xx_snapshot_debugbus(struct kgsl_device *device,
snapshot, a6xx_snapshot_vbif_debugbus_block,
(void *) &a6xx_vbif_debugbus_blocks);
- if (a6xx_cx_dbgc) {
+ /* Dump the CX debugbus data if the block exists */
+ if (adreno_is_cx_dbgc_register(device, A6XX_CX_DBGC_CFG_DBGBUS_SEL_A)) {
for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
kgsl_snapshot_add_section(device,
KGSL_SNAPSHOT_SECTION_DEBUGBUS,
snapshot, a6xx_snapshot_cx_dbgc_debugbus_block,
(void *) &a6xx_cx_dbgc_debugbus_blocks[i]);
}
- iounmap(a6xx_cx_dbgc);
}
}
@@ -1769,40 +1594,6 @@ static int _a6xx_crashdump_init_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
return qwords;
}
-static int _a6xx_crashdump_init_non_ctx_dbgahb(uint64_t *ptr, uint64_t *offset)
-{
- int qwords = 0;
- unsigned int i, k;
- unsigned int count;
-
- for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
- struct a6xx_non_ctx_dbgahb_registers *regs =
- &a6xx_non_ctx_dbgahb[i];
-
- regs->offset = *offset;
-
- /* Program the aperture */
- ptr[qwords++] = (regs->statetype & 0xff) << 8;
- ptr[qwords++] = (((uint64_t)A6XX_HLSQ_DBG_READ_SEL << 44)) |
- (1 << 21) | 1;
-
- for (k = 0; k < regs->num_sets; k++) {
- unsigned int start = regs->regs[2 * k];
-
- count = REG_PAIR_COUNT(regs->regs, k);
- ptr[qwords++] =
- a6xx_crashdump_registers.gpuaddr + *offset;
- ptr[qwords++] =
- (((uint64_t)(A6XX_HLSQ_DBG_AHB_READ_APERTURE +
- start - regs->regbase / 4) << 44)) |
- count;
-
- *offset += count * sizeof(unsigned int);
- }
- }
- return qwords;
-}
-
void a6xx_crashdump_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1894,26 +1685,6 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev)
}
}
- /*
- * Calculate the script and data size for non context debug
- * AHB registers
- */
- for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
- struct a6xx_non_ctx_dbgahb_registers *regs =
- &a6xx_non_ctx_dbgahb[i];
-
- /* 16 bytes for programming the aperture */
- script_size += 16;
-
- /* Reading each pair of registers takes 16 bytes */
- script_size += 16 * regs->num_sets;
-
- /* A dword per register read from the cluster list */
- for (k = 0; k < regs->num_sets; k++)
- data_size += REG_PAIR_COUNT(regs->regs, k) *
- sizeof(unsigned int);
- }
-
/* Now allocate the script and data buffers */
/* The script buffers needs 2 extra qwords on the end */
@@ -1964,8 +1735,6 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev)
ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
- ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset);
-
*ptr++ = 0;
*ptr++ = 0;
}
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 902dc0a..0caf55b 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -681,7 +681,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* then set up the timer. If this misses, then preemption is indeed a
* thing and the timer will be set up in due time
*/
- if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d984c6d..b81be8f 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -505,6 +505,8 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
kgsl_drawobj_destroy(list[i]);
}
+ debugfs_remove_recursive(drawctxt->debug_root);
+
/*
* internal_timestamp is set in adreno_ringbuffer_addcmds,
* which holds the device mutex.
@@ -562,8 +564,6 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
mutex_unlock(&device->mutex);
- debugfs_remove_recursive(drawctxt->debug_root);
-
/* wake threads waiting to submit commands from this context */
wake_up_all(&drawctxt->waiting);
wake_up_all(&drawctxt->wq);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 8b283ae..13d71982 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -143,7 +143,7 @@ long adreno_ioctl_helper(struct kgsl_device_private *dev_priv,
if (WARN_ON(_IOC_SIZE(cmds[i].cmd) > sizeof(data))) {
if (__ratelimit(&_rs))
- WARN(1, "data too big for ioctl 0x%08X: %d/%ld\n",
+ WARN(1, "data too big for ioctl 0x%08X: %d/%zu\n",
cmd, _IOC_SIZE(cmds[i].cmd), sizeof(data));
return -EINVAL;
}
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 9ea8069..94fdbc2 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -171,17 +171,23 @@ void adreno_perfcounter_restore(struct adreno_device *adreno_dev)
*/
inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
{
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
struct adreno_perfcount_group *group;
unsigned int counter, groupid;
- int ret;
+ int ret = 0;
if (counters == NULL)
return;
- ret = adreno_perfcntr_active_oob_get(adreno_dev);
+ if (gpudev->oob_set)
+ ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
+ OOB_PERFCNTR_CHECK_MASK,
+ OOB_PERFCNTR_CLEAR_MASK);
+
+ /* if oob_set timeout, clear the mask and return */
if (ret)
- return;
+ goto done;
for (groupid = 0; groupid < counters->group_count; groupid++) {
group = &(counters->groups[groupid]);
@@ -203,7 +209,9 @@ inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
}
}
- adreno_perfcntr_active_oob_put(adreno_dev);
+done:
+ if (gpudev->oob_clear)
+ gpudev->oob_clear(adreno_dev, OOB_PERFCNTR_CLEAR_MASK);
}
static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
@@ -633,25 +641,26 @@ static void _perfcounter_enable_vbif(struct adreno_device *adreno_dev,
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_perfcount_register *reg;
- unsigned int shift = counter << 3;
reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs[counter];
if (adreno_has_gbif(adreno_dev)) {
+ unsigned int shift = counter << 3;
+ unsigned int perfctr_mask = 1 << counter;
/*
* Write 1, followed by 0 to CLR register for
* clearing the counter
*/
kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
- 1 << counter, 1);
+ perfctr_mask, perfctr_mask);
kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
- 1 << counter, 0);
+ perfctr_mask, 0);
/* select the desired countable */
kgsl_regrmw(device, reg->select,
GBIF_PERF_RMW_MASK << shift, countable << shift);
/* enable counter */
kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
- 1 << counter, 1);
+ perfctr_mask, perfctr_mask);
} else {
/*
@@ -680,17 +689,17 @@ static void _perfcounter_enable_vbif_pwr(struct adreno_device *adreno_dev,
reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
if (adreno_has_gbif(adreno_dev)) {
+ unsigned int perfctr_mask = GBIF_PWR_RMW_MASK << counter;
/*
* Write 1, followed by 0 to CLR register for
* clearing the counter
*/
kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
- GBIF_PWR_RMW_MASK << counter, 1);
+ perfctr_mask, perfctr_mask);
kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
- GBIF_PWR_RMW_MASK << counter, 0);
+ perfctr_mask, 0);
/* Enable the counter */
- kgsl_regrmw(device, reg->select,
- GBIF_PWR_RMW_MASK << counter, 1);
+ kgsl_regrmw(device, reg->select, perfctr_mask, perfctr_mask);
} else {
/*
* Write 1, followed by 0 to CLR register for
@@ -759,6 +768,21 @@ static void _power_counter_enable_default(struct adreno_device *adreno_dev,
reg->value = 0;
}
+static inline bool _perfcounter_inline_update(
+ struct adreno_device *adreno_dev, unsigned int group)
+{
+ if (adreno_is_a6xx(adreno_dev)) {
+ if ((group == KGSL_PERFCOUNTER_GROUP_HLSQ) ||
+ (group == KGSL_PERFCOUNTER_GROUP_SP) ||
+ (group == KGSL_PERFCOUNTER_GROUP_TP))
+ return true;
+ else
+ return false;
+ }
+
+ return true;
+}
+
static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters, unsigned int group,
unsigned int counter, unsigned int countable)
@@ -766,6 +790,7 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_perfcount_register *reg;
+ struct adreno_perfcount_group *grp;
int i;
int ret = 0;
@@ -780,15 +805,20 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
if (countable == invalid_countable.countables[i])
return -EACCES;
}
- reg = &(counters->groups[group].regs[counter]);
+ grp = &(counters->groups[group]);
+ reg = &(grp->regs[counter]);
- if (!adreno_is_a6xx(adreno_dev) &&
- test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
+ if (_perfcounter_inline_update(adreno_dev, group) &&
+ test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[0];
unsigned int buf[4];
unsigned int *cmds = buf;
int ret;
+ if (gpudev->perfcounter_update && (grp->flags &
+ ADRENO_PERFCOUNTER_GROUP_RESTORE))
+ gpudev->perfcounter_update(adreno_dev, reg, false);
+
cmds += cp_wait_for_idle(adreno_dev, cmds);
*cmds++ = cp_register(adreno_dev, reg->select, 1);
*cmds++ = countable;
@@ -825,12 +855,16 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
}
} else {
/* Select the desired perfcounter */
- kgsl_regwrite(device, reg->select, countable);
+ if (gpudev->perfcounter_update && (grp->flags &
+ ADRENO_PERFCOUNTER_GROUP_RESTORE))
+ ret = gpudev->perfcounter_update(adreno_dev, reg, true);
+ else
+ kgsl_regwrite(device, reg->select, countable);
}
if (!ret)
reg->value = 0;
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/msm/adreno_perfcounter.h b/drivers/gpu/msm/adreno_perfcounter.h
index 8c4db38..bcbc738 100644
--- a/drivers/gpu/msm/adreno_perfcounter.h
+++ b/drivers/gpu/msm/adreno_perfcounter.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,13 @@ struct adreno_perfcount_group {
#define ADRENO_PERFCOUNTER_GROUP_FIXED BIT(0)
+/*
+ * ADRENO_PERFCOUNTER_GROUP_RESTORE indicates CP needs to restore the select
+ * registers of this perfcounter group as part of preemption and IFPC
+ */
+#define ADRENO_PERFCOUNTER_GROUP_RESTORE BIT(1)
+
+
/**
* adreno_perfcounts: all available perfcounter groups
* @groups: available groups for this device
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fb545e7..01d9f71 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -80,44 +80,6 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
local_irq_restore(flags);
}
-/*
- * Wait time before trying to write the register again.
- * Hopefully the GMU has finished waking up during this delay.
- * This delay must be less than the IFPC main hysteresis or
- * the GMU will start shutting down before we try again.
- */
-#define GMU_WAKEUP_DELAY 10
-/* Max amount of tries to wake up the GMU. */
-#define GMU_WAKEUP_RETRY_MAX 60
-
-/*
- * Check the WRITEDROPPED0 bit in the
- * FENCE_STATUS regsiter to check if the write went
- * through. If it didn't then we retry the write.
- */
-static inline void _gmu_wptr_update_if_dropped(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- unsigned int val, i;
-
- for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
- adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
- &val);
-
- /* If !writedropped, then wptr update was successful */
- if (!(val & 0x1))
- return;
-
- /* Wait a small amount of time before trying again */
- udelay(GMU_WAKEUP_DELAY);
-
- /* Try to write WPTR again */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->_wptr);
- }
-
- dev_err(adreno_dev->dev.dev, "GMU WPTR update timed out\n");
-}
-
static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
@@ -132,15 +94,14 @@ static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
* been submitted.
*/
kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- rb->_wptr);
/*
- * If GMU, ensure the write posted after a possible
+ * Ensure the write posted after a possible
* GMU wakeup (write could have dropped during wakeup)
*/
- if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
- _gmu_wptr_update_if_dropped(adreno_dev, rb);
+ adreno_gmu_fenced_write(adreno_dev,
+ ADRENO_REG_CP_RB_WPTR, rb->_wptr,
+ FENCE_STATUS_WRITEDROPPED0_MASK);
}
}
@@ -425,6 +386,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
struct kgsl_context *context = NULL;
bool secured_ctxt = false;
static unsigned int _seq_cnt;
+ struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
@@ -494,11 +456,11 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_pre_ibsubmit &&
adreno_is_preemption_execution_enabled(adreno_dev))
- total_sizedwords += 22;
+ total_sizedwords += 27;
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_execution_enabled(adreno_dev))
- total_sizedwords += 5;
+ total_sizedwords += 10;
/*
* a5xx uses 64 bit memory address. pm4 commands that involve read/write
@@ -525,14 +487,20 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
total_sizedwords += 9;
- /*
- * WAIT_MEM_WRITES - needed in the stall on fault case
- * to prevent out of order CP operations that can result
- * in a CACHE_FLUSH_TS interrupt storm
- */
- if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
+ /* Don't insert any commands if stall on fault is not supported. */
+ if ((ADRENO_GPUREV(adreno_dev) > 500) && !adreno_is_a510(adreno_dev)) {
+ /*
+ * WAIT_MEM_WRITES - needed in the stall on fault case
+ * to prevent out of order CP operations that can result
+ * in a CACHE_FLUSH_TS interrupt storm
+ */
+ if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
&adreno_dev->ft_pf_policy))
- total_sizedwords += 1;
+ total_sizedwords += 1;
+ }
+
+ if (gpudev->set_marker)
+ total_sizedwords += 4;
ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
if (IS_ERR(ringcmds))
@@ -553,6 +521,14 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
}
+ if (gpudev->set_marker) {
+ /* Firmware versions before 1.49 do not support IFPC markers */
+ if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
+ ringcmds += gpudev->set_marker(ringcmds, IB1LIST_START);
+ else
+ ringcmds += gpudev->set_marker(ringcmds, IFPC_DISABLE);
+ }
+
if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
/* Disable protected mode for the fixup */
*ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
@@ -621,14 +597,18 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (profile_ready)
adreno_profile_postib_processing(adreno_dev, &flags, &ringcmds);
- /*
- * WAIT_MEM_WRITES - needed in the stall on fault case to prevent
- * out of order CP operations that can result in a CACHE_FLUSH_TS
- * interrupt storm
- */
- if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
+ /* Don't insert any commands if stall on fault is not supported. */
+ if ((ADRENO_GPUREV(adreno_dev) > 500) && !adreno_is_a510(adreno_dev)) {
+ /*
+ * WAIT_MEM_WRITES - needed in the stall on fault case
+ * to prevent out of order CP operations that can result
+ * in a CACHE_FLUSH_TS interrupt storm
+ */
+ if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
&adreno_dev->ft_pf_policy))
- *ringcmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 0);
+ *ringcmds++ = cp_packet(adreno_dev,
+ CP_WAIT_MEM_WRITES, 0);
+ }
/*
* Do a unique memory write from the GPU. This can be used in
@@ -667,6 +647,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = timestamp;
}
+ if (gpudev->set_marker) {
+ if (adreno_is_a6xx(adreno_dev) && (fw->version & 0xFFF) < 0x149)
+ ringcmds += gpudev->set_marker(ringcmds, IB1LIST_END);
+ else
+ ringcmds += gpudev->set_marker(ringcmds, IFPC_ENABLE);
+ }
+
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
*ringcmds++ = cp_packet(adreno_dev, CP_SET_CONSTANT, 2);
@@ -780,8 +767,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
-
struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
+ struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
+ bool set_ib1list_marker = false;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
@@ -891,8 +879,16 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
dwords += 8;
}
- if (gpudev->set_marker)
+ /*
+ * Prior to SQE FW version 1.49, there was only one marker for
+ * both preemption and IFPC. Only include the IB1LIST markers if
+ * we are using a firmware that supports them.
+ */
+ if (gpudev->set_marker && numibs && adreno_is_a6xx(adreno_dev) &&
+ ((fw->version & 0xFFF) >= 0x149)) {
+ set_ib1list_marker = true;
dwords += 4;
+ }
if (gpudev->ccu_invalidate)
dwords += 4;
@@ -926,10 +922,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
gpu_ticks_submitted));
}
- if (gpudev->set_marker)
- cmds += gpudev->set_marker(cmds, 1);
-
if (numibs) {
+ if (set_ib1list_marker)
+ cmds += gpudev->set_marker(cmds, IB1LIST_START);
+
list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
@@ -948,14 +944,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* preamble is required on only for first command */
use_preamble = false;
}
+
+ if (set_ib1list_marker)
+ cmds += gpudev->set_marker(cmds, IB1LIST_END);
}
if (gpudev->ccu_invalidate)
cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
- if (gpudev->set_marker)
- cmds += gpudev->set_marker(cmds, 0);
-
if (adreno_is_preemption_execution_enabled(adreno_dev)) {
if (gpudev->preemption_yield_enable)
cmds += gpudev->preemption_yield_enable(cmds);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 72fc5bf..fbee627 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -92,6 +92,8 @@ struct adreno_ringbuffer_pagetable_info {
* @drawctxt_active: The last pagetable that this ringbuffer is set to
* @preemption_desc: The memory descriptor containing
* preemption info written/read by CP
+ * @secure_preemption_desc: The memory descriptor containing
+ * preemption info written/read by CP for secure contexts
* @perfcounter_save_restore_desc: Used by CP to save/restore the perfcounter
* values across preemption
* @pagetable_desc: Memory to hold information about the pagetables being used
@@ -120,6 +122,7 @@ struct adreno_ringbuffer {
struct kgsl_event_group events;
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
+ struct kgsl_memdesc secure_preemption_desc;
struct kgsl_memdesc perfcounter_save_restore_desc;
struct kgsl_memdesc pagetable_desc;
struct adreno_dispatcher_drawqueue dispatch_q;
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index fcf0417..e309ab0 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -29,6 +29,13 @@ struct adreno_sysfs_attribute adreno_attr_##_name = { \
.store = _ ## _name ## _store, \
}
+#define _ADRENO_SYSFS_ATTR_RO(_name, __show) \
+struct adreno_sysfs_attribute adreno_attr_##_name = { \
+ .attr = __ATTR(_name, 0644, __show, NULL), \
+ .show = _ ## _name ## _show, \
+ .store = NULL, \
+}
+
#define ADRENO_SYSFS_ATTR(_a) \
container_of((_a), struct adreno_sysfs_attribute, attr)
@@ -331,6 +338,13 @@ static unsigned int _ifpc_show(struct adreno_device *adreno_dev)
return kgsl_gmu_isenabled(device) && gmu->idle_level >= GPU_HW_IFPC;
}
+static unsigned int _preempt_count_show(struct adreno_device *adreno_dev)
+{
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+
+ return preempt->count;
+}
+
static ssize_t _sysfs_store_u32(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -411,9 +425,13 @@ static ssize_t _sysfs_show_bool(struct device *dev,
#define ADRENO_SYSFS_U32(_name) \
_ADRENO_SYSFS_ATTR(_name, _sysfs_show_u32, _sysfs_store_u32)
+#define ADRENO_SYSFS_RO_U32(_name) \
+ _ADRENO_SYSFS_ATTR_RO(_name, _sysfs_show_u32)
+
static ADRENO_SYSFS_U32(ft_policy);
static ADRENO_SYSFS_U32(ft_pagefault_policy);
static ADRENO_SYSFS_U32(preempt_level);
+static ADRENO_SYSFS_RO_U32(preempt_count);
static ADRENO_SYSFS_BOOL(usesgmem);
static ADRENO_SYSFS_BOOL(skipsaverestore);
static ADRENO_SYSFS_BOOL(ft_long_ib_detect);
@@ -451,6 +469,7 @@ static const struct device_attribute *_attr_list[] = {
&adreno_attr_usesgmem.attr,
&adreno_attr_skipsaverestore.attr,
&adreno_attr_ifpc.attr,
+ &adreno_attr_preempt_count.attr,
NULL,
};
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 31868a0..5d07380 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1805,18 +1805,15 @@ long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
long gpumem_free_entry(struct kgsl_mem_entry *entry)
{
- pid_t ptname = 0;
-
if (!kgsl_mem_entry_set_pend(entry))
return -EBUSY;
trace_kgsl_mem_free(entry);
-
- if (entry->memdesc.pagetable != NULL)
- ptname = entry->memdesc.pagetable->name;
-
- kgsl_memfree_add(entry->priv->pid, ptname, entry->memdesc.gpuaddr,
- entry->memdesc.size, entry->memdesc.flags);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
kgsl_mem_entry_put(entry);
@@ -1835,6 +1832,12 @@ static void gpumem_free_func(struct kgsl_device *device,
/* Free the memory for all event types */
trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context),
timestamp, 0);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
+
kgsl_mem_entry_put(entry);
}
@@ -1928,6 +1931,13 @@ static bool gpuobj_free_fence_func(void *priv)
{
struct kgsl_mem_entry *entry = priv;
+ trace_kgsl_mem_free(entry);
+ kgsl_memfree_add(entry->priv->pid,
+ entry->memdesc.pagetable ?
+ entry->memdesc.pagetable->name : 0,
+ entry->memdesc.gpuaddr, entry->memdesc.size,
+ entry->memdesc.flags);
+
INIT_WORK(&entry->work, _deferred_put);
queue_work(kgsl_driver.mem_workqueue, &entry->work);
return true;
@@ -1960,15 +1970,15 @@ static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
handle = kgsl_sync_fence_async_wait(event.fd,
gpuobj_free_fence_func, entry, NULL, 0);
- /* if handle is NULL the fence has already signaled */
- if (handle == NULL)
- return gpumem_free_entry(entry);
-
if (IS_ERR(handle)) {
kgsl_mem_entry_unset_pend(entry);
return PTR_ERR(handle);
}
+ /* if handle is NULL the fence has already signaled */
+ if (handle == NULL)
+ gpuobj_free_fence_func(entry);
+
return 0;
}
@@ -2284,7 +2294,8 @@ static long _gpuobj_map_useraddr(struct kgsl_device *device,
param->flags &= KGSL_MEMFLAGS_GPUREADONLY
| KGSL_CACHEMODE_MASK
| KGSL_MEMTYPE_MASK
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
/* Specifying SECURE is an explicit error */
if (param->flags & KGSL_MEMFLAGS_SECURE)
@@ -2378,7 +2389,12 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
| KGSL_MEMFLAGS_SECURE
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
+
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
entry->memdesc.flags = param->flags;
@@ -2663,7 +2679,13 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
| KGSL_MEMTYPE_MASK
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
- | KGSL_MEMFLAGS_SECURE;
+ | KGSL_MEMFLAGS_SECURE
+ | KGSL_MEMFLAGS_IOCOHERENT;
+
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ param->flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
+
entry->memdesc.flags = ((uint64_t) param->flags)
| KGSL_MEMFLAGS_FORCE_32BIT;
@@ -2846,10 +2868,8 @@ static inline bool check_full_flush(size_t size, int op)
bool ret = (kgsl_driver.full_cache_threshold != 0) &&
(size >= kgsl_driver.full_cache_threshold) &&
(op == KGSL_GPUMEM_CACHE_FLUSH);
- if (ret) {
- trace_kgsl_mem_sync_full_cache(actual_count, op_size);
+ if (ret)
flush_cache_all();
- }
return ret;
}
#endif
@@ -2913,8 +2933,10 @@ long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
entries[actual_count++] = entry;
full_flush = check_full_flush(op_size, param->op);
- if (full_flush)
+ if (full_flush) {
+ trace_kgsl_mem_sync_full_cache(actual_count, op_size);
break;
+ }
last_id = id;
}
@@ -3002,8 +3024,10 @@ long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
size += (entries[i]->memdesc.size - objs[i].offset);
full_flush = check_full_flush(size, objs[i].op);
- if (full_flush)
+ if (full_flush) {
+ trace_kgsl_mem_sync_full_cache(i, size);
break;
+ }
ptr += sizeof(*objs);
}
@@ -3060,6 +3084,7 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
int ret;
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_mem_entry *entry;
+ struct kgsl_mmu *mmu = &dev_priv->device->mmu;
unsigned int align;
flags &= KGSL_MEMFLAGS_GPUREADONLY
@@ -3068,14 +3093,15 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
| KGSL_MEMALIGN_MASK
| KGSL_MEMFLAGS_USE_CPU_MAP
| KGSL_MEMFLAGS_SECURE
- | KGSL_MEMFLAGS_FORCE_32BIT;
+ | KGSL_MEMFLAGS_FORCE_32BIT
+ | KGSL_MEMFLAGS_IOCOHERENT;
/* Turn off SVM if the system doesn't support it */
- if (!kgsl_mmu_use_cpu_map(&dev_priv->device->mmu))
+ if (!kgsl_mmu_use_cpu_map(mmu))
flags &= ~((uint64_t) KGSL_MEMFLAGS_USE_CPU_MAP);
/* Return not supported error if secure memory isn't enabled */
- if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
+ if (!kgsl_mmu_is_secured(mmu) &&
(flags & KGSL_MEMFLAGS_SECURE)) {
dev_WARN_ONCE(dev_priv->device->dev, 1,
"Secure memory not supported");
@@ -3104,11 +3130,15 @@ struct kgsl_mem_entry *gpumem_alloc_entry(
flags = kgsl_filter_cachemode(flags);
+ /* Disable IO coherence if it is not supported on the chip */
+ if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
+ flags &= ~((uint64_t)KGSL_MEMFLAGS_IOCOHERENT);
+
entry = kgsl_mem_entry_create();
if (entry == NULL)
return ERR_PTR(-ENOMEM);
- if (MMU_FEATURE(&dev_priv->device->mmu, KGSL_MMU_NEED_GUARD_PAGE))
+ if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
if (flags & KGSL_MEMFLAGS_SECURE)
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index f80da79..023e63e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -75,6 +75,7 @@
* Used Data:
* Offset: Length(bytes): What
* 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR
+ * 0x10: 8 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 CTXT RESTORE ADDR
*/
/* Shadow global helpers */
@@ -82,6 +83,13 @@
#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
+#define SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id) \
+ (SCRATCH_RPTR_OFFSET(KGSL_PRIORITY_MAX_RB_LEVELS) + \
+ ((id) * sizeof(uint64_t)))
+#define SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(dev, id) \
+ ((dev)->scratch.gpuaddr + \
+ SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(id))
+
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index e339a08..834706a 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -303,6 +303,7 @@ static int print_sparse_mem_entry(int id, void *ptr, void *data)
if (!(m->flags & KGSL_MEMFLAGS_SPARSE_VIRT))
return 0;
+ spin_lock(&entry->bind_lock);
node = rb_first(&entry->bind_tree);
while (node != NULL) {
@@ -313,6 +314,7 @@ static int print_sparse_mem_entry(int id, void *ptr, void *data)
obj->v_off, obj->size, obj->p_off);
node = rb_next(node);
}
+ spin_unlock(&entry->bind_lock);
seq_putc(s, '\n');
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 0fd7286..0a7424a 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -18,6 +18,7 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
+#include <linux/io.h>
#include <soc/qcom/cmd-db.h>
#include "kgsl_device.h"
@@ -59,8 +60,6 @@ struct gmu_vma {
unsigned int image_start;
};
-static void gmu_snapshot(struct kgsl_device *device);
-
struct gmu_iommu_context {
const char *name;
struct device *dev;
@@ -183,8 +182,8 @@ static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
if (ret) {
dev_err(&gmu->pdev->dev,
- "gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
- md->gmuaddr, md->physaddr);
+ "gmu map err: gaddr=0x%016llX, paddr=0x%pa\n",
+ md->gmuaddr, &(md->physaddr));
free_gmu_mem(gmu, md);
}
@@ -532,9 +531,8 @@ static int rpmh_arc_cmds(struct gmu_device *gmu,
* them until we get to the end of the buffer or hit the
* zero padding.
*/
- for (arc->num = 1; arc->num <= len; arc->num++) {
- if (arc->num == len ||
- arc->val[arc->num - 1] >= arc->val[arc->num])
+ for (arc->num = 1; arc->num < (len >> 1); arc->num++) {
+ if (arc->val[arc->num - 1] >= arc->val[arc->num])
break;
}
@@ -1340,7 +1338,7 @@ static int gmu_suspend(struct kgsl_device *device)
return 0;
}
-static void gmu_snapshot(struct kgsl_device *device)
+void gmu_snapshot(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct gmu_device *gmu = &device->gmu;
@@ -1622,3 +1620,46 @@ void gmu_remove(struct kgsl_device *device)
device->gmu.pdev = NULL;
}
+
+/*
+ * adreno_gmu_fenced_write() - Write a fenced register, retrying dropped writes
+ * @adreno_dev: Pointer to the Adreno device that owns the GMU
+ * @offset: 32bit register enum that is to be written
+ * @val: The value to be written to the register
+ * @fence_mask: The value to poll the fence status register
+ *
+ * Check the WRITEDROPPED0/1 bit in the FENCE_STATUS register to check if
+ * the write to the fenced register went through. If it didn't then we retry
+ * the write until it goes through or we time out.
+ */
+void adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
+ enum adreno_regs offset, unsigned int val,
+ unsigned int fence_mask)
+{
+ unsigned int status, i;
+
+ adreno_writereg(adreno_dev, offset, val);
+
+ if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
+ return;
+
+ for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
+ adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
+ &status);
+
+ /*
+ * If !writedropped0/1, then the write to fenced register
+ * was successful
+ */
+ if (!(status & fence_mask))
+ return;
+ /* Wait a small amount of time before trying again */
+ udelay(GMU_WAKEUP_DELAY_US);
+
+ /* Try to write the fenced register again */
+ adreno_writereg(adreno_dev, offset, val);
+ }
+
+ dev_err(adreno_dev->dev.dev,
+ "GMU fenced register write timed out: reg %x\n", offset);
+}
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e0c857f..90e87e4 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -30,8 +30,11 @@
GMU_INT_HOST_AHB_BUS_ERR | \
GMU_INT_FENCE_ERR)
-#define MAX_GMUFW_SIZE 0x2000 /* in dwords */
-#define FENCE_RANGE_MASK ((0x1 << 31) | (0x0A << 18) | (0x8A0))
+#define MAX_GMUFW_SIZE 0x2000 /* in bytes */
+#define FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0))
+
+#define FENCE_STATUS_WRITEDROPPED0_MASK 0x1
+#define FENCE_STATUS_WRITEDROPPED1_MASK 0x2
/* Bitmask for GPU low power mode enabling and hysterisis*/
#define SPTP_ENABLE_MASK (BIT(2) | BIT(0))
@@ -78,6 +81,19 @@
#define OOB_PERFCNTR_SET_MASK BIT(17)
#define OOB_PERFCNTR_CHECK_MASK BIT(25)
#define OOB_PERFCNTR_CLEAR_MASK BIT(25)
+#define OOB_PREEMPTION_SET_MASK BIT(18)
+#define OOB_PREEMPTION_CHECK_MASK BIT(26)
+#define OOB_PREEMPTION_CLEAR_MASK BIT(26)
+
+/*
+ * Wait time before trying to write the register again.
+ * Hopefully the GMU has finished waking up during this delay.
+ * This delay must be less than the IFPC main hysteresis or
+ * the GMU will start shutting down before we try again.
+ */
+#define GMU_WAKEUP_DELAY_US 10
+/* Max amount of tries to wake up the GMU. */
+#define GMU_WAKEUP_RETRY_MAX 60
/* Bits for the flags field in the gmu structure */
enum gmu_flags {
@@ -233,6 +249,7 @@ struct gmu_device {
unsigned int fault_count;
};
+void gmu_snapshot(struct kgsl_device *device);
bool kgsl_gmu_isenabled(struct kgsl_device *device);
int gmu_probe(struct kgsl_device *device);
void gmu_remove(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 2cc60b5..eef5f45 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -594,12 +594,12 @@ int hfi_start(struct gmu_device *gmu, uint32_t boot_state)
gmu->ver = ver;
if (major != FW_VER_MAJOR(ver))
- dev_err(dev, "FW version major %d error (expect %d)\n",
+ WARN_ONCE(1, "FW version major %d error (expect %d)\n",
FW_VER_MAJOR(ver),
adreno_dev->gpucore->gpmu_major);
if (minor > FW_VER_MINOR(ver))
- dev_err(dev, "FW version minor %d error (expect %d)\n",
+ WARN_ONCE(1, "FW version minor %d error (expect %d)\n",
FW_VER_MINOR(ver),
adreno_dev->gpucore->gpmu_minor);
diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c
index bfce4d4..9b02e19 100644
--- a/drivers/gpu/msm/kgsl_ioctl.c
+++ b/drivers/gpu/msm/kgsl_ioctl.c
@@ -145,7 +145,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
if (_IOC_SIZE(cmds[nr].cmd) > sizeof(data)) {
if (__ratelimit(&_rs))
- WARN(1, "data too big for ioctl 0x%08X: %d/%ld\n",
+ WARN(1, "data too big for ioctl 0x%08X: %d/%zu\n",
cmd, _IOC_SIZE(cmds[nr].cmd), sizeof(data));
return -EINVAL;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index c02046a..ab3ab31 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -110,7 +110,7 @@ struct global_pt_entry {
};
static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
-static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
+static int secure_global_size;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
@@ -126,9 +126,11 @@ void kgsl_print_global_pt_entries(struct seq_file *s)
if (memdesc == NULL)
continue;
- seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n",
- memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1,
- memdesc->size, global_pt_entries[i].name);
+ seq_printf(s, "0x%pK-0x%pK %16llu %s\n",
+ (uint64_t *)(uintptr_t) memdesc->gpuaddr,
+ (uint64_t *)(uintptr_t) (memdesc->gpuaddr +
+ memdesc->size - 1), memdesc->size,
+ global_pt_entries[i].name);
}
}
@@ -160,24 +162,33 @@ static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
return 0;
}
-static void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_pagetable
- *pagetable)
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *entry)
{
- struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+ if (!kgsl_mmu_is_secured(&device->mmu))
+ return;
- if (entry != NULL)
- kgsl_mmu_unmap(pagetable, entry);
+ if (entry != NULL && entry->pagetable->name == KGSL_MMU_SECURE_PT)
+ kgsl_mmu_unmap(entry->pagetable, entry);
}
-static int kgsl_map_global_secure_pt_entry(struct kgsl_pagetable *pagetable)
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *entry)
{
int ret = 0;
- struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
+
+ if (!kgsl_mmu_is_secured(&device->mmu))
+ return -ENOTSUPP;
if (entry != NULL) {
+ struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
entry->pagetable = pagetable;
+ entry->gpuaddr = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+
ret = kgsl_mmu_map(pagetable, entry);
+ if (ret == 0)
+ secure_global_size += entry->size;
}
return ret;
}
@@ -222,13 +233,6 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
global_pt_count++;
}
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc)
-{
- memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
- kgsl_global_secure_pt_entry = memdesc;
-}
-
struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
{
return &gpu_qdss_desc;
@@ -1066,7 +1070,6 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
if (pt->name == KGSL_MMU_SECURE_PT) {
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
- kgsl_iommu_unmap_global_secure_pt_entry(pt);
} else {
ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
kgsl_iommu_unmap_globals(pt);
@@ -1087,13 +1090,10 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
struct kgsl_iommu_pt *pt)
{
- unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
- kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
+ pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
@@ -1118,20 +1118,15 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
struct kgsl_iommu_pt *pt)
{
- unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
- kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->va_start = KGSL_IOMMU_SECURE_BASE;
pt->va_end = KGSL_IOMMU_SECURE_END;
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_SECURE_BASE +
- secure_global_size;
+ pt->va_end = KGSL_IOMMU_SECURE_BASE;
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -1361,8 +1356,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
+ (cb_num << KGSL_IOMMU_CB_SHIFT);
- ret = kgsl_map_global_secure_pt_entry(pt);
-
done:
if (ret)
_free_pt(ctx, pt);
@@ -1606,6 +1599,18 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
kgsl_setup_qdss_desc(device);
kgsl_setup_qtimer_desc(device);
+ if (!mmu->secured)
+ goto done;
+
+ mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
+ KGSL_MMU_SECURE_PT);
+ if (IS_ERR(mmu->securepagetable)) {
+ status = PTR_ERR(mmu->securepagetable);
+ mmu->securepagetable = NULL;
+ } else if (mmu->securepagetable == NULL) {
+ status = -ENOMEM;
+ }
+
done:
if (status)
kgsl_iommu_close(mmu);
@@ -1687,17 +1692,9 @@ static int _setup_secure_context(struct kgsl_mmu *mmu)
if (ctx->dev == NULL || !mmu->secured)
return 0;
- if (mmu->securepagetable == NULL) {
- mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
- KGSL_MMU_SECURE_PT);
- if (IS_ERR(mmu->securepagetable)) {
- ret = PTR_ERR(mmu->securepagetable);
- mmu->securepagetable = NULL;
- return ret;
- } else if (mmu->securepagetable == NULL) {
- return -ENOMEM;
- }
- }
+ if (mmu->securepagetable == NULL)
+ return -ENOMEM;
+
iommu_pt = mmu->securepagetable->priv;
ret = _attach_pt(iommu_pt, ctx);
@@ -1838,6 +1835,9 @@ static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
flags |= IOMMU_PRIV;
+ if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
+ flags |= IOMMU_CACHE;
+
return flags;
}
@@ -2500,6 +2500,13 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
end = pt->va_end;
}
+ /*
+ * When mapping secure buffers, adjust the start of the va range
+ * to the end of secure global buffers.
+ */
+ if (kgsl_memdesc_is_secured(memdesc))
+ start += secure_global_size;
+
spin_lock(&pagetable->lock);
addr = _get_unmapped_area(pagetable, start, end, size, align);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 7a8ab74..430a140 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -138,6 +138,8 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
#define KGSL_MMU_NEED_GUARD_PAGE BIT(9)
+/* The device supports IO coherency */
+#define KGSL_MMU_IO_COHERENT BIT(10)
/**
* struct kgsl_mmu - Master definition for KGSL MMU devices
@@ -174,7 +176,9 @@ int kgsl_mmu_start(struct kgsl_device *device);
struct kgsl_pagetable *kgsl_mmu_getpagetable_ptbase(struct kgsl_mmu *mmu,
u64 ptbase);
-void kgsl_add_global_secure_entry(struct kgsl_device *device,
+int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc);
+void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
struct kgsl_memdesc *memdesc);
void kgsl_print_global_pt_entries(struct seq_file *s);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index c31a85b..5da8c1d 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -422,6 +422,24 @@ void kgsl_pool_free_page(struct page *page)
__free_pages(page, page_order);
}
+/*
+ * Return true if the pool of specified page size is supported
+ * or no pools are supported otherwise return false.
+ */
+bool kgsl_pool_avaialable(int page_size)
+{
+ int i;
+
+ if (!kgsl_num_pools)
+ return true;
+
+ for (i = 0; i < kgsl_num_pools; i++)
+ if (ilog2(page_size >> PAGE_SHIFT) == kgsl_pools[i].pool_order)
+ return true;
+
+ return false;
+}
+
static void kgsl_pool_reserve_pages(void)
{
int i, j;
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index d55e1ad..8091afb 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,5 +40,6 @@ void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
unsigned int pages_len, unsigned int *align);
void kgsl_pool_free_page(struct page *p);
+bool kgsl_pool_avaialable(int size);
#endif /* __KGSL_POOL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 20590ea..32fac88 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -372,7 +372,7 @@ static bool popp_stable(struct kgsl_device *device)
}
if (nap_time && go_time) {
percent_nap = 100 * nap_time;
- do_div(percent_nap, nap_time + go_time);
+ div64_s64(percent_nap, nap_time + go_time);
}
trace_kgsl_popp_nap(device, (int)nap_time / 1000, nap,
percent_nap);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 5061f6a..de5df54 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -27,7 +27,6 @@
#include "kgsl_device.h"
#include "kgsl_log.h"
#include "kgsl_mmu.h"
-#include "kgsl_pool.h"
/*
* The user can set this from debugfs to force failed memory allocations to
@@ -1057,7 +1056,7 @@ void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
strlcpy(name, memtype_str[type], name_size);
else
- snprintf(name, name_size, "unknown(%3d)", type);
+ snprintf(name, name_size, "VK/others(%3d)", type);
}
EXPORT_SYMBOL(kgsl_get_memory_usage);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 5466a49..55bb34f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -368,6 +368,8 @@ static inline void kgsl_free_sgt(struct sg_table *sgt)
}
}
+#include "kgsl_pool.h"
+
/**
* kgsl_get_page_size() - Get supported pagesize
* @size: Size of the page
@@ -378,11 +380,14 @@ static inline void kgsl_free_sgt(struct sg_table *sgt)
#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
static inline int kgsl_get_page_size(size_t size, unsigned int align)
{
- if (align >= ilog2(SZ_1M) && size >= SZ_1M)
+ if (align >= ilog2(SZ_1M) && size >= SZ_1M &&
+ kgsl_pool_avaialable(SZ_1M))
return SZ_1M;
- else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
+ else if (align >= ilog2(SZ_64K) && size >= SZ_64K &&
+ kgsl_pool_avaialable(SZ_64K))
return SZ_64K;
- else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
+ else if (align >= ilog2(SZ_8K) && size >= SZ_8K &&
+ kgsl_pool_avaialable(SZ_8K))
return SZ_8K;
else
return PAGE_SIZE;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 8008e06..865e7c2 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -604,7 +604,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
- int args_len = sizeof(__u8) + /* optional ReportID byte */
+ int args_len = sizeof(__u8) + /* ReportID */
+ sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af6..7838343 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -971,6 +971,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
@@ -993,10 +995,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 0c535d0..d72dfb2 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -611,8 +611,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
/* Try to find an already-probed interface from the same device */
list_for_each_entry(data, &wacom_udev_list, list) {
- if (compare_device_paths(hdev, data->dev, '/'))
+ if (compare_device_paths(hdev, data->dev, '/')) {
+ kref_get(&data->kref);
return data;
+ }
}
/* Fallback to finding devices that appear to be "siblings" */
@@ -712,6 +714,9 @@ static int wacom_led_control(struct wacom *wacom)
if (!wacom->led.groups)
return -ENOTSUPP;
+ if (wacom->wacom_wac.features.type == REMOTE)
+ return -ENOTSUPP;
+
if (wacom->wacom_wac.pid) { /* wireless connected */
report_id = WAC_CMD_WL_LED_CONTROL;
buf_size = 13;
@@ -2433,6 +2438,8 @@ static void wacom_remove(struct hid_device *hdev)
if (hdev->bus == BUS_BLUETOOTH)
device_remove_file(&hdev->dev, &dev_attr_speed);
+ wacom_release_resources(wacom);
+
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index c6a922e..db951c4 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -559,8 +559,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
keys = data[9] & 0x07;
}
} else {
- buttons = ((data[6] & 0x10) << 10) |
- ((data[5] & 0x10) << 9) |
+ buttons = ((data[6] & 0x10) << 5) |
+ ((data[5] & 0x10) << 4) |
((data[6] & 0x0F) << 4) |
(data[5] & 0x0F);
}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index e47d8c9..75126e4 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -161,6 +161,10 @@ static void fcopy_send_data(struct work_struct *dummy)
out_src = smsg_out;
break;
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index dee93ec..84e0994 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -208,11 +208,13 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
+#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
-#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_FROM_REG(val) ((val) * 19)
+#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
+
+#define FAN_BASE(div) (480000 >> (div))
+#define FAN_CLAMP(val, div) clamp_val(val, FAN_BASE(div) / 255, \
+ FAN_BASE(div))
+#define FAN_TO_REG(val, div) ((val) == 0 ? 0 : \
+ DIV_ROUND_CLOSEST(480000, \
+ FAN_CLAMP(val, div) << (div)))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -513,9 +520,9 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
- (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
+#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
+#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
+#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index d883483..16a3e7d 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -757,14 +757,14 @@ int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
return -EINVAL;
if (adc_properties->adc_hc) {
- /* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ /* (ADC code * vref_vadc (1.875V)) / scale_code */
if (adc_code > QPNP_VADC_HC_MAX_CODE)
adc_code = 0;
pmic_voltage = (int64_t) adc_code;
pmic_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
pmic_voltage = div64_s64(pmic_voltage,
- QPNP_VADC_HC_VREF_CODE);
+ adc_properties->full_scale_code);
} else {
if (!chan_properties->adc_graph[CALIB_ABSOLUTE].dy)
return -EINVAL;
@@ -777,7 +777,8 @@ int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
adc_chan_result->measurement = pmic_voltage*
chan_properties->offset_gain_denominator;
- do_div(adc_chan_result->measurement,
+ adc_chan_result->measurement =
+ div64_s64(adc_chan_result->measurement,
chan_properties->offset_gain_numerator * 2);
} else
adc_chan_result->measurement = 0;
@@ -804,10 +805,12 @@ int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
high_output = (param->high_temp + KELVINMIL_DEGMIL) * 2;
if (param->adc_tm_hc) {
- low_output *= QPNP_VADC_HC_VREF_CODE;
- do_div(low_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
- high_output *= QPNP_VADC_HC_VREF_CODE;
- do_div(high_output, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+ low_output *= param->full_scale_code;
+ low_output = div64_s64(low_output,
+ (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+ high_output *= param->full_scale_code;
+ high_output = div64_s64(high_output,
+ (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
} else {
rc = qpnp_get_vadc_gain_and_offset(chip, &btm_param,
CALIB_ABSOLUTE);
@@ -822,7 +825,7 @@ int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
sign = 1;
low_output = -low_output;
}
- do_div(low_output, QPNP_ADC_625_UV);
+ low_output = div64_s64(low_output, QPNP_ADC_625_UV);
if (sign)
low_output = -low_output;
low_output += btm_param.adc_gnd;
@@ -834,7 +837,7 @@ int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
sign = 1;
high_output = -high_output;
}
- do_div(high_output, QPNP_ADC_625_UV);
+ high_output = div64_s64(high_output, QPNP_ADC_625_UV);
if (sign)
high_output = -high_output;
high_output += btm_param.adc_gnd;
@@ -869,14 +872,14 @@ int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
return -EINVAL;
if (adc_properties->adc_hc) {
- /* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ /* (code * vref_vadc (1.875V) * 1000) / (scale_code * 1000) */
if (adc_code > QPNP_VADC_HC_MAX_CODE)
adc_code = 0;
xo_thm_voltage = (int64_t) adc_code;
xo_thm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
xo_thm_voltage = div64_s64(xo_thm_voltage,
- QPNP_VADC_HC_VREF_CODE * 1000);
+ adc_properties->full_scale_code * 1000);
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
xo_thm_voltage, &adc_chan_result->physical);
@@ -885,7 +888,7 @@ int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
adc_properties, chan_properties, &xo_thm_voltage);
if (chan_properties->calib_type == CALIB_ABSOLUTE)
- do_div(xo_thm_voltage, 1000);
+ xo_thm_voltage = div64_s64(xo_thm_voltage, 1000);
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
ARRAY_SIZE(adcmap_100k_104ef_104fb),
@@ -1068,14 +1071,14 @@ int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
return -EINVAL;
if (adc_properties->adc_hc) {
- /* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ /* (code * vref_vadc (1.875V) * 1000) / (scale code * 1000) */
if (adc_code > QPNP_VADC_HC_MAX_CODE)
adc_code = 0;
therm_voltage = (int64_t) adc_code;
therm_voltage *= (int64_t) (adc_properties->adc_vdd_reference
* 1000);
therm_voltage = div64_s64(therm_voltage,
- (QPNP_VADC_HC_VREF_CODE * 1000));
+ (adc_properties->full_scale_code * 1000));
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
@@ -1085,7 +1088,7 @@ int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
adc_properties, chan_properties, &therm_voltage);
if (chan_properties->calib_type == CALIB_ABSOLUTE)
- do_div(therm_voltage, 1000);
+ therm_voltage = div64_s64(therm_voltage, 1000);
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
ARRAY_SIZE(adcmap_100k_104ef_104fb),
@@ -1105,13 +1108,13 @@ int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
int negative_offset = 0;
if (adc_properties->adc_hc) {
- /* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ /* (ADC code * vref_vadc (1.875V)) / full_scale_code */
if (reg > QPNP_VADC_HC_MAX_CODE)
reg = 0;
adc_voltage = (int64_t) reg;
adc_voltage *= QPNP_VADC_HC_VDD_REFERENCE_MV;
adc_voltage = div64_s64(adc_voltage,
- QPNP_VADC_HC_VREF_CODE);
+ adc_properties->full_scale_code);
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
adc_voltage, result);
@@ -1124,7 +1127,7 @@ int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
adc_voltage = -adc_voltage;
}
- do_div(adc_voltage, param1.dy);
+ adc_voltage = div64_s64(adc_voltage, param1.dy);
qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
ARRAY_SIZE(adcmap_100k_104ef_104fb),
@@ -1151,8 +1154,9 @@ int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
param->low_thr_temp, ¶m->low_thr_voltage);
if (rc)
return rc;
- param->low_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
- do_div(param->low_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+ param->low_thr_voltage *= adc_properties->full_scale_code;
+ param->low_thr_voltage = div64_s64(param->low_thr_voltage,
+ QPNP_VADC_HC_VDD_REFERENCE_MV);
rc = qpnp_adc_map_temp_voltage(
adcmap_100k_104ef_104fb_1875_vref,
@@ -1160,8 +1164,9 @@ int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
param->high_thr_temp, ¶m->high_thr_voltage);
if (rc)
return rc;
- param->high_thr_voltage *= QPNP_VADC_HC_VREF_CODE;
- do_div(param->high_thr_voltage, QPNP_VADC_HC_VDD_REFERENCE_MV);
+ param->high_thr_voltage *= adc_properties->full_scale_code;
+ param->high_thr_voltage = div64_s64(param->high_thr_voltage,
+ QPNP_VADC_HC_VDD_REFERENCE_MV);
} else {
qpnp_get_vadc_gain_and_offset(chip, ¶m1, CALIB_RATIOMETRIC);
@@ -1172,7 +1177,8 @@ int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
return rc;
param->low_thr_voltage *= param1.dy;
- do_div(param->low_thr_voltage, param1.adc_vref);
+ param->low_thr_voltage = div64_s64(param->low_thr_voltage,
+ param1.adc_vref);
param->low_thr_voltage += param1.adc_gnd;
rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
@@ -1182,7 +1188,8 @@ int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
return rc;
param->high_thr_voltage *= param1.dy;
- do_div(param->high_thr_voltage, param1.adc_vref);
+ param->high_thr_voltage = div64_s64(param->high_thr_voltage,
+ param1.adc_vref);
param->high_thr_voltage += param1.adc_gnd;
}
@@ -1241,13 +1248,13 @@ int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
return -EINVAL;
if (adc_properties->adc_hc) {
- /* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ /* (ADC code * vref_vadc (1.875V)) / full_scale_code */
if (adc_code > QPNP_VADC_HC_MAX_CODE)
adc_code = 0;
scale_voltage = (int64_t) adc_code;
scale_voltage *= (adc_properties->adc_vdd_reference * 1000);
scale_voltage = div64_s64(scale_voltage,
- QPNP_VADC_HC_VREF_CODE);
+ adc_properties->full_scale_code);
} else {
qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
chan_properties, &scale_voltage);
@@ -1281,11 +1288,11 @@ int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
qpnp_get_vadc_gain_and_offset(chip, &usb_param, CALIB_RATIOMETRIC);
*low_threshold = param->low_thr * usb_param.dy;
- do_div(*low_threshold, usb_param.adc_vref);
+ *low_threshold = div64_s64(*low_threshold, usb_param.adc_vref);
*low_threshold += usb_param.adc_gnd;
*high_threshold = param->high_thr * usb_param.dy;
- do_div(*high_threshold, usb_param.adc_vref);
+ *high_threshold = div64_s64(*high_threshold, usb_param.adc_vref);
*high_threshold += usb_param.adc_gnd;
pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
@@ -1305,14 +1312,16 @@ int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
if (param->adc_tm_hc) {
low_thr = (param->low_thr/param->gain_den);
low_thr *= param->gain_num;
- low_thr *= QPNP_VADC_HC_VREF_CODE;
- do_div(low_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+ low_thr *= param->full_scale_code;
+ low_thr = div64_s64(low_thr,
+ (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
*low_threshold = low_thr;
high_thr = (param->high_thr/param->gain_den);
high_thr *= param->gain_num;
- high_thr *= QPNP_VADC_HC_VREF_CODE;
- do_div(high_thr, (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
+ high_thr *= param->full_scale_code;
+ high_thr = div64_s64(high_thr,
+ (QPNP_VADC_HC_VDD_REFERENCE_MV * 1000));
*high_threshold = high_thr;
} else {
rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param,
@@ -1327,7 +1336,7 @@ int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
low_thr = -low_thr;
}
low_thr = low_thr * param->gain_num;
- do_div(low_thr, QPNP_ADC_625_UV);
+ low_thr = div64_s64(low_thr, QPNP_ADC_625_UV);
if (sign)
low_thr = -low_thr;
*low_threshold = low_thr + vbatt_param.adc_gnd;
@@ -1340,7 +1349,7 @@ int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
high_thr = -high_thr;
}
high_thr = high_thr * param->gain_num;
- do_div(high_thr, QPNP_ADC_625_UV);
+ high_thr = div64_s64(high_thr, QPNP_ADC_625_UV);
if (sign)
high_thr = -high_thr;
*high_threshold = high_thr + vbatt_param.adc_gnd;
@@ -1387,7 +1396,7 @@ int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *chip,
low_thr = -low_thr;
}
low_thr = low_thr * chan_prop->offset_gain_numerator;
- do_div(low_thr, QPNP_ADC_625_UV);
+ low_thr = div64_s64(low_thr, QPNP_ADC_625_UV);
if (sign)
low_thr = -low_thr;
*low_threshold = low_thr + vbatt_param.adc_gnd;
@@ -1400,7 +1409,7 @@ int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *chip,
high_thr = -high_thr;
}
high_thr = high_thr * chan_prop->offset_gain_numerator;
- do_div(high_thr, QPNP_ADC_625_UV);
+ high_thr = div64_s64(high_thr, QPNP_ADC_625_UV);
if (sign)
high_thr = -high_thr;
*high_threshold = high_thr + vbatt_param.adc_gnd;
@@ -1442,7 +1451,7 @@ int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("low_output:%lld\n", low_output);
low_output *= btm_param.dy;
- do_div(low_output, btm_param.adc_vref);
+ low_output = div64_s64(low_output, btm_param.adc_vref);
low_output += btm_param.adc_gnd;
rc = qpnp_adc_map_voltage_temp(
@@ -1457,7 +1466,7 @@ int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("high_output:%lld\n", high_output);
high_output *= btm_param.dy;
- do_div(high_output, btm_param.adc_vref);
+ high_output = div64_s64(high_output, btm_param.adc_vref);
high_output += btm_param.adc_gnd;
/* btm low temperature correspondes to high voltage threshold */
@@ -1500,7 +1509,7 @@ int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("low_output:%lld\n", low_output);
low_output *= btm_param.dy;
- do_div(low_output, btm_param.adc_vref);
+ low_output = div64_s64(low_output, btm_param.adc_vref);
low_output += btm_param.adc_gnd;
rc = qpnp_adc_map_voltage_temp(
@@ -1515,7 +1524,7 @@ int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("high_output:%lld\n", high_output);
high_output *= btm_param.dy;
- do_div(high_output, btm_param.adc_vref);
+ high_output = div64_s64(high_output, btm_param.adc_vref);
high_output += btm_param.adc_gnd;
/* btm low temperature correspondes to high voltage threshold */
@@ -1558,7 +1567,7 @@ int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("low_output:%lld\n", low_output);
low_output *= btm_param.dy;
- do_div(low_output, btm_param.adc_vref);
+ low_output = div64_s64(low_output, btm_param.adc_vref);
low_output += btm_param.adc_gnd;
rc = qpnp_adc_map_voltage_temp(
@@ -1573,7 +1582,7 @@ int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *chip,
pr_debug("high_output:%lld\n", high_output);
high_output *= btm_param.dy;
- do_div(high_output, btm_param.adc_vref);
+ high_output = div64_s64(high_output, btm_param.adc_vref);
high_output += btm_param.adc_gnd;
/* btm low temperature correspondes to high voltage threshold */
@@ -1616,7 +1625,7 @@ int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
pr_debug("low_output:%lld\n", low_output);
low_output *= btm_param.dy;
- do_div(low_output, btm_param.adc_vref);
+ low_output = div64_s64(low_output, btm_param.adc_vref);
low_output += btm_param.adc_gnd;
rc = qpnp_adc_map_voltage_temp(
@@ -1631,7 +1640,7 @@ int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
pr_debug("high_output:%lld\n", high_output);
high_output *= btm_param.dy;
- do_div(high_output, btm_param.adc_vref);
+ high_output = div64_s64(high_output, btm_param.adc_vref);
high_output += btm_param.adc_gnd;
/* btm low temperature correspondes to high voltage threshold */
@@ -2027,11 +2036,11 @@ int32_t qpnp_adc_get_devicetree_data(struct platform_device *pdev,
pr_err("Invalid adc vdd reference property\n");
return -EINVAL;
}
- rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
- &adc_prop->bitresolution);
+ rc = of_property_read_u32(node, "qcom,adc-full-scale-code",
+ &adc_prop->full_scale_code);
if (rc) {
- pr_err("Invalid adc bit resolution property\n");
- return -EINVAL;
+ pr_debug("Use default value of 0x4000 for full scale\n");
+ adc_prop->full_scale_code = QPNP_VADC_HC_VREF_CODE;
}
adc_qpnp->adc_prop = adc_prop;
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index 3399c27..a5075ba 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -123,13 +123,14 @@ static int stm_trace_ost_header(void __iomem *ch_addr, uint32_t flags,
static int stm_trace_data_header(void __iomem *addr)
{
- char hdr[16];
+ char hdr[24];
int len = 0;
- *(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 1);
+ *(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 2);
*(uint16_t *)(hdr + 2) = STM_HEADER_MAGIC;
*(uint32_t *)(hdr + 4) = raw_smp_processor_id();
*(uint64_t *)(hdr + 8) = sched_clock();
+ *(uint64_t *)(hdr + 16) = task_tgid_nr(get_current());
len += stm_ost_send(addr, hdr, sizeof(hdr));
len += stm_ost_send(addr, current->comm, TASK_COMM_LEN);
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 5269890..2dd60ee 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1125,7 +1125,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
stm_source_link_drop(src);
- device_destroy(&stm_source_class, src->dev.devt);
+ device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 0b86c61..c925a69 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1180,6 +1180,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
static int at91_twi_resume_noirq(struct device *dev)
{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
int ret;
if (!pm_runtime_status_suspended(dev)) {
@@ -1191,6 +1192,8 @@ static int at91_twi_resume_noirq(struct device *dev)
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
+ at91_init_twi_bus(twi_dev);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 8477292..7aea288 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
data->word = dma_buffer[0] | (dma_buffer[1] << 8);
break;
case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_I2C_BLOCK_DATA:
if (desc->rxbytes != dma_buffer[0] + 1)
return -EMSGSIZE;
memcpy(data->block, dma_buffer, desc->rxbytes);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
}
return 0;
}
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 2aa61bb..73b97c7 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
- writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c21ca7b..8f1c5f2 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -94,6 +94,12 @@
#define SB800_PIIX4_PORT_IDX_ALT 0x2e
#define SB800_PIIX4_PORT_IDX_SEL 0x2f
#define SB800_PIIX4_PORT_IDX_MASK 0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT 1
+
+/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
/* insmod parameters */
@@ -149,6 +155,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
*/
static DEFINE_MUTEX(piix4_mutex_sb800);
static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
" port 0", " port 2", " port 3", " port 4"
};
@@ -347,7 +355,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
- piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ switch (PIIX4_dev->device) {
+ case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+ break;
+ case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+ default:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+ break;
+ }
} else {
mutex_lock(&piix4_mutex_sb800);
outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +375,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_port_sel_sb800 = (port_sel & 0x01) ?
SB800_PIIX4_PORT_IDX_ALT :
SB800_PIIX4_PORT_IDX;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
mutex_unlock(&piix4_mutex_sb800);
}
@@ -616,8 +638,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
port = adapdata->port;
- if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
- outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+ if ((smba_en_lo & piix4_port_mask_sb800) != port)
+ outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
SB800_PIIX4_SMB_IDX + 1);
retval = piix4_access(adap, addr, flags, read_write,
@@ -706,7 +728,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
- adapdata->port = port << 1;
+ adapdata->port = port << piix4_port_shift_sb800;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 793cbb5..1bfb98e 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -72,7 +72,10 @@
#define I2C_NACK GP_IRQ1
#define I2C_BUS_PROTO GP_IRQ3
#define I2C_ARB_LOST GP_IRQ4
-#define DM_I2C_RX_ERR ((GP_IRQ1 | GP_IRQ3 | GP_IRQ4) >> 4)
+#define DM_I2C_CB_ERR ((BIT(GP_IRQ1) | BIT(GP_IRQ3) | BIT(GP_IRQ4)) \
+ << 5)
+
+#define I2C_AUTO_SUSPEND_DELAY 250
enum i2c_se_mode {
UNINITIALIZED,
@@ -223,7 +226,7 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
struct i2c_msg *cur = gi2c->cur;
if (!cur || (m_stat & M_CMD_FAILURE_EN) ||
- (dm_rx_st & (DM_I2C_RX_ERR)) ||
+ (dm_rx_st & (DM_I2C_CB_ERR)) ||
(m_stat & M_CMD_ABORT_EN)) {
if (m_stat & M_GP_IRQ_1_EN)
@@ -349,13 +352,33 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
m_stat, cb_str->cb_event);
}
+static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
+ char *xfer)
+{
+ struct geni_i2c_dev *gi2c = cb->userdata;
+
+ if (cb->status & DM_I2C_CB_ERR) {
+ GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+ "%s TCE Unexpected Err, stat:0x%x\n",
+ xfer, cb->status);
+ if (cb->status & (BIT(GP_IRQ1) << 5))
+ geni_i2c_err(gi2c, I2C_NACK);
+ if (cb->status & (BIT(GP_IRQ3) << 5))
+ geni_i2c_err(gi2c, I2C_BUS_PROTO);
+ if (cb->status & (BIT(GP_IRQ4) << 5))
+ geni_i2c_err(gi2c, I2C_ARB_LOST);
+ }
+}
+
static void gi2c_gsi_tx_cb(void *ptr)
{
struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
struct geni_i2c_dev *gi2c = tx_cb->userdata;
- if (!(gi2c->cur->flags & I2C_M_RD))
+ if (!(gi2c->cur->flags & I2C_M_RD)) {
+ gi2c_gsi_cb_err(tx_cb, "TX");
complete(&gi2c->xfer);
+ }
}
static void gi2c_gsi_rx_cb(void *ptr)
@@ -364,17 +387,7 @@ static void gi2c_gsi_rx_cb(void *ptr)
struct geni_i2c_dev *gi2c = rx_cb->userdata;
if (gi2c->cur->flags & I2C_M_RD) {
- if (rx_cb->status & DM_I2C_RX_ERR) {
- GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
- "RX TCE Unexpected Err, stat:0x%x\n",
- rx_cb->status);
- if (rx_cb->status & GP_IRQ1)
- geni_i2c_err(gi2c, I2C_NACK);
- if (rx_cb->status & GP_IRQ3)
- geni_i2c_err(gi2c, I2C_BUS_PROTO);
- if (rx_cb->status & GP_IRQ4)
- geni_i2c_err(gi2c, I2C_ARB_LOST);
- }
+ gi2c_gsi_cb_err(rx_cb, "RX");
complete(&gi2c->xfer);
}
}
@@ -385,12 +398,22 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
int i, ret = 0, timeout = 0;
+ ret = pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
+ gi2c->i2c_rsc.geni_gpio_active);
+ if (ret) {
+ GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+ "%s: Error %d pinctrl_select_state active\n",
+ __func__, ret);
+ return ret;
+ }
+
if (!gi2c->tx_c) {
gi2c->tx_c = dma_request_slave_channel(gi2c->dev, "tx");
if (!gi2c->tx_c) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"tx dma req slv chan ret :%d\n", ret);
- return -EIO;
+ ret = -EIO;
+ goto geni_i2c_gsi_xfer_out;
}
gi2c->tx_ev.init.callback = gi2c_ev_cb;
gi2c->tx_ev.init.cb_param = gi2c;
@@ -400,7 +423,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (ret) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"tx dma slave config ret :%d\n", ret);
- return ret;
+ goto geni_i2c_gsi_xfer_out;
}
}
if (!gi2c->rx_c) {
@@ -408,7 +431,8 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (!gi2c->rx_c) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"rx dma req slv chan ret :%d\n", ret);
- return -EIO;
+ ret = -EIO;
+ goto geni_i2c_gsi_xfer_out;
}
gi2c->rx_ev.init.cb_param = gi2c;
gi2c->rx_ev.init.callback = gi2c_ev_cb;
@@ -418,7 +442,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (ret) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"rx dma slave config ret :%d\n", ret);
- return ret;
+ goto geni_i2c_gsi_xfer_out;
}
}
@@ -502,7 +526,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"prep_slave_sg for rx failed\n");
gi2c->err = -ENOMEM;
- return gi2c->err;
+ goto geni_i2c_gsi_xfer_out;
}
gi2c->rx_desc->callback = gi2c_gsi_rx_cb;
gi2c->rx_desc->callback_param = &gi2c->rx_cb;
@@ -534,7 +558,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"prep_slave_sg for tx failed\n");
gi2c->err = -ENOMEM;
- return gi2c->err;
+ goto geni_i2c_gsi_xfer_out;
}
gi2c->tx_desc->callback = gi2c_gsi_tx_cb;
gi2c->tx_desc->callback_param = &gi2c->tx_cb;
@@ -559,10 +583,15 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (gi2c->err) {
dmaengine_terminate_all(gi2c->tx_c);
gi2c->cfg_sent = 0;
- return gi2c->err;
+ goto geni_i2c_gsi_xfer_out;
}
}
- return gi2c->err;
+geni_i2c_gsi_xfer_out:
+ if (!ret && gi2c->err)
+ ret = gi2c->err;
+ pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
+ gi2c->i2c_rsc.geni_gpio_sleep);
+ return ret;
}
static int geni_i2c_xfer(struct i2c_adapter *adap,
@@ -686,7 +715,9 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
geni_i2c_txn_ret:
if (ret == 0)
ret = num;
- pm_runtime_put_sync(gi2c->dev);
+
+ pm_runtime_mark_last_busy(gi2c->dev);
+ pm_runtime_put_autosuspend(gi2c->dev);
gi2c->cur = NULL;
gi2c->err = 0;
dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -830,6 +861,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
pm_runtime_set_suspended(gi2c->dev);
+ pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gi2c->dev);
pm_runtime_enable(gi2c->dev);
i2c_add_adapter(&gi2c->adap);
@@ -858,10 +891,13 @@ static int geni_i2c_runtime_suspend(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
- if (gi2c->se_mode == FIFO_SE_DMA)
+ if (gi2c->se_mode == FIFO_SE_DMA) {
disable_irq(gi2c->irq);
-
- se_geni_resources_off(&gi2c->i2c_rsc);
+ se_geni_resources_off(&gi2c->i2c_rsc);
+ } else {
+ /* GPIO is set to sleep state already. So just clocks off */
+ se_geni_clks_off(&gi2c->i2c_rsc);
+ }
return 0;
}
@@ -876,7 +912,12 @@ static int geni_i2c_runtime_resume(struct device *dev)
snprintf(ipc_name, I2C_NAME_SIZE, "i2c-%d", gi2c->adap.nr);
gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
}
- ret = se_geni_resources_on(&gi2c->i2c_rsc);
+
+ if (gi2c->se_mode != GSI_ONLY)
+ ret = se_geni_resources_on(&gi2c->i2c_rsc);
+ else
+ ret = se_geni_clks_on(&gi2c->i2c_rsc);
+
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 6263ea8..8f11d34 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -80,6 +80,7 @@
#define ICIER_TEIE 0x40
#define ICIER_RIE 0x20
#define ICIER_NAKIE 0x10
+#define ICIER_SPIE 0x08
#define ICSR2_NACKF 0x10
@@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
return IRQ_NONE;
}
- if (riic->is_last || riic->err)
+ if (riic->is_last || riic->err) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
-
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
+ }
return IRQ_HANDLED;
}
@@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
if (riic->bytes_left == 1) {
/* STOP must come before we set ACKBT! */
- if (riic->is_last)
+ if (riic->is_last) {
+ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ }
riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
- writeb(0, riic->base + RIIC_ICIER);
- complete(&riic->msg_done);
} else {
riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
}
@@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t riic_stop_isr(int irq, void *data)
+{
+ struct riic_dev *riic = data;
+
+ /* read back registers to confirm writes have fully propagated */
+ writeb(0, riic->base + RIIC_ICSR2);
+ readb(riic->base + RIIC_ICSR2);
+ writeb(0, riic->base + RIIC_ICIER);
+ readb(riic->base + RIIC_ICIER);
+
+ complete(&riic->msg_done);
+
+ return IRQ_HANDLED;
+}
+
static u32 riic_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
@@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
+ { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
};
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e6706a0..47c3d7f 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int i, ret = -1;
+ int i, ret;
unsigned long long scale_uv;
u32 id;
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
- ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+ ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d10bd0c..22c4c17 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -177,6 +177,34 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length)
+{
+ uint8_t *buf;
+ unsigned int size;
+ int ret;
+
+ size = DIV_ROUND_UP(reset_length, 8);
+ buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memset(buf, 0xff, size);
+ ret = spi_write(sigma_delta->spi, buf, size);
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 7fd2494..64799ad 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,8 +28,6 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
-#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -123,16 +121,6 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
- unsigned long address)
-{
- /* channels other than GPADC do not need to switch TS pin */
- if (address != AXP288_GP_ADC_H)
- return 0;
-
- return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
-}
-
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -143,16 +131,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
- chan->address)) {
- dev_err(&indio_dev->dev, "GPADC mode\n");
- ret = -EINVAL;
- break;
- }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
- chan->address))
- dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -162,15 +141,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int axp288_adc_set_state(struct regmap *regmap)
-{
- /* ADC should be always enabled for internal FG to function */
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
- return -EIO;
-
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
-}
-
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -199,7 +169,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = axp288_adc_set_state(axp20x->regmap);
+ ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index 72b32c1..ea264fa 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -401,6 +401,7 @@ static const struct of_device_id mx25_gcq_ids[] = {
{ .compatible = "fsl,imx25-gcq", },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mx25_gcq_ids);
static struct platform_driver mx25_gcq_driver = {
.driver = {
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 634717a..071dd23 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -17,6 +17,8 @@
* MCP3204
* MCP3208
* ------------
+ * 13 bit converter
+ * MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
- bool differential, int device_index)
+ bool differential, int device_index, int *val)
{
int ret;
@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
switch (device_index) {
case mcp3001:
- return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ return 0;
case mcp3002:
case mcp3004:
case mcp3008:
- return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ return 0;
case mcp3201:
- return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ return 0;
case mcp3202:
case mcp3204:
case mcp3208:
- return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ return 0;
case mcp3301:
- return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+ *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+ | adc->rx_buf[1], 12);
+ return 0;
default:
return -EINVAL;
}
@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
- channel->differential, device_index);
-
+ channel->differential, device_index, val);
if (ret < 0)
goto out;
- *val = ret;
ret = IIO_VAL_INT;
break;
@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
+ spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 0c74869..7ffc5db 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -866,8 +866,10 @@ static int twl4030_madc_probe(struct platform_device *pdev)
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
- if (IS_ERR(madc->usb3v1))
- return -ENODEV;
+ if (IS_ERR(madc->usb3v1)) {
+ ret = -ENODEV;
+ goto err_i2c;
+ }
ret = regulator_enable(madc->usb3v1);
if (ret)
@@ -876,11 +878,13 @@ static int twl4030_madc_probe(struct platform_device *pdev)
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
- goto err_i2c;
+ goto err_usb3v1;
}
return 0;
+err_usb3v1:
+ regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 0a6beb3..56cf590 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
- goto err_free_samplerate_trigger;
+ goto err_clk_disable_unprepare;
ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
@@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(irq, indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->samplerate_trigger);
@@ -1277,8 +1279,6 @@ static int xadc_probe(struct platform_device *pdev)
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
err_device_free:
kfree(indio_dev->channels);
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd..7ec2a0b 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
st->event_en = state;
else
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed..c5bc731 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -306,8 +306,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
- if (ret)
+ if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+ return ret;
+ }
len = snprintf(buf, sizeof(buf), "0x%X\n", val);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index f2b3bd7..b4f643f 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -222,29 +222,39 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct mag3110_data *data = iio_priv(indio_dev);
- int rate;
+ int rate, ret;
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
- if (rate < 0)
- return -EINVAL;
+ if (rate < 0) {
+ ret = -EINVAL;
+ break;
+ }
data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK;
data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
- return i2c_smbus_write_byte_data(data->client,
+ ret = i2c_smbus_write_byte_data(data->client,
MAG3110_CTRL_REG1, data->ctrl_reg1);
+ break;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -10000 || val > 10000)
- return -EINVAL;
- return i2c_smbus_write_word_swapped(data->client,
+ if (val < -10000 || val > 10000) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = i2c_smbus_write_word_swapped(data->client,
MAG3110_OFF_X + 2 * chan->scan_index, val << 1);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
}
static irqreturn_t mag3110_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index f762eb8..19aa957 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -558,7 +558,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
- ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
BMP280_OSRS_PRESS_MASK |
BMP280_MODE_MASK,
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f..8cc7156 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -308,6 +308,7 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
{
struct ms5611_state *st = iio_priv(indio_dev);
const struct ms5611_osr *osr = NULL;
+ int ret;
if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
return -EINVAL;
@@ -321,12 +322,11 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
if (!osr)
return -EINVAL;
- mutex_lock(&st->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&st->lock);
- return -EBUSY;
- }
+ mutex_lock(&st->lock);
if (chan->type == IIO_TEMP)
st->temp_osr = osr;
@@ -334,6 +334,8 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
st->pressure_osr = osr;
mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+
return 0;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 1f06282..9ea147f 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -387,14 +387,18 @@ static int sx9500_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx9500_data *data = iio_priv(indio_dev);
+ int ret;
switch (chan->type) {
case IIO_PROXIMITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- return sx9500_read_proximity(data, chan, val);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = sx9500_read_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9500_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index 572bc6f..e18f12b 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
if (!trig_info) {
ret = -ENOMEM;
- goto error_put_trigger;
+ goto error_free_trigger;
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
@@ -83,8 +83,8 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
free_irq(irq, trig);
error_free_trig_info:
kfree(trig_info);
-error_put_trigger:
- iio_trigger_put(trig);
+error_free_trigger:
+ iio_trigger_free(trig);
error_ret:
return ret;
}
@@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev)
iio_trigger_unregister(trig);
free_irq(trig_info->irq, trig);
kfree(trig_info);
- iio_trigger_put(trig);
+ iio_trigger_free(trig);
return 0;
}
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 3dfab2b..202e8b8 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id)
return 0;
out2:
- iio_trigger_put(t->trig);
+ iio_trigger_free(t->trig);
free_t:
kfree(t);
out1:
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 63e82f8..fb4ce03 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -446,15 +446,10 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
if (ret < 0)
- goto put;
+ return ret;
rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&fl6.saddr)) {
- ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
- &fl6.daddr, 0, &fl6.saddr);
- if (ret)
- goto put;
-
+ if (ipv6_addr_any(&src_in->sin6_addr)) {
src_in->sin6_family = AF_INET6;
src_in->sin6_addr = fl6.saddr;
}
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
*pdst = dst;
return 0;
-put:
- dst_release(dst);
- return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9398143..6512a55 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2577,9 +2577,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
+out:
if (parent_ep)
c4iw_put_ep(&parent_ep->com);
-out:
return 0;
}
@@ -3441,7 +3441,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
-
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 34cfd34..a3dd27b 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -297,14 +297,15 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
* The resulting value will be rounded down to the closest
* multiple of dd->rcv_entries.group_size.
*/
- rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
- sizeof(*rcd->egrbufs.buffers),
- GFP_KERNEL);
+ rcd->egrbufs.buffers = kzalloc_node(
+ rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
+ GFP_KERNEL, numa);
if (!rcd->egrbufs.buffers)
goto bail;
- rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
- sizeof(*rcd->egrbufs.rcvtids),
- GFP_KERNEL);
+ rcd->egrbufs.rcvtids = kzalloc_node(
+ rcd->egrbufs.count *
+ sizeof(*rcd->egrbufs.rcvtids),
+ GFP_KERNEL, numa);
if (!rcd->egrbufs.rcvtids)
goto bail;
rcd->egrbufs.size = eager_buffer_size;
@@ -322,8 +323,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
- rcd->opstats = kzalloc(sizeof(*rcd->opstats),
- GFP_KERNEL);
+ rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
+ GFP_KERNEL, numa);
if (!rcd->opstats)
goto bail;
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 4ac8f33..335613a1 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -673,12 +673,12 @@ MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested
#define UNSET_PSET 255
#define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
-#define DEFAULT_MCP_PSET 4 /* MCP HFI */
+#define DEFAULT_MCP_PSET 6 /* MCP HFI */
static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
-static uint pcie_ctle = 1; /* discrete on, integrated off */
+static uint pcie_ctle = 3; /* discrete on, integrated on */
module_param(pcie_ctle, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 4bd5b5c..613074e9 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -551,7 +551,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
case IB_WR_RDMA_WRITE:
if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /* FALLTHROUGH */
+ goto no_flow_control;
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
@@ -559,6 +559,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
+no_flow_control:
put_ib_reth_vaddr(
wqe->rdma_wr.remote_addr,
&ohdr->u.rc.reth);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 282c9fb..786f640 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -325,6 +325,27 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
+ int index, enum ib_gid_type *gid_type)
+{
+ struct ib_gid_attr attr;
+ union ib_gid gid;
+ int ret;
+
+ ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
+ if (ret)
+ return ret;
+
+ if (!attr.ndev)
+ return -ENODEV;
+
+ dev_put(attr.ndev);
+
+ *gid_type = attr.gid_type;
+
+ return 0;
+}
+
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7d68990..86e1e08 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -892,6 +892,8 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int index);
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
+ int index, enum ib_gid_type *gid_type);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index aee3942..2665414 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2226,6 +2226,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
+ enum ib_gid_type gid_type;
if (attr_mask & IB_QP_PKEY_INDEX)
path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
@@ -2244,10 +2245,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ll == IB_LINK_LAYER_ETHERNET) {
if (!(ah->ah_flags & IB_AH_GRH))
return -EINVAL;
+ err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
+ &gid_type);
+ if (err)
+ return err;
memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ path->ecn_dscp = (ah->grh.traffic_class >> 2) & 0x3f;
} else {
path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->fl_free_ar |=
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ded2717..cedb447 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7080,7 +7080,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
unsigned long flags;
while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;
/*
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index f3fe787..c1523f9 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -357,7 +357,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_RDMA_WRITE:
if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /* FALLTHROUGH */
+ goto no_flow_control;
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
@@ -365,7 +365,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
-
+no_flow_control:
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6bac071..ee26a1b 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -274,6 +274,7 @@ static u32 alloc_index(struct rxe_pool *pool)
if (index >= range)
index = find_first_zero_bit(pool->table, range);
+ WARN_ON_ONCE(index >= range);
set_bit(index, pool->table);
pool->last = index;
return index + pool->min_index;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9f46be5..9d08478 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -633,6 +633,7 @@ int rxe_requester(void *arg)
goto exit;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
} else if (wqe->wr.opcode == IB_WR_REG_MR) {
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4d2a346..39101b1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -418,7 +418,7 @@ static enum resp_states check_length(struct rxe_qp *qp,
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
- struct rxe_mem *mem;
+ struct rxe_mem *mem = NULL;
u64 va;
u32 rkey;
u32 resid;
@@ -452,38 +452,38 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
if (!mem) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err1;
+ goto err;
}
if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err1;
+ goto err;
}
if (mem_check_range(mem, va, resid)) {
state = RESPST_ERR_RKEY_VIOLATION;
- goto err2;
+ goto err;
}
if (pkt->mask & RXE_WRITE_MASK) {
if (resid > mtu) {
if (pktlen != mtu || bth_pad(pkt)) {
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
qp->resp.resid = mtu;
} else {
if (pktlen != resid) {
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
if ((bth_pad(pkt) != (0x3 & (-resid)))) {
/* This case may not be exactly that
* but nothing else fits.
*/
state = RESPST_ERR_LENGTH;
- goto err2;
+ goto err;
}
}
}
@@ -493,9 +493,9 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
qp->resp.mr = mem;
return RESPST_EXECUTE;
-err2:
- rxe_drop_ref(mem);
-err1:
+err:
+ if (mem)
+ rxe_drop_ref(mem);
return state;
}
@@ -893,6 +893,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
+ rxe_drop_ref(rmr);
}
wc->qp = &qp->ibqp;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0616a65..7576166 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1392,7 +1392,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
- list_del(&p->list);
+ list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 08c4b02..183db0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1302,7 +1302,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1466,7 +1466,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
@@ -1551,7 +1551,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1593,7 +1593,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 57eadd2..93b50be 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -165,11 +165,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
out:
up_write(&ppriv->vlan_rwsem);
+ rtnl_unlock();
+
if (result)
free_netdev(priv->dev);
- rtnl_unlock();
-
return result;
}
@@ -193,7 +193,6 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- unregister_netdevice(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
@@ -201,6 +200,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
up_write(&ppriv->vlan_rwsem);
+ if (dev) {
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
+
rtnl_unlock();
if (dev) {
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 0fd612d..aaf43be 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -87,7 +87,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
struct mpr121_touchkey *mpr121 = dev_id;
struct i2c_client *client = mpr121->client;
struct input_dev *input = mpr121->input_dev;
- unsigned int key_num, key_val, pressed;
+ unsigned long bit_changed;
+ unsigned int key_num;
int reg;
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
@@ -105,19 +106,23 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg &= TOUCH_STATUS_MASK;
/* use old press bit to figure out which bit changed */
- key_num = ffs(reg ^ mpr121->statusbits) - 1;
- pressed = reg & (1 << key_num);
+ bit_changed = reg ^ mpr121->statusbits;
mpr121->statusbits = reg;
+ for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
+ unsigned int key_val, pressed;
- key_val = mpr121->keycodes[key_num];
+ pressed = reg & BIT(key_num);
+ key_val = mpr121->keycodes[key_num];
- input_event(input, EV_MSC, MSC_SCAN, key_num);
- input_report_key(input, key_val, pressed);
+ input_event(input, EV_MSC, MSC_SCAN, key_num);
+ input_report_key(input, key_val, pressed);
+
+ dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+ pressed ? "pressed" : "released");
+
+ }
input_sync(input);
- dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
- pressed ? "pressed" : "released");
-
out:
return IRQ_HANDLED;
}
@@ -231,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 66d1499..e174102 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -30,10 +30,7 @@
#include <linux/delay.h>
#include <linux/completion.h>
-#if defined(CONFIG_FB)
-#include <linux/notifier.h>
-#include <linux/fb.h>
-#endif
+#include <linux/msm_drm_notify.h>
#define HBTP_INPUT_NAME "hbtp_input"
#define DISP_COORDS_SIZE 2
@@ -41,6 +38,7 @@
#define HBTP_PINCTRL_VALID_STATE_CNT (2)
#define HBTP_HOLD_DURATION_US (10)
#define HBTP_PINCTRL_DDIC_SEQ_NUM (4)
+#define HBTP_WAIT_TIMEOUT_MS 2000
struct hbtp_data {
struct platform_device *pdev;
@@ -50,9 +48,7 @@ struct hbtp_data {
struct mutex sensormutex;
struct hbtp_sensor_data *sensor_data;
bool touch_status[HBTP_MAX_FINGER];
-#if defined(CONFIG_FB)
- struct notifier_block fb_notif;
-#endif
+ struct notifier_block dsi_panel_notif;
struct pinctrl *ts_pinctrl;
struct pinctrl_state *gpio_state_active;
struct pinctrl_state *gpio_state_suspend;
@@ -61,7 +57,7 @@ struct hbtp_data {
struct pinctrl_state *ddic_rst_state_suspend;
u32 ts_pinctrl_seq_delay;
u32 ddic_pinctrl_seq_delay[HBTP_PINCTRL_DDIC_SEQ_NUM];
- u32 fb_resume_seq_delay;
+ u32 dsi_panel_resume_seq_delay;
bool lcd_on;
bool power_suspended;
bool power_sync_enabled;
@@ -99,64 +95,51 @@ static struct hbtp_data *hbtp;
static struct kobject *sensor_kobject;
-#if defined(CONFIG_FB)
-static int hbtp_fb_suspend(struct hbtp_data *ts);
-static int hbtp_fb_early_resume(struct hbtp_data *ts);
-static int hbtp_fb_resume(struct hbtp_data *ts);
-#endif
+static int hbtp_dsi_panel_suspend(struct hbtp_data *ts);
+static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts);
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
+static int dsi_panel_notifier_callback(struct notifier_block *self,
unsigned long event, void *data)
{
int blank;
- struct fb_event *evdata = data;
+ struct msm_drm_notifier *evdata = data;
struct hbtp_data *hbtp_data =
- container_of(self, struct hbtp_data, fb_notif);
+ container_of(self, struct hbtp_data, dsi_panel_notif);
- if (evdata && evdata->data && hbtp_data &&
- (event == FB_EARLY_EVENT_BLANK ||
- event == FB_R_EARLY_EVENT_BLANK)) {
+ if (!evdata || (evdata->id != 0))
+ return 0;
+
+ if (hbtp_data && (event == MSM_DRM_EARLY_EVENT_BLANK)) {
blank = *(int *)(evdata->data);
- if (event == FB_EARLY_EVENT_BLANK) {
- if (blank == FB_BLANK_UNBLANK) {
- pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
+ if (blank == MSM_DRM_BLANK_UNBLANK) {
+ pr_debug("%s: receives EARLY_BLANK:UNBLANK\n",
__func__);
- hbtp_data->lcd_on = true;
- hbtp_fb_early_resume(hbtp_data);
- } else if (blank == FB_BLANK_POWERDOWN) {
- pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
- __func__);
- hbtp_data->lcd_on = false;
- }
- } else if (event == FB_R_EARLY_EVENT_BLANK) {
- if (blank == FB_BLANK_UNBLANK) {
- pr_debug("%s: receives R_EARLY_BALNK:UNBLANK\n",
- __func__);
- hbtp_data->lcd_on = false;
- hbtp_fb_suspend(hbtp_data);
- } else if (blank == FB_BLANK_POWERDOWN) {
- pr_debug("%s: receives R_EARLY_BALNK:POWERDOWN\n",
- __func__);
- hbtp_data->lcd_on = true;
- }
+ hbtp_data->lcd_on = true;
+ hbtp_dsi_panel_early_resume(hbtp_data);
+ } else if (blank == MSM_DRM_BLANK_POWERDOWN) {
+ pr_debug("%s: receives EARLY_BLANK:POWERDOWN\n",
+ __func__);
+ hbtp_data->lcd_on = false;
+ } else {
+ pr_err("%s: receives wrong data EARLY_BLANK:%d\n",
+ __func__, blank);
}
}
- if (evdata && evdata->data && hbtp_data &&
- event == FB_EVENT_BLANK) {
+ if (hbtp_data && event == MSM_DRM_EVENT_BLANK) {
blank = *(int *)(evdata->data);
- if (blank == FB_BLANK_POWERDOWN) {
+ if (blank == MSM_DRM_BLANK_POWERDOWN) {
pr_debug("%s: receives BLANK:POWERDOWN\n", __func__);
- hbtp_fb_suspend(hbtp_data);
- } else if (blank == FB_BLANK_UNBLANK) {
+ hbtp_dsi_panel_suspend(hbtp_data);
+ } else if (blank == MSM_DRM_BLANK_UNBLANK) {
pr_debug("%s: receives BLANK:UNBLANK\n", __func__);
- hbtp_fb_resume(hbtp_data);
+ } else {
+ pr_err("%s: receives wrong data BLANK:%d\n",
+ __func__, blank);
}
}
return 0;
}
-#endif
static ssize_t hbtp_sensor_roi_show(struct file *dev, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t pos,
@@ -1134,7 +1117,7 @@ static int hbtp_pinctrl_init(struct hbtp_data *data)
}
if (of_property_read_u32(np, "qcom,fb-resume-delay-us",
- &data->fb_resume_seq_delay)) {
+ &data->dsi_panel_resume_seq_delay)) {
dev_warn(&data->pdev->dev, "Can not find fb resume seq delay\n");
}
@@ -1164,7 +1147,7 @@ static int hbtp_pinctrl_init(struct hbtp_data *data)
return rc;
}
-static int hbtp_fb_suspend(struct hbtp_data *ts)
+static int hbtp_dsi_panel_suspend(struct hbtp_data *ts)
{
int rc;
char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
@@ -1189,29 +1172,30 @@ static int hbtp_fb_suspend(struct hbtp_data *ts)
goto err_power_disable;
}
ts->power_suspended = true;
- }
+ if (ts->input_dev) {
+ kobject_uevent_env(&ts->input_dev->dev.kobj,
+ KOBJ_OFFLINE, envp);
- if (ts->input_dev) {
- kobject_uevent_env(&ts->input_dev->dev.kobj,
- KOBJ_OFFLINE, envp);
-
- if (ts->power_sig_enabled) {
- pr_debug("%s: power_sig is enabled, wait for signal\n",
- __func__);
- mutex_unlock(&hbtp->mutex);
- rc = wait_for_completion_interruptible(
- &hbtp->power_suspend_sig);
- if (rc != 0) {
- pr_err("%s: wait for suspend is interrupted\n",
+ if (ts->power_sig_enabled) {
+ pr_debug("%s: power_sig is enabled, wait for signal\n",
+ __func__);
+ mutex_unlock(&hbtp->mutex);
+ rc = wait_for_completion_interruptible_timeout(
+ &hbtp->power_suspend_sig,
+ msecs_to_jiffies(HBTP_WAIT_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("%s: wait for suspend is interrupted\n",
+ __func__);
+ }
+ mutex_lock(&hbtp->mutex);
+ pr_debug("%s: Wait is done for suspend\n",
+ __func__);
+ } else {
+ pr_debug("%s: power_sig is NOT enabled\n",
__func__);
}
- mutex_lock(&hbtp->mutex);
- pr_debug("%s: Wait is done for suspend\n", __func__);
- } else {
- pr_debug("%s: power_sig is NOT enabled", __func__);
}
}
-
mutex_unlock(&hbtp->mutex);
return 0;
err_power_disable:
@@ -1221,15 +1205,12 @@ static int hbtp_fb_suspend(struct hbtp_data *ts)
return rc;
}
-static int hbtp_fb_early_resume(struct hbtp_data *ts)
+static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts)
{
char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
int rc;
mutex_lock(&hbtp->mutex);
-
- pr_debug("%s: hbtp_fb_early_resume\n", __func__);
-
if (ts->pdev && ts->power_sync_enabled) {
pr_debug("%s: power_sync is enabled\n", __func__);
if (!ts->power_suspended) {
@@ -1261,9 +1242,10 @@ static int hbtp_fb_early_resume(struct hbtp_data *ts)
pr_err("%s: power_sig is enabled, wait for signal\n",
__func__);
mutex_unlock(&hbtp->mutex);
- rc = wait_for_completion_interruptible(
- &hbtp->power_resume_sig);
- if (rc != 0) {
+ rc = wait_for_completion_interruptible_timeout(
+ &hbtp->power_resume_sig,
+ msecs_to_jiffies(HBTP_WAIT_TIMEOUT_MS));
+ if (rc <= 0) {
pr_err("%s: wait for resume is interrupted\n",
__func__);
}
@@ -1274,12 +1256,13 @@ static int hbtp_fb_early_resume(struct hbtp_data *ts)
__func__);
}
- if (ts->fb_resume_seq_delay) {
- usleep_range(ts->fb_resume_seq_delay,
- ts->fb_resume_seq_delay +
+ if (ts->dsi_panel_resume_seq_delay) {
+ usleep_range(ts->dsi_panel_resume_seq_delay,
+ ts->dsi_panel_resume_seq_delay +
HBTP_HOLD_DURATION_US);
- pr_err("%s: fb_resume_seq_delay = %u\n",
- __func__, ts->fb_resume_seq_delay);
+ pr_err("%s: dsi_panel_resume_seq_delay = %u\n",
+ __func__,
+ ts->dsi_panel_resume_seq_delay);
}
}
}
@@ -1293,22 +1276,6 @@ static int hbtp_fb_early_resume(struct hbtp_data *ts)
return rc;
}
-static int hbtp_fb_resume(struct hbtp_data *ts)
-{
- char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL};
-
- mutex_lock(&hbtp->mutex);
- if (!ts->power_sync_enabled) {
- pr_debug("%s: power_sync is disabled, send uevent\n", __func__);
- if (ts->input_dev) {
- kobject_uevent_env(&ts->input_dev->dev.kobj,
- KOBJ_ONLINE, envp);
- }
- }
- mutex_unlock(&hbtp->mutex);
- return 0;
-}
-
static int hbtp_pdev_probe(struct platform_device *pdev)
{
int error;
@@ -1434,14 +1401,16 @@ static ssize_t hbtp_display_pwr_store(struct kobject *kobj,
mutex_unlock(&hbtp->mutex);
return ret;
}
- if (status) {
- pr_debug("hbtp: display power on!\n");
- kobject_uevent_env(&hbtp->input_dev->dev.kobj,
- KOBJ_ONLINE, envp);
- } else {
- pr_debug("hbtp: display power off!\n");
- kobject_uevent_env(&hbtp->input_dev->dev.kobj,
- KOBJ_OFFLINE, envp);
+ if (!hbtp->power_sync_enabled) {
+ if (status) {
+ pr_debug("hbtp: display power on!\n");
+ kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+ KOBJ_ONLINE, envp);
+ } else {
+ pr_debug("hbtp: display power off!\n");
+ kobject_uevent_env(&hbtp->input_dev->dev.kobj,
+ KOBJ_OFFLINE, envp);
+ }
}
mutex_unlock(&hbtp->mutex);
return count;
@@ -1462,6 +1431,7 @@ static struct kobj_attribute hbtp_display_attribute =
__ATTR(display_pwr, 0660, hbtp_display_pwr_show,
hbtp_display_pwr_store);
+
static int __init hbtp_init(void)
{
int error = 0;
@@ -1485,15 +1455,13 @@ static int __init hbtp_init(void)
goto err_misc_reg;
}
-#if defined(CONFIG_FB)
- hbtp->fb_notif.notifier_call = fb_notifier_callback;
- error = fb_register_client(&hbtp->fb_notif);
+ hbtp->dsi_panel_notif.notifier_call = dsi_panel_notifier_callback;
+ error = msm_drm_register_client(&hbtp->dsi_panel_notif);
if (error) {
- pr_err("%s: Unable to register fb_notifier: %d\n",
+ pr_err("%s: Unable to register dsi_panel_notifier: %d\n",
HBTP_INPUT_NAME, error);
- goto err_fb_reg;
+ goto err_dsi_panel_reg;
}
-#endif
sensor_kobject = kobject_create_and_add("hbtpsensor", kernel_kobj);
if (!sensor_kobject) {
@@ -1542,10 +1510,8 @@ static int __init hbtp_init(void)
err_sysfs_create_capdata:
kobject_put(sensor_kobject);
err_kobject_create:
-#if defined(CONFIG_FB)
- fb_unregister_client(&hbtp->fb_notif);
-err_fb_reg:
-#endif
+ msm_drm_unregister_client(&hbtp->dsi_panel_notif);
+err_dsi_panel_reg:
misc_deregister(&hbtp_input_misc);
err_misc_reg:
kfree(hbtp->sensor_data);
@@ -1566,9 +1532,7 @@ static void __exit hbtp_exit(void)
if (hbtp->input_dev)
input_unregister_device(hbtp->input_dev);
-#if defined(CONFIG_FB)
- fb_unregister_client(&hbtp->fb_notif);
-#endif
+ msm_drm_unregister_client(&hbtp->dsi_panel_notif);
platform_driver_unregister(&hbtp_pdev_driver);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 681dce1..c9d491b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5be14ad..dbf0983 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -905,6 +905,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
},
},
{
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ },
+ {
/* Schenker XMG C504 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index abf09ac..339a0e2 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
/* Walk this report and pull out the info we need */
while (i < length) {
- prefix = report[i];
-
- /* Skip over prefix */
- i++;
+ prefix = report[i++];
/* Determine data size and save the data in the proper variable */
- size = PREF_SIZE(prefix);
+ size = (1U << PREF_SIZE(prefix)) >> 1;
+ if (i + size > length) {
+ dev_err(ddev,
+ "Not enough data (need %d, have %d)\n",
+ i + size, length);
+ break;
+ }
+
switch (size) {
case 1:
data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
- case 3:
- size = 4;
+ case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ee0a86b..620fc50 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -13,7 +13,7 @@
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-errata.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c380b7e..1a0b110 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3120,6 +3120,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
diff --git a/drivers/iommu/arm-smmu-errata.c b/drivers/iommu/arm-smmu-errata.c
new file mode 100644
index 0000000..2ee2028
--- /dev/null
+++ b/drivers/iommu/arm-smmu-errata.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/arm-smmu-errata.h>
+
+static struct page *guard_pages[VMID_LAST];
+static DEFINE_MUTEX(guard_page_lock);
+
+struct page *arm_smmu_errata_get_guard_page(int vmid)
+{
+ struct page *page;
+ int ret;
+ int source_vm = VMID_HLOS;
+ int dest_vm = vmid;
+ int dest_perm = PERM_READ | PERM_WRITE | PERM_EXEC;
+ size_t size = ARM_SMMU_MIN_IOVA_ALIGN;
+
+ mutex_lock(&guard_page_lock);
+ page = guard_pages[vmid];
+ if (page)
+ goto out;
+
+ page = alloc_pages(GFP_KERNEL, get_order(size));
+ if (!page)
+ goto out;
+
+ if (vmid != VMID_HLOS) {
+ ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
+ &source_vm, 1,
+ &dest_vm, &dest_perm, 1);
+ if (ret) {
+ __free_pages(page, get_order(size));
+ page = NULL;
+ }
+ }
+ guard_pages[vmid] = page;
+out:
+ mutex_unlock(&guard_page_lock);
+ return page;
+}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d..d3d975a 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1040,13 +1040,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1081,7 +1076,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}
if (ste->s2_cfg) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7df1f56..280ce0cf 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -331,14 +331,6 @@ enum arm_smmu_implementation {
QCOM_SMMUV500,
};
-struct arm_smmu_device;
-struct arm_smmu_arch_ops {
- int (*init)(struct arm_smmu_device *smmu);
- void (*device_reset)(struct arm_smmu_device *smmu);
- phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
- dma_addr_t iova);
-};
-
struct arm_smmu_impl_def_reg {
u32 offset;
u32 value;
@@ -412,6 +404,7 @@ struct arm_smmu_power_resources {
int regulator_defer;
};
+struct arm_smmu_arch_ops;
struct arm_smmu_device {
struct device *dev;
@@ -440,7 +433,7 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_3LVL_TABLES (1 << 4)
#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 6)
-#define ARM_SMMU_OPT_QCOM_MMU500_ERRATA1 (1 << 7)
+#define ARM_SMMU_OPT_MMU500_ERRATA1 (1 << 7)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -543,6 +536,7 @@ struct arm_smmu_domain {
bool qsmmuv500_errata1_init;
bool qsmmuv500_errata1_client;
+ bool qsmmuv500_errata2_min_align;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -565,7 +559,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
- { ARM_SMMU_OPT_QCOM_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
+ { ARM_SMMU_OPT_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
{ 0, NULL},
};
@@ -580,9 +574,6 @@ static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
-static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
-static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
-
static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
dma_addr_t iova);
@@ -647,6 +638,76 @@ static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
mutex_unlock(&smmu_domain->assign_lock);
}
+/*
+ * init()
+ * Hook for additional device tree parsing at probe time.
+ *
+ * device_reset()
+ * Hook for one-time architecture-specific register settings.
+ *
+ * iova_to_phys_hard()
+ * Provides debug information. May be called from the context fault irq handler.
+ *
+ * init_context_bank()
+ * Hook for architecture-specific settings which require knowledge of the
+ * dynamically allocated context bank number.
+ *
+ * device_group()
+ * Hook for checking whether a device is compatible with a said group.
+ */
+struct arm_smmu_arch_ops {
+ int (*init)(struct arm_smmu_device *smmu);
+ void (*device_reset)(struct arm_smmu_device *smmu);
+ phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+ dma_addr_t iova);
+ void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
+ struct device *dev);
+ int (*device_group)(struct device *dev, struct iommu_group *group);
+};
+
+static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
+{
+ if (!smmu->arch_ops)
+ return 0;
+ if (!smmu->arch_ops->init)
+ return 0;
+ return smmu->arch_ops->init(smmu);
+}
+
+static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
+{
+ if (!smmu->arch_ops)
+ return;
+ if (!smmu->arch_ops->device_reset)
+ return;
+ return smmu->arch_ops->device_reset(smmu);
+}
+
+static void arm_smmu_arch_init_context_bank(
+ struct arm_smmu_domain *smmu_domain, struct device *dev)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (!smmu->arch_ops)
+ return;
+ if (!smmu->arch_ops->init_context_bank)
+ return;
+ return smmu->arch_ops->init_context_bank(smmu_domain, dev);
+}
+
+static int arm_smmu_arch_device_group(struct device *dev,
+ struct iommu_group *group)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+
+ if (!smmu->arch_ops)
+ return 0;
+ if (!smmu->arch_ops->device_group)
+ return 0;
+ return smmu->arch_ops->device_group(dev, group);
+}
+
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -1731,7 +1792,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
tlb = &arm_smmu_gather_ops;
- if (smmu->options & ARM_SMMU_OPT_QCOM_MMU500_ERRATA1)
+ if (smmu->options & ARM_SMMU_OPT_MMU500_ERRATA1)
tlb = &qsmmuv500_errata1_smmu_gather_ops;
ret = arm_smmu_alloc_cb(domain, smmu, dev);
@@ -1787,6 +1848,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
arm_smmu_init_context_bank(smmu_domain,
&smmu_domain->pgtbl_cfg);
+ arm_smmu_arch_init_context_bank(smmu_domain, dev);
+
/*
* Request context fault interrupt. Do this last to avoid the
* handler seeing a half-initialised domain state.
@@ -2151,13 +2214,13 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain,
return;
}
- arm_smmu_domain_remove_master(smmu_domain, fwspec);
+ if (atomic_domain)
+ arm_smmu_power_on_atomic(smmu->pwr);
+ else
+ arm_smmu_power_on(smmu->pwr);
- /* Remove additional vote for atomic power */
- if (atomic_domain) {
- WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
- arm_smmu_power_off(smmu->pwr);
- }
+ arm_smmu_domain_remove_master(smmu_domain, fwspec);
+ arm_smmu_power_off(smmu->pwr);
}
static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
@@ -2695,13 +2758,20 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
group = smmu->s2crs[idx].group;
}
- if (group)
- return group;
+ if (!group) {
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
- else
- group = generic_device_group(dev);
+ if (IS_ERR(group))
+ return NULL;
+ }
+
+ if (arm_smmu_arch_device_group(dev, group)) {
+ iommu_group_put(group);
+ return ERR_PTR(-EINVAL);
+ }
return group;
}
@@ -2829,6 +2899,10 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
& (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
ret = 0;
break;
+ case DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN:
+ *((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
break;
@@ -3118,65 +3192,6 @@ static void arm_smmu_trigger_fault(struct iommu_domain *domain,
arm_smmu_power_off(smmu->pwr);
}
-static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
- unsigned long offset)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- void __iomem *cb_base;
- unsigned long val;
-
- if (offset >= SZ_4K) {
- pr_err("Invalid offset: 0x%lx\n", offset);
- return 0;
- }
-
- smmu = smmu_domain->smmu;
- if (!smmu) {
- WARN(1, "Can't read registers of a detached domain\n");
- val = 0;
- return val;
- }
-
- if (arm_smmu_power_on(smmu->pwr))
- return 0;
-
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
- val = readl_relaxed(cb_base + offset);
-
- arm_smmu_power_off(smmu->pwr);
- return val;
-}
-
-static void arm_smmu_reg_write(struct iommu_domain *domain,
- unsigned long offset, unsigned long val)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- void __iomem *cb_base;
-
- if (offset >= SZ_4K) {
- pr_err("Invalid offset: 0x%lx\n", offset);
- return;
- }
-
- smmu = smmu_domain->smmu;
- if (!smmu) {
- WARN(1, "Can't read registers of a detached domain\n");
- return;
- }
-
- if (arm_smmu_power_on(smmu->pwr))
- return;
-
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
- writel_relaxed(val, cb_base + offset);
-
- arm_smmu_power_off(smmu->pwr);
-}
-
static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -3218,8 +3233,6 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.trigger_fault = arm_smmu_trigger_fault,
- .reg_read = arm_smmu_reg_read,
- .reg_write = arm_smmu_reg_write,
.tlbi_domain = arm_smmu_tlbi_domain,
.enable_config_clocks = arm_smmu_enable_config_clocks,
.disable_config_clocks = arm_smmu_disable_config_clocks,
@@ -3452,7 +3465,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Force bypass transaction to be Non-Shareable & not io-coherent */
reg &= ~(sCR0_SHCFG_MASK << sCR0_SHCFG_SHIFT);
- reg |= sCR0_SHCFG_NSH;
+ reg |= sCR0_SHCFG_NSH << sCR0_SHCFG_SHIFT;
/* Push the button */
__arm_smmu_tlb_sync(smmu);
@@ -4002,24 +4015,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
-static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
-{
- if (!smmu->arch_ops)
- return 0;
- if (!smmu->arch_ops->init)
- return 0;
- return smmu->arch_ops->init(smmu);
-}
-
-static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
-{
- if (!smmu->arch_ops)
- return;
- if (!smmu->arch_ops->device_reset)
- return;
- return smmu->arch_ops->device_reset(smmu);
-}
-
struct arm_smmu_match_data {
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -4278,16 +4273,20 @@ static int __init arm_smmu_init(void)
{
static bool registered;
int ret = 0;
+ ktime_t cur;
if (registered)
return 0;
+ cur = ktime_get();
ret = platform_driver_register(&qsmmuv500_tbu_driver);
if (ret)
return ret;
ret = platform_driver_register(&arm_smmu_driver);
registered = !ret;
+ trace_smmu_init(ktime_us_delta(ktime_get(), cur));
+
return ret;
}
@@ -4342,7 +4341,16 @@ IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
#define DEBUG_PAR_PA_SHIFT 12
#define DEBUG_PAR_FAULT_VAL 0x1
-#define TBU_DBG_TIMEOUT_US 30000
+#define TBU_DBG_TIMEOUT_US 100
+
+#define QSMMUV500_ACTLR_DEEP_PREFETCH_MASK 0x3
+#define QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT 0x8
+
+
+struct actlr_setting {
+ struct arm_smmu_smr smr;
+ u32 actlr;
+};
struct qsmmuv500_archdata {
struct list_head tbus;
@@ -4376,14 +4384,24 @@ struct qsmmuv500_tbu_device {
u32 halt_count;
};
-static bool arm_smmu_domain_match_smr(struct arm_smmu_domain *smmu_domain,
+struct qsmmuv500_group_iommudata {
+ bool has_actlr;
+ u32 actlr;
+};
+#define to_qsmmuv500_group_iommudata(group) \
+ ((struct qsmmuv500_group_iommudata *) \
+ (iommu_group_get_iommudata(group)))
+
+
+static bool arm_smmu_fwspec_match_smr(struct iommu_fwspec *fwspec,
struct arm_smmu_smr *smr)
{
struct arm_smmu_smr *smr2;
+ struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
int i, idx;
- for_each_cfg_sme(smmu_domain->dev->iommu_fwspec, i, idx) {
- smr2 = &smmu_domain->smmu->smrs[idx];
+ for_each_cfg_sme(fwspec, i, idx) {
+ smr2 = &smmu->smrs[idx];
/* Continue if table entry does not match */
if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
continue;
@@ -4401,13 +4419,15 @@ qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
bool ret = false;
int j;
struct arm_smmu_smr *smr;
+ struct iommu_fwspec *fwspec;
if (smmu_domain->qsmmuv500_errata1_init)
return smmu_domain->qsmmuv500_errata1_client;
+ fwspec = smmu_domain->dev->iommu_fwspec;
for (j = 0; j < data->num_errata1_clients; j++) {
smr = &data->errata1_clients[j];
- if (arm_smmu_domain_match_smr(smmu_domain, smr)) {
+ if (arm_smmu_fwspec_match_smr(fwspec, smr)) {
ret = true;
break;
}
@@ -4457,13 +4477,14 @@ static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
struct qsmmuv500_archdata *data =
get_qsmmuv500_archdata(smmu_domain->smmu);
ktime_t cur;
+ unsigned long flags;
bool errata;
cur = ktime_get();
trace_errata_tlbi_start(dev, 0);
errata = qsmmuv500_errata1_required(smmu_domain, data);
- remote_spin_lock(&data->errata1_lock);
+ remote_spin_lock_irqsave(&data->errata1_lock, flags);
if (errata) {
s64 delta;
@@ -4477,7 +4498,7 @@ static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
} else {
__qsmmuv500_errata1_tlbiall(smmu_domain);
}
- remote_spin_unlock(&data->errata1_lock);
+ remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}
@@ -4488,11 +4509,12 @@ static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
.free_pages_exact = arm_smmu_free_pages_exact,
};
-static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
+static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
+ struct arm_smmu_domain *smmu_domain)
{
unsigned long flags;
- u32 val;
- void __iomem *base;
+ u32 halt, fsr, sctlr_orig, sctlr, status;
+ void __iomem *base, *cb_base;
spin_lock_irqsave(&tbu->halt_lock, flags);
if (tbu->halt_count) {
@@ -4501,19 +4523,49 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
return 0;
}
+ cb_base = ARM_SMMU_CB_BASE(smmu_domain->smmu) +
+ ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
base = tbu->base;
- val = readl_relaxed(base + DEBUG_SID_HALT_REG);
- val |= DEBUG_SID_HALT_VAL;
- writel_relaxed(val, base + DEBUG_SID_HALT_REG);
+ halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
+ halt |= DEBUG_SID_HALT_VAL;
+ writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
- if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
- val, (val & DEBUG_SR_HALT_ACK_VAL),
- 0, TBU_DBG_TIMEOUT_US)) {
+ if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US))
+ goto out;
+
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (!(fsr & FSR_FAULT)) {
dev_err(tbu->dev, "Couldn't halt TBU!\n");
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return -ETIMEDOUT;
}
+ /*
+ * We are in a fault; Our request to halt the bus will not complete
+ * until transactions in front of us (such as the fault itself) have
+ * completed. Disable iommu faults and terminate any existing
+ * transactions.
+ */
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+
+ if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+ (status & DEBUG_SR_HALT_ACK_VAL),
+ 0, TBU_DBG_TIMEOUT_US)) {
+ dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+ spin_unlock_irqrestore(&tbu->halt_lock, flags);
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+out:
tbu->halt_count = 1;
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return 0;
@@ -4614,6 +4666,14 @@ static phys_addr_t qsmmuv500_iova_to_phys(
void __iomem *cb_base;
u32 sctlr_orig, sctlr;
int needs_redo = 0;
+ ktime_t timeout;
+
+ /* only 36 bit iova is supported */
+ if (iova >= (1ULL << 36)) {
+ dev_err_ratelimited(smmu->dev, "ECATS: address too large: %pad\n",
+ &iova);
+ return 0;
+ }
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
tbu = qsmmuv500_find_tbu(smmu, sid);
@@ -4624,35 +4684,23 @@ static phys_addr_t qsmmuv500_iova_to_phys(
if (ret)
return 0;
- /*
- * Disable client transactions & wait for existing operations to
- * complete.
- */
- ret = qsmmuv500_tbu_halt(tbu);
+ ret = qsmmuv500_tbu_halt(tbu, smmu_domain);
if (ret)
goto out_power_off;
+ /*
+ * ECATS can trigger the fault interrupt, so disable it temporarily
+ * and check for an interrupt manually.
+ */
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
/* Only one concurrent atos operation */
ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
if (ret)
goto out_resume;
- /*
- * We can be called from an interrupt handler with FSR already set
- * so terminate the faulting transaction prior to starting ecats.
- * No new racing faults can occur since we in the halted state.
- * ECATS can trigger the fault interrupt, so disable it temporarily
- * and check for an interrupt manually.
- */
- fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
- if (fsr & FSR_FAULT) {
- writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
- writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
- }
- sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
- sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
- writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
-
redo:
/* Set address and stream-id */
val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
@@ -4671,16 +4719,26 @@ static phys_addr_t qsmmuv500_iova_to_phys(
writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
ret = 0;
- if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG,
- val, !(val & DEBUG_SR_ECATS_RUNNING_VAL),
- 0, TBU_DBG_TIMEOUT_US)) {
- dev_err(tbu->dev, "ECATS translation timed out!\n");
+	/* Based on readx_poll_timeout_atomic */
+ timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
+ for (;;) {
+ val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
+ if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
+ break;
+ val = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (val & FSR_FAULT)
+ break;
+ if (ktime_compare(ktime_get(), timeout) > 0) {
+ dev_err(tbu->dev, "ECATS translation timed out!\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
}
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (fsr & FSR_FAULT) {
dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
- val);
+ fsr);
ret = -EINVAL;
writel_relaxed(val, cb_base + ARM_SMMU_CB_FSR);
@@ -4741,6 +4799,83 @@ static phys_addr_t qsmmuv500_iova_to_phys_hard(
return qsmmuv500_iova_to_phys(domain, iova, sid);
}
+static void qsmmuv500_release_group_iommudata(void *data)
+{
+ kfree(data);
+}
+
+/* If a device has a valid actlr, it must match */
+static int qsmmuv500_device_group(struct device *dev,
+ struct iommu_group *group)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+ struct qsmmuv500_group_iommudata *iommudata;
+ u32 actlr, i;
+ struct arm_smmu_smr *smr;
+
+ iommudata = to_qsmmuv500_group_iommudata(group);
+ if (!iommudata) {
+ iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
+ if (!iommudata)
+ return -ENOMEM;
+
+ iommu_group_set_iommudata(group, iommudata,
+ qsmmuv500_release_group_iommudata);
+ }
+
+ for (i = 0; i < data->actlr_tbl_size; i++) {
+ smr = &data->actlrs[i].smr;
+ actlr = data->actlrs[i].actlr;
+
+ if (!arm_smmu_fwspec_match_smr(fwspec, smr))
+ continue;
+
+ if (!iommudata->has_actlr) {
+ iommudata->actlr = actlr;
+ iommudata->has_actlr = true;
+ } else if (iommudata->actlr != actlr) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
+ struct device *dev)
+{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qsmmuv500_group_iommudata *iommudata =
+ to_qsmmuv500_group_iommudata(dev->iommu_group);
+ void __iomem *cb_base;
+ const struct iommu_gather_ops *tlb;
+
+ if (!iommudata->has_actlr)
+ return;
+
+ tlb = smmu_domain->pgtbl_cfg.tlb;
+ cb_base = ARM_SMMU_CB_BASE(smmu) +
+ ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+ writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
+
+ /*
+ * Prefetch only works properly if the start and end of all
+ * buffers in the page table are aligned to 16 Kb.
+ */
+	if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
+			QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
+ smmu_domain->qsmmuv500_errata2_min_align = true;
+
+ /*
+ * Flush the context bank after modifying ACTLR to ensure there
+ * are no cache entries with stale state
+ */
+ tlb->tlb_flush_all(smmu_domain);
+}
+
static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
{
struct arm_smmu_device *smmu = cookie;
@@ -4792,6 +4927,38 @@ static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
return 0;
}
+static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
+{
+ int len, i;
+ struct device *dev = smmu->dev;
+ struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+ struct actlr_setting *actlrs;
+ const __be32 *cell;
+
+ cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
+ if (!cell)
+ return 0;
+
+ len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
+ sizeof(u32) * 3);
+ if (len < 0)
+ return 0;
+
+ actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
+ if (!actlrs)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ actlrs[i].smr.id = of_read_number(cell++, 1);
+ actlrs[i].smr.mask = of_read_number(cell++, 1);
+ actlrs[i].actlr = of_read_number(cell++, 1);
+ }
+
+ data->actlrs = actlrs;
+ data->actlr_tbl_size = len;
+ return 0;
+}
+
static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
{
struct resource *res;
@@ -4799,6 +4966,8 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
struct qsmmuv500_archdata *data;
struct platform_device *pdev;
int ret;
+ u32 val;
+ void __iomem *reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -4819,6 +4988,22 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
if (ret)
return ret;
+ ret = qsmmuv500_read_actlr_tbl(smmu);
+ if (ret)
+ return ret;
+
+ reg = ARM_SMMU_GR0(smmu);
+ val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
+ val &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
+ val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
+ /*
+	 * Modifying the nonsecure copy of the sACR register is only
+ * allowed if permission is given in the secure sACR register.
+ * Attempt to detect if we were able to update the value.
+ */
+ WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
+
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
return ret;
@@ -4834,6 +5019,8 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
.init = qsmmuv500_arch_init,
.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
+ .init_context_bank = qsmmuv500_init_cb,
+ .device_group = qsmmuv500_device_group,
};
static const struct of_device_id qsmmuv500_tbu_of_match[] = {
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index da4d283..57ae0dd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -30,6 +30,8 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/arm-smmu-errata.h>
+#include <soc/qcom/secure_buffer.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -41,6 +43,8 @@ struct iommu_dma_cookie {
struct iova_domain iovad;
struct list_head msi_page_list;
spinlock_t msi_lock;
+ u32 min_iova_align;
+ struct page *guard_page;
};
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
@@ -121,6 +125,28 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
}
}
+static int iommu_dma_arm_smmu_errata_init(struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ int vmid = VMID_HLOS;
+ int min_iova_align = 0;
+
+ iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+ &min_iova_align);
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
+ if (vmid >= VMID_LAST || vmid < 0)
+ vmid = VMID_HLOS;
+
+ if (min_iova_align) {
+ cookie->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
+ cookie->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!cookie->guard_page)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -142,6 +168,9 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!iovad)
return -ENODEV;
+ if (iommu_dma_arm_smmu_errata_init(domain))
+ return -ENODEV;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
@@ -206,11 +235,19 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
size_t size, dma_addr_t dma_limit, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long shift = iova_shift(iovad);
- unsigned long iova_len = size >> shift;
+ unsigned long iova_len;
unsigned long iova = 0;
dma_addr_t limit;
+ unsigned long guard_len;
+ dma_addr_t ret_iova;
+ if (cookie->min_iova_align)
+ guard_len = ALIGN(size, cookie->min_iova_align) - size;
+ else
+ guard_len = 0;
+ iova_len = (size + guard_len) >> shift;
/*
* Freeing non-power-of-two-sized allocations back into the IOVA caches
* will come back to bite us badly, so we have to waste a bit of space
@@ -231,16 +268,36 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
limit = min_t(dma_addr_t, dma_limit >> shift, iovad->dma_32bit_pfn);
iova = alloc_iova_fast(iovad, iova_len, limit);
- return (dma_addr_t)iova << shift;
+ ret_iova = (dma_addr_t)iova << shift;
+
+ if (guard_len &&
+ iommu_map(domain, ret_iova + size,
+ page_to_phys(cookie->guard_page),
+ guard_len, ARM_SMMU_GUARD_PROT)) {
+
+ free_iova_fast(iovad, iova, iova_len);
+ return 0;
+ }
+
+ return ret_iova;
}
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+static void iommu_dma_free_iova(struct iommu_domain *domain,
+ struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size)
{
struct iova_domain *iovad = &cookie->iovad;
unsigned long shift = iova_shift(iovad);
+ unsigned long guard_len;
- free_iova_fast(iovad, iova >> shift, size >> shift);
+ if (cookie->min_iova_align) {
+ guard_len = ALIGN(size, cookie->min_iova_align) - size;
+ iommu_unmap(domain, iova + size, guard_len);
+ } else {
+ guard_len = 0;
+ }
+
+ free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
}
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
@@ -253,7 +310,7 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
size = iova_align(iovad, size + iova_off);
WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
- iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
+ iommu_dma_free_iova(domain, domain->iova_cookie, dma_addr, size);
}
static void __iommu_dma_free_pages(struct page **pages, int count)
@@ -418,7 +475,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size);
+ iommu_dma_free_iova(domain, cookie, iova, size);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -464,7 +521,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return DMA_ERROR_CODE;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
- iommu_dma_free_iova(cookie, iova, size);
+ iommu_dma_free_iova(domain, cookie, iova, size);
return DMA_ERROR_CODE;
}
return iova + iova_off;
@@ -629,7 +686,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, iova_len);
+ iommu_dma_free_iova(domain, cookie, iova, iova_len);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -716,7 +773,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
- iommu_dma_free_iova(cookie, iova, iovad->granule);
+ iommu_dma_free_iova(domain, cookie, iova, iovad->granule);
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 1ddf0dd..ad7ee11 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -14,11 +14,16 @@
#include <linux/dma-mapping.h>
#include <linux/dma-mapping-fast.h>
#include <linux/io-pgtable-fast.h>
+#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <trace/events/iommu.h>
+
+#include <soc/qcom/secure_buffer.h>
+#include <linux/arm-smmu-errata.h>
/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
#define FAST_PAGE_SHIFT 12
@@ -152,9 +157,18 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
unsigned long attrs,
size_t size)
{
- unsigned long bit, prev_search_start, nbits = size >> FAST_PAGE_SHIFT;
- unsigned long align = (1 << get_order(size)) - 1;
+ unsigned long bit, prev_search_start, nbits;
+ unsigned long align;
+ unsigned long guard_len;
+ dma_addr_t iova;
+ if (mapping->min_iova_align)
+ guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ else
+ guard_len = 0;
+
+ nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
+ align = (1 << get_order(size + guard_len)) - 1;
bit = bitmap_find_next_zero_area(
mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
nbits, align);
@@ -191,7 +205,16 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
}
- return (bit << FAST_PAGE_SHIFT) + mapping->base;
+ iova = (bit << FAST_PAGE_SHIFT) + mapping->base;
+ if (guard_len &&
+ iommu_map(mapping->domain, iova + size,
+ page_to_phys(mapping->guard_page),
+ guard_len, ARM_SMMU_GUARD_PROT)) {
+
+ bitmap_clear(mapping->bitmap, bit, nbits);
+ return DMA_ERROR_CODE;
+ }
+ return iova;
}
/*
@@ -285,7 +308,17 @@ static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
dma_addr_t iova, size_t size)
{
unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT;
- unsigned long nbits = size >> FAST_PAGE_SHIFT;
+ unsigned long nbits;
+ unsigned long guard_len;
+
+ if (mapping->min_iova_align) {
+ guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ iommu_unmap(mapping->domain, iova + size, guard_len);
+ } else {
+ guard_len = 0;
+ }
+ nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
+
/*
* We don't invalidate TLBs on unmap. We invalidate TLBs on map
@@ -373,6 +406,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
fast_dmac_clean_range(mapping, pmd, pmd + nptes);
spin_unlock_irqrestore(&mapping->lock, flags);
+
+ trace_map(mapping->domain, iova, phys_to_map, len, prot);
return iova + offset_from_phys_to_map;
fail_free_iova:
@@ -402,8 +437,10 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
spin_lock_irqsave(&mapping->lock, flags);
av8l_fast_unmap_public(pmd, len);
fast_dmac_clean_range(mapping, pmd, pmd + nptes);
- __fast_smmu_free_iova(mapping, iova, len);
+ __fast_smmu_free_iova(mapping, iova - offset, len);
spin_unlock_irqrestore(&mapping->lock, flags);
+
+ trace_unmap(mapping->domain, iova - offset, len, len);
}
static void fast_smmu_sync_single_for_cpu(struct device *dev,
@@ -708,7 +745,7 @@ static void fast_smmu_dma_unmap_resource(
iommu_unmap(mapping->domain, addr - offset, len);
spin_lock_irqsave(&mapping->lock, flags);
- __fast_smmu_free_iova(mapping, addr, len);
+ __fast_smmu_free_iova(mapping, addr - offset, len);
spin_unlock_irqrestore(&mapping->lock, flags);
}
@@ -856,6 +893,28 @@ static void fast_smmu_reserve_pci_windows(struct device *dev,
spin_unlock_irqrestore(&mapping->lock, flags);
}
+static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
+{
+ struct dma_fast_smmu_mapping *fast = mapping->fast;
+ int vmid = VMID_HLOS;
+ int min_iova_align = 0;
+
+ iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+ &min_iova_align);
+ iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
+ if (vmid >= VMID_LAST || vmid < 0)
+ vmid = VMID_HLOS;
+
+ if (min_iova_align) {
+ fast->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
+ fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!fast->guard_page)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/**
* fast_smmu_init_mapping
* @dev: valid struct device pointer
@@ -884,6 +943,9 @@ int fast_smmu_init_mapping(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
+ if (fast_smmu_errata_init(mapping))
+ goto release_mapping;
+
fast_smmu_reserve_pci_windows(dev, mapping->fast);
if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 30808e9..c7820b3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -542,7 +542,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
spin_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
clk_enable(data->clk_master);
- __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ if (sysmmu_block(data)) {
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ sysmmu_unblock(data);
+ }
clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a3594d2..981172d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -468,8 +468,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
__arm_lpae_set_pte(ptep, pte, cfg);
- } else {
+ } else if (!iopte_leaf(pte, lvl)) {
cptep = iopte_deref(pte, data);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
}
/* Rinse, repeat */
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 6d79cfb..22a708e 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -165,6 +165,7 @@ static void *test_virt_addr;
struct iommu_debug_device {
struct device *dev;
struct iommu_domain *domain;
+ struct dma_iommu_mapping *mapping;
u64 iova;
u64 phys;
size_t len;
@@ -1251,6 +1252,8 @@ static ssize_t __iommu_debug_dma_attach_write(struct file *file,
if (arm_iommu_attach_device(dev, dma_mapping))
goto out_release_mapping;
+
+ ddev->mapping = dma_mapping;
pr_err("Attached\n");
} else {
if (!dev->archdata.mapping) {
@@ -1264,7 +1267,7 @@ static ssize_t __iommu_debug_dma_attach_write(struct file *file,
goto out;
}
arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(dev->archdata.mapping);
+ arm_iommu_release_mapping(ddev->mapping);
pr_err("Detached\n");
}
retval = count;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 83cbf20..c333a36 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -426,6 +426,7 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
if (ret)
goto err_put_group;
+
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
@@ -1077,6 +1078,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
domain->type = type;
/* Assume all sizes by default; the driver may override this later */
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);
return domain;
}
@@ -1105,6 +1107,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
if (!ret) {
trace_attach_device_to_domain(dev);
iommu_debug_attach_device(domain, dev);
+
+ if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+ strlcpy(domain->name, dev_name(dev),
+ IOMMU_DOMAIN_NAME_LEN);
+ }
}
return ret;
}
@@ -1398,7 +1405,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(orig_iova, orig_paddr, orig_size);
+ trace_map(domain, orig_iova, orig_paddr, orig_size, prot);
return ret;
}
@@ -1451,11 +1458,23 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
- trace_unmap(orig_iova, size, unmapped);
+ trace_unmap(domain, orig_iova, size, unmapped);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ size_t mapped;
+
+ mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+ trace_map_sg(domain, iova, mapped, prot);
+ return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
@@ -1519,8 +1538,49 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
-struct dentry *iommu_debugfs_top;
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags)
+{
+ int ret = -ENOSYS;
+ /*
+ * if upper layers showed interest and installed a fault handler,
+ * invoke it.
+ */
+ if (domain->handler)
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
+
+ trace_io_page_fault(dev, iova, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(report_iommu_fault);
+
+struct dentry *iommu_debugfs_top;
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -1617,30 +1677,6 @@ void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
domain->ops->trigger_fault(domain, flags);
}
-/**
- * iommu_reg_read() - read an IOMMU register
- *
- * Reads the IOMMU register at the given offset.
- */
-unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
-{
- if (domain->ops->reg_read)
- return domain->ops->reg_read(domain, offset);
- return 0;
-}
-
-/**
- * iommu_reg_write() - write an IOMMU register
- *
- * Writes the given value to the IOMMU register at the given offset.
- */
-void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
- unsigned long val)
-{
- if (domain->ops->reg_write)
- domain->ops->reg_write(domain, offset, val);
-}
-
void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 1eef56a..05bbf17 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ int i, size, reserved = 0;
+ u32 max = 0, entry;
const __be32 *irqsr;
int ret = -ENOMEM;
diff --git a/drivers/irqchip/qcom/pdc-sdm670.c b/drivers/irqchip/qcom/pdc-sdm670.c
index 7bd6333..21bb58e 100644
--- a/drivers/irqchip/qcom/pdc-sdm670.c
+++ b/drivers/irqchip/qcom/pdc-sdm670.c
@@ -120,13 +120,13 @@ static struct pdc_pin sdm670_data[] = {
{106, 653}, /* core_bi_px_gpio_132 */
{107, 654}, /* core_bi_px_gpio_133 */
{108, 655}, /* core_bi_px_gpio_145 */
+ {115, 662}, /* core_bi_px_gpio_41 */
+ {116, 663}, /* core_bi_px_gpio_89 */
+ {117, 664}, /* core_bi_px_gpio_31 */
+ {118, 665}, /* core_bi_px_gpio_49 */
{119, 666}, /* core_bi_px_to_mpm[2] */
{120, 667}, /* core_bi_px_to_mpm[3] */
{121, 668}, /* core_bi_px_to_mpm[4] */
- {122, 669}, /* core_bi_px_gpio_41 */
- {123, 670}, /* core_bi_px_gpio_89 */
- {124, 671}, /* core_bi_px_gpio_31 */
- {125, 95}, /* core_bi_px_gpio_49 */
{-1}
};
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index bf3fbd0..64b5864 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 9f340bf..9dc85cd 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -63,7 +63,7 @@ static void msg_submit(struct mbox_chan *chan)
again:
spin_lock_irqsave(&chan->lock, flags);
- if (!chan->msg_count || chan->active_req)
+ if (!chan->msg_count || (chan->active_req && err != -EAGAIN))
goto exit;
count = chan->msg_count;
diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c
index 7bf8a18..160b858 100644
--- a/drivers/mailbox/qcom-rpmh-mailbox.c
+++ b/drivers/mailbox/qcom-rpmh-mailbox.c
@@ -29,7 +29,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-
+#include <asm/arch_timer.h>
#include <asm-generic/io.h>
#include <soc/qcom/tcs.h>
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c..02619ca 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -333,6 +333,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
+ struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a37c177..e0f1c6d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
- wake_up_gc(op->c);
-
if (op->bypass)
return bch_data_invalidate(cl);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
/*
* Journal writes are marked REQ_PREFLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 66669c8..f4557f5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1025,7 +1025,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(dc);
+ bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@@ -1058,6 +1058,8 @@ static void cached_dev_free(struct closure *cl)
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
@@ -1229,6 +1231,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err;
bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@@ -1967,6 +1970,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (!IS_ERR(bdev))
+ bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index b3ff57d..4fbb553 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@ STORE(__cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
+ ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
@@ -226,7 +226,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+ v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index dde6172..eb70f68 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
+/**
+ * bch_hprint() - formats @v to human readable string for sysfs.
+ *
+ * @v - signed 64 bit integer
+ * @buf - the (at least 8 byte) buffer to format the result into.
+ *
+ * Returns the number of bytes used by format.
+ */
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
- char dec[4] = "";
- int u, t = 0;
+ int u = 0, t;
- for (u = 0; v >= 1024 || v <= -1024; u++) {
- t = v & ~(~0 << 10);
- v >>= 10;
- }
+ uint64_t q;
- if (!u)
- return sprintf(buf, "%llu", v);
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
- if (v < 100 && v > -100)
- snprintf(dec, sizeof(dec), ".%i", t / 100);
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e..4ce2b19 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -20,7 +20,8 @@
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
@@ -186,7 +187,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty_finish, system_wq);
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
@@ -206,7 +207,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty, system_wq);
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
@@ -482,17 +483,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
- op.inode = dc->disk.id;
+ op.inode = d->id;
- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -516,6 +517,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf5..cdf8d25 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+ uint64_t i, ret = 0;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+
+ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ ret += bcache_dev_sectors_dirty(d);
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ return ret;
+}
+
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d82692..fb02c39 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1992,6 +1992,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b0c0aef..12abf69 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
* oldconf until no one uses it anymore.
*/
mddev_suspend(mddev);
- oldconf = rcu_dereference(mddev->private);
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
mddev->raid_disks++;
WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
"copied raid_disks doesn't match mddev->raid_disks");
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4c4aab0..b19b551 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1407,11 +1407,24 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 549b4af..7aea022 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -829,6 +829,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
+ /*
+ * We must assign batch_head of this stripe within the
+ * batch_lock, otherwise clear_batch_ready of batch head
+ * stripe could clear BATCH_READY bit of this stripe and
+ * this stripe->batch_head doesn't get assigned, which
+ * could confuse clear_batch_ready for this stripe
+ */
+ sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
@@ -836,8 +844,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
-
- sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
@@ -4277,7 +4283,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED)),
+ (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index f2d39a9..0eadf08 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -4028,6 +4028,9 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
hcount = 3 + dfil->todo;
if (hcount > count)
hcount = count;
+ if (hcount == 0)
+ return done;
+
result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer,
file->f_flags & O_NONBLOCK,
buf, hcount, ppos);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 4003831..7b1935a 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3118,6 +3118,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.blank_data = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
+ state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
+ state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
return 0;
}
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index e69d338..ae550a1 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
/* DST is not a frontend, attaching the ASIC */
if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
pr_err("%s: Could not find a Twinhan DST\n", __func__);
+ kfree(state);
break;
}
/* Attach other DST peripherals if any */
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 0583d56..41ba848 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -56,11 +56,11 @@
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count)
+ int addr, u32 val, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
- if (count <= 0 || count > 32764) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
@@ -78,12 +78,12 @@ int av7110_debiwrite(struct av7110 *av7110, u32 config,
return 0;
}
-u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
+u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
- if (count > 32764 || count <= 0) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}
diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/media/pci/ttpci/av7110_hw.h
index 1634aba..ccb1480 100644
--- a/drivers/media/pci/ttpci/av7110_hw.h
+++ b/drivers/media/pci/ttpci/av7110_hw.h
@@ -377,14 +377,14 @@ extern int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
/* DEBI (saa7146 data extension bus interface) access */
extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count);
+ int addr, u32 val, unsigned int count);
extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
- int addr, int count);
+ int addr, unsigned int count);
/* DEBI during interrupt */
/* single word writes */
-static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
av7110_debiwrite(av7110, config, addr, val, count);
}
@@ -397,7 +397,7 @@ static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
av7110_debiwrite(av7110, config, addr, 0, count);
}
-static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
u32 res;
@@ -408,7 +408,7 @@ static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
}
/* DEBI outside interrupts, only for count <= 4! */
-static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
@@ -417,7 +417,7 @@ static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
spin_unlock_irqrestore(&av7110->debilock, flags);
}
-static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
u32 res;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 787bd16..bbb5fee 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -849,9 +849,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 518ad34..7f92144 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -825,12 +825,13 @@ static int fimc_is_probe(struct platform_device *pdev)
is->irq = irq_of_parse_and_map(dev->of_node, 0);
if (!is->irq) {
dev_err(dev, "no irq found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = fimc_is_get_clocks(is);
if (ret < 0)
- return ret;
+ goto err_iounmap;
platform_set_drvdata(pdev, is);
@@ -891,6 +892,8 @@ static int fimc_is_probe(struct platform_device *pdev)
free_irq(is->irq, is);
err_clk:
fimc_is_put_clocks(is);
+err_iounmap:
+ iounmap(is->pmu_regs);
return ret;
}
@@ -947,6 +950,7 @@ static int fimc_is_remove(struct platform_device *pdev)
fimc_is_unregister_subdevs(is);
vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
+ iounmap(is->pmu_regs);
fimc_is_debugfs_remove(is);
release_firmware(is->fw.f_w);
fimc_is_free_cpu_memory(is);
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 48fa1c0..9e0aee9 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -10,3 +10,4 @@
obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 3fbb3f0..6d699cf 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -67,11 +67,15 @@ bool cam_cdm_set_cam_hw_version(
return false;
}
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
- enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
{
- CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
- evt_data);
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+ return false;
}
struct cam_cdm_utils_ops *cam_cdm_get_ops(
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index fa3ae04..497832b 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,8 +32,8 @@ int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
uint32_t arg_size);
bool cam_cdm_set_cam_hw_version(
uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
- enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data);
struct cam_cdm_utils_ops *cam_cdm_get_ops(
uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index bfa1bdd..84402e4 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include "cam_context.h"
#include "cam_debug_util.h"
+#include "cam_node.h"
static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
void *evt_data)
@@ -133,8 +134,8 @@ int cam_context_handle_crm_unlink(struct cam_context *ctx,
rc = ctx->state_machine[ctx->state].crm_ops.unlink(
ctx, unlink);
} else {
- CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d",
- ctx->dev_hdl, ctx->state);
+ CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
+ ctx->dev_hdl, ctx->dev_name, ctx->state);
rc = -EPROTO;
}
mutex_unlock(&ctx->ctx_mutex);
@@ -332,14 +333,15 @@ int cam_context_handle_stop_dev(struct cam_context *ctx,
ctx, cmd);
else
/* stop device can be optional for some driver */
- CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d",
- ctx->dev_hdl, ctx->state);
+ CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s state %d",
+ ctx->dev_hdl, ctx->dev_name, ctx->state);
mutex_unlock(&ctx->ctx_mutex);
return rc;
}
int cam_context_init(struct cam_context *ctx,
+ const char *dev_name,
struct cam_req_mgr_kmd_ops *crm_node_intf,
struct cam_hw_mgr_intf *hw_mgr_intf,
struct cam_ctx_request *req_list,
@@ -355,10 +357,14 @@ int cam_context_init(struct cam_context *ctx,
memset(ctx, 0, sizeof(*ctx));
ctx->dev_hdl = -1;
+ ctx->link_hdl = -1;
+ ctx->session_hdl = -1;
INIT_LIST_HEAD(&ctx->list);
mutex_init(&ctx->ctx_mutex);
+ mutex_init(&ctx->sync_mutex);
spin_lock_init(&ctx->lock);
+ ctx->dev_name = dev_name;
ctx->ctx_crm_intf = NULL;
ctx->crm_ctx_intf = crm_node_intf;
ctx->hw_mgr_intf = hw_mgr_intf;
@@ -373,6 +379,7 @@ int cam_context_init(struct cam_context *ctx,
for (i = 0; i < req_size; i++) {
INIT_LIST_HEAD(&ctx->req_list[i].list);
list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+ ctx->req_list[i].ctx = ctx;
}
ctx->state = CAM_CTX_AVAILABLE;
ctx->state_machine = NULL;
@@ -398,3 +405,20 @@ int cam_context_deinit(struct cam_context *ctx)
return 0;
}
+
+void cam_context_putref(struct cam_context *ctx)
+{
+ kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
+ CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+}
+
+void cam_context_getref(struct cam_context *ctx)
+{
+ if (kref_get_unless_zero(&ctx->refcount) == 0) {
+ /* should never happen */
+ WARN(1, "cam_context_getref fail\n");
+ }
+ CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
+ ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 10285cb..6d1589e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/kref.h>
#include "cam_req_mgr_interface.h"
#include "cam_hw_mgr_intf.h"
@@ -54,6 +55,8 @@ enum cam_context_state {
* @num_out_map_entries: Number of out map entries
* @num_in_acked: Number of in fence acked
* @num_out_acked: Number of out fence acked
+ * @flushed: Request is flushed
+ * @ctx: The context to which this request belongs
*
*/
struct cam_ctx_request {
@@ -69,6 +72,8 @@ struct cam_ctx_request {
uint32_t num_out_map_entries;
uint32_t num_in_acked;
uint32_t num_out_acked;
+ int flushed;
+ struct cam_context *ctx;
};
/**
@@ -135,6 +140,7 @@ struct cam_ctx_ops {
/**
* struct cam_context - camera context object for the subdevice node
*
+ * @dev_name: String giving name of device associated
* @list: Link list entry
* @sessoin_hdl: Session handle
* @dev_hdl: Device handle
@@ -155,9 +161,13 @@ struct cam_ctx_ops {
* @state_machine: Top level state machine
* @ctx_priv: Private context pointer
* @ctxt_to_hw_map: Context to hardware mapping pointer
+ * @refcount: Context object refcount
+ * @node: The main node to which this context belongs
+ * @sync_mutex: mutex to sync with sync cb thread
*
*/
struct cam_context {
+ const char *dev_name;
struct list_head list;
int32_t session_hdl;
int32_t dev_hdl;
@@ -183,6 +193,10 @@ struct cam_context {
void *ctx_priv;
void *ctxt_to_hw_map;
+
+ struct kref refcount;
+ void *node;
+ struct mutex sync_mutex;
};
/**
@@ -331,6 +345,7 @@ int cam_context_deinit(struct cam_context *ctx);
* @brief: Camera context initialize function
*
* @ctx: Object pointer for cam_context
+ * @dev_name: String giving name of device associated
* @crm_node_intf: Function table for crm to context interface
* @hw_mgr_intf: Function table for context to hw interface
* @req_list: Requests storage
@@ -338,10 +353,30 @@ int cam_context_deinit(struct cam_context *ctx);
*
*/
int cam_context_init(struct cam_context *ctx,
+ const char *dev_name,
struct cam_req_mgr_kmd_ops *crm_node_intf,
struct cam_hw_mgr_intf *hw_mgr_intf,
struct cam_ctx_request *req_list,
uint32_t req_size);
+/**
+ * cam_context_putref()
+ *
+ * @brief: Put back context reference.
+ *
+ * @ctx: Context for which ref is returned
+ *
+ */
+void cam_context_putref(struct cam_context *ctx);
+
+/**
+ * cam_context_getref()
+ *
+ * @brief: Take a context reference (increments refcount).
+ *
+ * @ctx: Context for which ref is taken
+ *
+ */
+void cam_context_getref(struct cam_context *ctx);
#endif /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 714891e..0a1c2cf 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -18,6 +18,7 @@
#include <media/cam_defs.h>
#include "cam_context.h"
+#include "cam_context_utils.h"
#include "cam_mem_mgr.h"
#include "cam_node.h"
#include "cam_req_mgr_util.h"
@@ -25,6 +26,15 @@
#include "cam_trace.h"
#include "cam_debug_util.h"
+static inline int cam_context_validate_thread(void)
+{
+ if (in_interrupt()) {
+ WARN(1, "Invalid execution context\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
int cam_context_buf_done_from_hw(struct cam_context *ctx,
void *done_event_data, uint32_t bubble_state)
{
@@ -33,17 +43,23 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
struct cam_ctx_request *req;
struct cam_hw_done_event_data *done =
(struct cam_hw_done_event_data *)done_event_data;
+ int rc;
if (!ctx || !done) {
CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
return -EINVAL;
}
+ rc = cam_context_validate_thread();
+ if (rc)
+ return rc;
+
+ spin_lock(&ctx->lock);
if (list_empty(&ctx->active_req_list)) {
CAM_ERR(CAM_CTXT, "no active request");
+ spin_unlock(&ctx->lock);
return -EIO;
}
-
req = list_first_entry(&ctx->active_req_list,
struct cam_ctx_request, list);
@@ -52,15 +68,22 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
if (done->request_id != req->request_id) {
CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
done->request_id, req->request_id);
+ spin_unlock(&ctx->lock);
return -EIO;
}
if (!req->num_out_map_entries) {
CAM_ERR(CAM_CTXT, "no output fence to signal");
+ spin_unlock(&ctx->lock);
return -EIO;
}
+ /*
+ * since another thread may be adding/removing from active
+ * list, so hold the lock
+ */
list_del_init(&req->list);
+ spin_unlock(&ctx->lock);
if (!bubble_state)
result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
else
@@ -71,41 +94,34 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
req->out_map_entries[j].sync_id = -1;
}
+ /*
+ * another thread may be adding/removing from free list,
+ * so hold the lock
+ */
+ spin_lock(&ctx->lock);
list_add_tail(&req->list, &ctx->free_req_list);
+ req->ctx = NULL;
+ spin_unlock(&ctx->lock);
return 0;
}
-int cam_context_apply_req_to_hw(struct cam_context *ctx,
+static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
struct cam_req_mgr_apply_request *apply)
{
int rc = 0;
- struct cam_ctx_request *req;
+ struct cam_context *ctx = req->ctx;
struct cam_hw_config_args cfg;
- if (!ctx || !apply) {
- CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, apply);
- rc = -EINVAL;
- goto end;
- }
-
if (!ctx->hw_mgr_intf) {
CAM_ERR(CAM_CTXT, "HW interface is not ready");
rc = -EFAULT;
goto end;
}
- if (list_empty(&ctx->pending_req_list)) {
- CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
- apply->request_id);
- rc = -EFAULT;
- goto end;
- }
-
spin_lock(&ctx->lock);
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
spin_unlock(&ctx->lock);
cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -114,11 +130,13 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,
cfg.out_map_entries = req->out_map_entries;
cfg.num_out_map_entries = req->num_out_map_entries;
cfg.priv = req->req_priv;
- list_add_tail(&req->list, &ctx->active_req_list);
rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
- if (rc)
+ if (rc) {
+ spin_lock(&ctx->lock);
list_del_init(&req->list);
+ spin_unlock(&ctx->lock);
+ }
end:
return rc;
@@ -126,39 +144,52 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,
static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
- struct cam_context *ctx = data;
- struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req = data;
+ struct cam_context *ctx = NULL;
struct cam_req_mgr_apply_request apply;
+ int rc;
- if (!ctx) {
+ if (!req) {
CAM_ERR(CAM_CTXT, "Invalid input param");
return;
}
-
- spin_lock(&ctx->lock);
- if (!list_empty(&ctx->pending_req_list))
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- spin_unlock(&ctx->lock);
-
- if (!req) {
- CAM_ERR(CAM_CTXT, "No more request obj free");
+ rc = cam_context_validate_thread();
+ if (rc)
return;
- }
+ ctx = req->ctx;
req->num_in_acked++;
if (req->num_in_acked == req->num_in_map_entries) {
apply.request_id = req->request_id;
- cam_context_apply_req_to_hw(ctx, &apply);
+ /*
+ * take mutex to ensure that another thread does
+ * not flush the request while this
+ * thread is submitting it to h/w. The submit to
+ * h/w and adding to the active list should happen
+ * in a critical section which is provided by this
+ * mutex.
+ */
+ mutex_lock(&ctx->sync_mutex);
+ if (!req->flushed) {
+ cam_context_apply_req_to_hw(req, &apply);
+ mutex_unlock(&ctx->sync_mutex);
+ } else {
+ mutex_unlock(&ctx->sync_mutex);
+ req->ctx = NULL;
+ req->flushed = 0;
+ spin_lock(&ctx->lock);
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ spin_unlock(&ctx->lock);
+ }
}
+ cam_context_putref(ctx);
}
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd)
{
- int i;
struct cam_hw_release_args arg;
- struct cam_ctx_request *req;
if (!ctx) {
CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -171,11 +202,7 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
}
arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
- if ((list_empty(&ctx->active_req_list)) &&
- (list_empty(&ctx->pending_req_list)))
- arg.active_req = false;
- else
- arg.active_req = true;
+ arg.active_req = false;
ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
ctx->ctxt_to_hw_map = NULL;
@@ -184,38 +211,6 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
ctx->dev_hdl = -1;
ctx->link_hdl = -1;
- while (!list_empty(&ctx->active_req_list)) {
- req = list_first_entry(&ctx->active_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++) {
- if (req->out_map_entries[i].sync_id > 0)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- }
- list_add_tail(&req->list, &ctx->free_req_list);
- }
-
- while (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- for (i = 0; i < req->num_in_map_entries; i++)
- if (req->in_map_entries[i].sync_id > 0)
- cam_sync_deregister_callback(
- cam_context_sync_callback, ctx,
- req->in_map_entries[i].sync_id);
- CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id > 0)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- list_add_tail(&req->list, &ctx->free_req_list);
- }
-
return 0;
}
@@ -241,6 +236,9 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
rc = -EFAULT;
goto end;
}
+ rc = cam_context_validate_thread();
+ if (rc)
+ return rc;
spin_lock(&ctx->lock);
if (!list_empty(&ctx->free_req_list)) {
@@ -258,6 +256,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
memset(req, 0, sizeof(*req));
INIT_LIST_HEAD(&req->list);
+ req->ctx = ctx;
/* for config dev, only memory handle is supported */
/* map packet from the memhandle */
@@ -303,10 +302,18 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
list_add_tail(&req->list, &ctx->pending_req_list);
spin_unlock(&ctx->lock);
for (i = 0; i < req->num_in_map_entries; i++) {
+ cam_context_getref(ctx);
rc = cam_sync_register_callback(
cam_context_sync_callback,
- (void *)ctx,
+ (void *)req,
req->in_map_entries[i].sync_id);
+ if (rc) {
+ CAM_ERR(CAM_CTXT,
+ "Failed register fence cb: %d ret = %d",
+ req->in_map_entries[i].sync_id, rc);
+ cam_context_putref(ctx);
+ goto free_req;
+ }
CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
req->in_map_entries[i].sync_id, rc);
}
@@ -318,6 +325,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
free_req:
spin_lock(&ctx->lock);
list_add_tail(&req->list, &ctx->free_req_list);
+ req->ctx = NULL;
spin_unlock(&ctx->lock);
end:
return rc;
@@ -452,6 +460,7 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
uint32_t i;
struct cam_hw_stop_args stop;
struct cam_ctx_request *req;
+ struct list_head temp_list;
if (!ctx) {
CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -465,6 +474,32 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
goto end;
}
+ rc = cam_context_validate_thread();
+ if (rc)
+ goto end;
+
+ /*
+ * flush pending requests, take the sync lock to synchronize with the
+ * sync callback thread so that the sync cb thread does not try to
+ * submit request to h/w while the request is being flushed
+ */
+ mutex_lock(&ctx->sync_mutex);
+ INIT_LIST_HEAD(&temp_list);
+ spin_lock(&ctx->lock);
+ list_splice_init(&ctx->pending_req_list, &temp_list);
+ spin_unlock(&ctx->lock);
+ while (!list_empty(&temp_list)) {
+ req = list_first_entry(&temp_list,
+ struct cam_ctx_request, list);
+ list_del_init(&req->list);
+ req->flushed = 1;
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id != -1)
+ cam_sync_signal(req->out_map_entries[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ }
+ mutex_unlock(&ctx->sync_mutex);
+
/* stop hw first */
if (ctx->ctxt_to_hw_map) {
stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -473,22 +508,17 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
&stop);
}
- /* flush pending and active queue */
- while (!list_empty(&ctx->pending_req_list)) {
- req = list_first_entry(&ctx->pending_req_list,
- struct cam_ctx_request, list);
- list_del_init(&req->list);
- CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
- req->num_out_map_entries);
- for (i = 0; i < req->num_out_map_entries; i++)
- if (req->out_map_entries[i].sync_id != -1)
- cam_sync_signal(req->out_map_entries[i].sync_id,
- CAM_SYNC_STATE_SIGNALED_ERROR);
- list_add_tail(&req->list, &ctx->free_req_list);
- }
+ /*
+ * flush active queue, at this point h/w layer below does not have any
+ * reference to requests in active queue.
+ */
+ INIT_LIST_HEAD(&temp_list);
+ spin_lock(&ctx->lock);
+ list_splice_init(&ctx->active_req_list, &temp_list);
+ spin_unlock(&ctx->lock);
- while (!list_empty(&ctx->active_req_list)) {
- req = list_first_entry(&ctx->active_req_list,
+ while (!list_empty(&temp_list)) {
+ req = list_first_entry(&temp_list,
struct cam_ctx_request, list);
list_del_init(&req->list);
CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
@@ -497,7 +527,14 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
if (req->out_map_entries[i].sync_id != -1)
cam_sync_signal(req->out_map_entries[i].sync_id,
CAM_SYNC_STATE_SIGNALED_ERROR);
+ /*
+ * The spin lock should be taken here to guard the free list,
+ * as sync cb thread could be adding a pending req to free list
+ */
+ spin_lock(&ctx->lock);
list_add_tail(&req->list, &ctx->free_req_list);
+ req->ctx = NULL;
+ spin_unlock(&ctx->lock);
}
end:
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index f7982eb..45d9e56 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -17,8 +17,6 @@
int cam_context_buf_done_from_hw(struct cam_context *ctx,
void *done_event_data, uint32_t bubble_state);
-int cam_context_apply_req_to_hw(struct cam_context *ctx,
- struct cam_req_mgr_apply_request *apply);
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
struct cam_release_dev_cmd *cmd);
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index aab75d5..4746152 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -203,8 +203,8 @@ struct cam_hw_config_args {
* @hw_write: Function pointer for Write hardware registers
* @hw_cmd: Function pointer for any customized commands for the
* hardware manager
- * @download_fw: Function pointer for firmware downloading
- * @hw_close: Function pointer for subdev close
+ * @hw_open: Function pointer for HW init
+ * @hw_close: Function pointer for HW deinit
*
*/
struct cam_hw_mgr_intf {
@@ -220,7 +220,7 @@ struct cam_hw_mgr_intf {
int (*hw_read)(void *hw_priv, void *read_args);
int (*hw_write)(void *hw_priv, void *write_args);
int (*hw_cmd)(void *hw_priv, void *write_args);
- int (*download_fw)(void *hw_priv, void *fw_download_args);
+ int (*hw_open)(void *hw_priv, void *fw_download_args);
int (*hw_close)(void *hw_priv, void *hw_close_args);
};
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 11e9290..1f0213e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -18,6 +18,34 @@
#include "cam_trace.h"
#include "cam_debug_util.h"
+static struct cam_context *cam_node_get_ctxt_from_free_list(
+ struct cam_node *node)
+{
+ struct cam_context *ctx = NULL;
+
+ mutex_lock(&node->list_mutex);
+ if (!list_empty(&node->free_ctx_list)) {
+ ctx = list_first_entry(&node->free_ctx_list,
+ struct cam_context, list);
+ list_del_init(&ctx->list);
+ }
+ mutex_unlock(&node->list_mutex);
+ if (ctx)
+ kref_init(&ctx->refcount);
+ return ctx;
+}
+
+void cam_node_put_ctxt_to_free_list(struct kref *ref)
+{
+ struct cam_context *ctx =
+ container_of(ref, struct cam_context, refcount);
+ struct cam_node *node = ctx->node;
+
+ mutex_lock(&node->list_mutex);
+ list_add_tail(&ctx->list, &node->free_ctx_list);
+ mutex_unlock(&node->list_mutex);
+}
+
static int __cam_node_handle_query_cap(struct cam_node *node,
struct cam_query_cap_cmd *query)
{
@@ -45,13 +73,7 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,
if (!acquire)
return -EINVAL;
- mutex_lock(&node->list_mutex);
- if (!list_empty(&node->free_ctx_list)) {
- ctx = list_first_entry(&node->free_ctx_list,
- struct cam_context, list);
- list_del_init(&ctx->list);
- }
- mutex_unlock(&node->list_mutex);
+ ctx = cam_node_get_ctxt_from_free_list(node);
if (!ctx) {
rc = -ENOMEM;
goto err;
@@ -66,9 +88,7 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,
return 0;
free_ctx:
- mutex_lock(&node->list_mutex);
- list_add_tail(&ctx->list, &node->free_ctx_list);
- mutex_unlock(&node->list_mutex);
+ cam_context_putref(ctx);
err:
return rc;
}
@@ -207,9 +227,7 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
node->name);
- mutex_lock(&node->list_mutex);
- list_add_tail(&ctx->list, &node->free_ctx_list);
- mutex_unlock(&node->list_mutex);
+ cam_context_putref(ctx);
return rc;
}
@@ -312,8 +330,7 @@ int cam_node_shutdown(struct cam_node *node)
if (node->ctx_list[i].dev_hdl >= 0) {
cam_context_shutdown(&(node->ctx_list[i]));
cam_destroy_device_hdl(node->ctx_list[i].dev_hdl);
- list_add_tail(&(node->ctx_list[i].list),
- &node->free_ctx_list);
+ cam_context_putref(&(node->ctx_list[i]));
}
}
@@ -358,6 +375,7 @@ int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
}
INIT_LIST_HEAD(&ctx_list[i].list);
list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+ ctx_list[i].node = node;
}
node->state = CAM_NODE_STATE_INIT;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
index 02e153d..4303ee3 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -13,6 +13,7 @@
#ifndef _CAM_NODE_H_
#define _CAM_NODE_H_
+#include <linux/kref.h>
#include "cam_context.h"
#include "cam_hw_mgr_intf.h"
#include "cam_req_mgr_interface.h"
@@ -97,4 +98,14 @@ int cam_node_shutdown(struct cam_node *node);
int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
struct cam_context *ctx_list, uint32_t ctx_size, char *name);
+/**
+ * cam_node_put_ctxt_to_free_list()
+ *
+ * @brief: Put context in node free list.
+ *
+ * @ref: Context's kref object
+ *
+ */
+void cam_node_put_ctxt_to_free_list(struct kref *ref);
+
#endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 8518862..fc84d9d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -21,6 +21,8 @@
#include "cam_cpas_hw_intf.h"
#include "cam_cpas_soc.h"
+#define CAM_CPAS_AXI_MIN_BW (2048 * 1024)
+
int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
{
@@ -116,6 +118,12 @@ static int cam_cpas_util_vote_bus_client_bw(
bus_client->curr_vote_level = idx;
mutex_unlock(&bus_client->lock);
+ if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_BW))
+ ab = CAM_CPAS_AXI_MIN_BW;
+
+ if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_BW))
+ ib = CAM_CPAS_AXI_MIN_BW;
+
pdata = bus_client->pdata;
path = &(pdata->usecase[idx]);
path->vectors[0].ab = ab;
@@ -362,7 +370,7 @@ static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
list_for_each_entry_safe(curr_port, temp_port,
&cpas_core->axi_ports_list_head, sibling_port) {
rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
- mnoc_bw, 0);
+ mnoc_bw, mnoc_bw);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote, enable=%d, rc=%d",
@@ -372,7 +380,7 @@ static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
if (soc_private->axi_camnoc_based) {
cam_cpas_util_vote_bus_client_bw(
- &curr_port->camnoc_bus, camnoc_bw, 0);
+ &curr_port->camnoc_bus, 0, camnoc_bw);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote, enable=%d, %d",
@@ -563,7 +571,7 @@ static int cam_cpas_util_apply_client_axi_vote(
camnoc_bw, mnoc_bw);
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
- mnoc_bw, 0);
+ mnoc_bw, mnoc_bw);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
@@ -573,11 +581,11 @@ static int cam_cpas_util_apply_client_axi_vote(
if (soc_private->axi_camnoc_based) {
rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
- camnoc_bw, 0);
+ 0, camnoc_bw);
if (rc) {
CAM_ERR(CAM_CPAS,
"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
- camnoc_bw, camnoc_bw, rc);
+ 0ULL, camnoc_bw, rc);
goto unlock_axi_port;
}
}
@@ -1250,7 +1258,7 @@ static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
int i;
- for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+ for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
mutex_init(&cpas_core->client_mutex[i]);
cpas_core->cpas_client[i] = NULL;
}
@@ -1263,7 +1271,7 @@ static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
int i;
- for (i = 0; i < CPAS_MAX_CLIENTS; i++) {
+ for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
if (cpas_core->cpas_client[i]) {
cam_cpas_hw_unregister_client(cpas_hw, i);
cpas_core->cpas_client[i] = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index bbc99b7..aa3663d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -17,13 +17,14 @@
#include "cam_cpas_hw_intf.h"
#include "cam_common_util.h"
-#define CPAS_MAX_CLIENTS 20
+#define CAM_CPAS_MAX_CLIENTS 30
#define CAM_CPAS_INFLIGHT_WORKS 5
#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
-#define CAM_CPAS_CLIENT_VALID(indx) ((indx >= 0) && (indx < CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_VALID(indx) \
+ ((indx >= 0) && (indx < CAM_CPAS_MAX_CLIENTS))
#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) \
((CAM_CPAS_CLIENT_VALID(indx)) && \
(cpas_core->cpas_client[indx]))
@@ -176,8 +177,8 @@ struct cam_cpas_axi_port {
*/
struct cam_cpas {
struct cam_cpas_hw_caps hw_caps;
- struct cam_cpas_client *cpas_client[CPAS_MAX_CLIENTS];
- struct mutex client_mutex[CPAS_MAX_CLIENTS];
+ struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
+ struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
uint32_t num_clients;
uint32_t registered_clients;
uint32_t streamon_clients;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index 0ba3bb2..d5108f6 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -79,22 +79,24 @@ int cam_cpas_get_cpas_hw_version(uint32_t *hw_version)
int cam_cpas_get_hw_info(uint32_t *camera_family,
struct cam_hw_version *camera_version,
- struct cam_hw_version *cpas_version)
+ struct cam_hw_version *cpas_version,
+ uint32_t *cam_caps)
{
if (!CAM_CPAS_INTF_INITIALIZED()) {
CAM_ERR(CAM_CPAS, "cpas intf not initialized");
return -ENODEV;
}
- if (!camera_family || !camera_version || !cpas_version) {
- CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK", camera_family,
- camera_version, cpas_version);
+ if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
+ CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
+ camera_family, camera_version, cpas_version, cam_caps);
return -EINVAL;
}
*camera_family = g_cpas_intf->hw_caps.camera_family;
*camera_version = g_cpas_intf->hw_caps.camera_version;
*cpas_version = g_cpas_intf->hw_caps.cpas_version;
+ *cam_caps = g_cpas_intf->hw_caps.camera_capability;
return 0;
}
@@ -364,6 +366,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
switch (cmd->op_code) {
case CAM_QUERY_CAP: {
struct cam_cpas_query_cap query;
+ uint32_t cam_cpas;
rc = copy_from_user(&query, (void __user *) cmd->handle,
sizeof(query));
@@ -374,7 +377,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
}
rc = cam_cpas_get_hw_info(&query.camera_family,
- &query.camera_version, &query.cpas_version);
+ &query.camera_version, &query.cpas_version, &cam_cpas);
if (rc)
break;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index b2ad513..d4fc039 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -14,8 +14,8 @@
#define _CAM_CPAS_SOC_H_
#include "cam_soc_util.h"
+#include "cam_cpas_hw.h"
-#define CAM_CPAS_MAX_CLIENTS 20
#define CAM_REGULATOR_LEVEL_MAX 16
/**
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 4b0cc74..0e5ce85 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -24,6 +24,18 @@
struct cam_camnoc_info *camnoc_info;
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+static const char * const camnoc_salve_err_code[] = {
+ "Target Error", /* err code 0 */
+ "Address decode error", /* err code 1 */
+ "Unsupported request", /* err code 2 */
+ "Disconnected target", /* err code 3 */
+ "Security violation", /* err code 4 */
+ "Hidden security violation", /* err code 5 */
+ "Timeout Error", /* err code 6 */
+ "Unknown Error", /* unknown err code */
+};
+
static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
struct cam_cpas_hw_caps *hw_caps)
{
@@ -106,91 +118,155 @@ static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
}
static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
- struct cam_hw_soc_info *soc_info)
+ struct cam_hw_soc_info *soc_info,
+ struct cam_camnoc_irq_slave_err_data *slave_err)
{
- uint32_t reg_value[4];
- int i;
- int size = camnoc_info->error_logger_size;
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+ int err_code_index = 0;
- for (i = 0; (i + 3) < size; i = i + 4) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- reg_value[2] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 2]);
- reg_value[3] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 3]);
- CAM_ERR(CAM_CPAS,
- "offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1], reg_value[2], reg_value[3]);
+ if (!camnoc_info->err_logger) {
+ CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+ return -EINVAL;
}
- if ((i + 2) < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- reg_value[2] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 2]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1], reg_value[2]);
- i = i + 3;
- }
+ slave_err->mainctrl.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->mainctrl);
- if ((i + 1) < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- reg_value[1] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i + 1]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
- camnoc_info->error_logger[i], reg_value[0],
- reg_value[1]);
- i = i + 2;
- }
+ slave_err->errvld.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errvld);
- if (i < size) {
- reg_value[0] = cam_io_r_mb(
- soc_info->reg_map[camnoc_index].mem_base +
- camnoc_info->error_logger[i]);
- CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
- camnoc_info->error_logger[i], reg_value[0]);
- }
+ slave_err->errlog0_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog0_low);
+
+ slave_err->errlog0_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog0_high);
+
+ slave_err->errlog1_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog1_low);
+
+ slave_err->errlog1_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog1_high);
+
+ slave_err->errlog2_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog2_low);
+
+ slave_err->errlog2_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog2_high);
+
+ slave_err->errlog3_low.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog3_low);
+
+ slave_err->errlog3_high.value = cam_io_r_mb(
+ soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->err_logger->errlog3_high);
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+ camnoc_info->err_logger->mainctrl,
+ slave_err->mainctrl.value,
+ camnoc_info->err_logger->errvld,
+ slave_err->errvld.value,
+ slave_err->mainctrl.stall_en,
+ slave_err->mainctrl.fault_en,
+ slave_err->errvld.err_vld);
+
+ err_code_index = slave_err->errlog0_low.err_code;
+ if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+ err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+ camnoc_info->err_logger->errlog0_low,
+ slave_err->errlog0_low.value,
+ camnoc_info->err_logger->errlog0_high,
+ slave_err->errlog0_high.value,
+ slave_err->errlog0_low.loginfo_vld,
+ slave_err->errlog0_low.word_error,
+ slave_err->errlog0_low.non_secure,
+ slave_err->errlog0_low.device,
+ slave_err->errlog0_low.opc,
+ slave_err->errlog0_low.err_code,
+ camnoc_salve_err_code[err_code_index],
+ slave_err->errlog0_low.sizef,
+ slave_err->errlog0_low.addr_space,
+ slave_err->errlog0_high.len1);
+
+ CAM_ERR_RATE_LIMIT(CAM_CPAS,
+ "errlog1_low[0x%x 0x%x] errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x] errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x] errlog3_high[0x%x 0x%x]",
+ camnoc_info->err_logger->errlog1_low,
+ slave_err->errlog1_low.value,
+ camnoc_info->err_logger->errlog1_high,
+ slave_err->errlog1_high.value,
+ camnoc_info->err_logger->errlog2_low,
+ slave_err->errlog2_low.value,
+ camnoc_info->err_logger->errlog2_high,
+ slave_err->errlog2_high.value,
+ camnoc_info->err_logger->errlog3_low,
+ slave_err->errlog3_low.value,
+ camnoc_info->err_logger->errlog3_high,
+ slave_err->errlog3_high.value);
return 0;
}
-static int cam_cpastop_handle_ubwc_err(struct cam_cpas *cpas_core,
- struct cam_hw_soc_info *soc_info, int i)
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+ struct cam_hw_soc_info *soc_info, int i,
+ struct cam_camnoc_irq_ubwc_enc_data *enc_err)
{
- uint32_t reg_value;
int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
- reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+ enc_err->encerr_status.value =
+ cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
camnoc_info->irq_err[i].err_status.offset);
- CAM_ERR(CAM_CPAS,
- "Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
- i, camnoc_info->irq_err[i].err_status.offset, reg_value);
+ /* Let clients handle the UBWC errors */
+ CAM_DBG(CAM_CPAS,
+ "ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+ i, camnoc_info->irq_err[i].err_status.offset,
+ enc_err->encerr_status.value);
- return reg_value;
+ return 0;
}
-static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+ struct cam_hw_soc_info *soc_info, int i,
+ struct cam_camnoc_irq_ubwc_dec_data *dec_err)
{
- CAM_ERR(CAM_CPAS, "ahb timout error");
+ int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+ dec_err->decerr_status.value =
+ cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+ camnoc_info->irq_err[i].err_status.offset);
+
+ /* Let clients handle the UBWC errors */
+ CAM_DBG(CAM_CPAS,
+ "ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ i, camnoc_info->irq_err[i].err_status.offset,
+ dec_err->decerr_status.value,
+ dec_err->decerr_status.thr_err,
+ dec_err->decerr_status.fcl_err,
+ dec_err->decerr_status.len_md_err,
+ dec_err->decerr_status.format_err);
+
+ return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+ struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+ CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
return 0;
}
@@ -228,10 +304,11 @@ static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
}
static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
- enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
+ struct cam_cpas_irq_data *irq_data)
{
int i;
struct cam_cpas_client *cpas_client;
+ bool error_handled = false;
CAM_DBG(CAM_CPAS,
"Notify CB : num_clients=%d, registered=%d, started=%d",
@@ -243,13 +320,15 @@ static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
cpas_client = cpas_core->cpas_client[i];
if (cpas_client->data.cam_cpas_client_cb) {
CAM_DBG(CAM_CPAS,
- "Calling client CB %d : %d 0x%x",
- i, irq_type, irq_data);
- cpas_client->data.cam_cpas_client_cb(
+ "Calling client CB %d : %d",
+ i, irq_data->irq_type);
+ error_handled =
+ cpas_client->data.cam_cpas_client_cb(
cpas_client->data.client_handle,
cpas_client->data.userdata,
- (enum cam_camnoc_irq_type)irq_type,
irq_data);
+ if (error_handled)
+ break;
}
}
}
@@ -263,7 +342,7 @@ static void cam_cpastop_work(struct work_struct *work)
struct cam_hw_soc_info *soc_info;
int i;
enum cam_camnoc_hw_irq_type irq_type;
- uint32_t irq_data;
+ struct cam_cpas_irq_data irq_data;
payload = container_of(work, struct cam_cpas_work_payload, work);
if (!payload) {
@@ -280,23 +359,30 @@ static void cam_cpastop_work(struct work_struct *work)
(camnoc_info->irq_err[i].enable)) {
irq_type = camnoc_info->irq_err[i].irq_type;
CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
- irq_data = 0;
+ memset(&irq_data, 0x0, sizeof(irq_data));
+ irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
switch (irq_type) {
case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
- irq_data = cam_cpastop_handle_errlogger(
- cpas_core, soc_info);
+ cam_cpastop_handle_errlogger(
+ cpas_core, soc_info,
+ &irq_data.u.slave_err);
break;
case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
- case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
- irq_data = cam_cpastop_handle_ubwc_err(
- cpas_core, soc_info, i);
+ cam_cpastop_handle_ubwc_enc_err(
+ cpas_core, soc_info, i,
+ &irq_data.u.enc_err);
+ break;
+ case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ cam_cpastop_handle_ubwc_dec_err(
+ cpas_core, soc_info, i,
+ &irq_data.u.dec_err);
break;
case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
- irq_data = cam_cpastop_handle_ahb_timeout_err(
- cpas_hw);
+ cam_cpastop_handle_ahb_timeout_err(
+ cpas_hw, &irq_data.u.ahb_err);
break;
case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
CAM_DBG(CAM_CPAS, "TEST IRQ");
@@ -306,8 +392,7 @@ static void cam_cpastop_work(struct work_struct *work)
break;
}
- cam_cpastop_notify_clients(cpas_core, irq_type,
- irq_data);
+ cam_cpastop_notify_clients(cpas_core, &irq_data);
payload->irq_status &=
~camnoc_info->irq_err[i].sbm_port;
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index e3639a6..73f7e9b 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -173,6 +173,34 @@ struct cam_cpas_hw_errata_wa_list {
};
/**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+ uint32_t mainctrl;
+ uint32_t errvld;
+ uint32_t errlog0_low;
+ uint32_t errlog0_high;
+ uint32_t errlog1_low;
+ uint32_t errlog1_high;
+ uint32_t errlog2_low;
+ uint32_t errlog2_high;
+ uint32_t errlog3_low;
+ uint32_t errlog3_high;
+};
+
+/**
* struct cam_camnoc_info : Overall CAMNOC settings info
*
* @specific: Pointer to CAMNOC SPECIFICTONTTPTR settings
@@ -180,8 +208,7 @@ struct cam_cpas_hw_errata_wa_list {
* @irq_sbm: Pointer to CAMNOC IRQ SBM settings
* @irq_err: Pointer to CAMNOC IRQ Error settings
* @irq_err_size: Array size of IRQ Error settings
- * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
- * @error_logger_size: Array size of IRQ Error logger
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
* @errata_wa_list: HW Errata workaround info
*
*/
@@ -191,8 +218,7 @@ struct cam_camnoc_info {
struct cam_camnoc_irq_sbm *irq_sbm;
struct cam_camnoc_irq_err *irq_err;
int irq_err_size;
- uint32_t *error_logger;
- int error_logger_size;
+ struct cam_camnoc_err_logger_info *err_logger;
struct cam_cpas_hw_errata_wa_list *errata_wa_list;
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index b30cd05..2654b47 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -498,19 +498,17 @@ static struct cam_camnoc_specific
}
};
-uint32_t slave_error_logger[] = {
- 0x2700, /* ERRLOGGER_SWID_LOW */
- 0x2704, /* ERRLOGGER_SWID_HIGH */
- 0x2708, /* ERRLOGGER_MAINCTL_LOW */
- 0x2710, /* ERRLOGGER_ERRVLD_LOW */
- 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
- 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
- 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
- 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
- 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
- 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
- 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
- 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
@@ -533,9 +531,7 @@ struct cam_camnoc_info cam170_cpas100_camnoc_info = {
.irq_err = &cam_cpas100_irq_err[0],
.irq_err_size = sizeof(cam_cpas100_irq_err) /
sizeof(cam_cpas100_irq_err[0]),
- .error_logger = &slave_error_logger[0],
- .error_logger_size = sizeof(slave_error_logger) /
- sizeof(slave_error_logger[0]),
+ .err_logger = &cam170_cpas100_err_logger_offsets,
.errata_wa_list = &cam170_cpas100_errata_wa_list,
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index 55cb07b..4418fb1 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -258,14 +258,14 @@ static struct cam_camnoc_specific
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
- .value = 0x66665433,
+ .value = 0x44443333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
- .value = 0x66666666,
+ .value = 0x66665555,
},
.urgency = {
.enable = true,
@@ -306,14 +306,14 @@ static struct cam_camnoc_specific
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
- .value = 0x66665433,
+ .value = 0x44443333,
},
.priority_lut_high = {
.enable = true,
.access_type = CAM_REG_TYPE_READ_WRITE,
.masked_value = 0,
.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
- .value = 0x66666666,
+ .value = 0x66665555,
},
.urgency = {
.enable = true,
@@ -505,19 +505,17 @@ static struct cam_camnoc_specific
},
};
-static uint32_t cam_cpas110_slave_error_logger[] = {
- 0x2700, /* ERRLOGGER_SWID_LOW */
- 0x2704, /* ERRLOGGER_SWID_HIGH */
- 0x2708, /* ERRLOGGER_MAINCTL_LOW */
- 0x2710, /* ERRLOGGER_ERRVLD_LOW */
- 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
- 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
- 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
- 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
- 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
- 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
- 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
- 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+ .mainctrl = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+ .errvld = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+ .errlog0_low = 0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+ .errlog0_high = 0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+ .errlog1_low = 0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+ .errlog1_high = 0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+ .errlog2_low = 0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+ .errlog2_high = 0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+ .errlog3_low = 0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+ .errlog3_high = 0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
};
static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
@@ -540,9 +538,7 @@ static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
.irq_err = &cam_cpas110_irq_err[0],
.irq_err_size = sizeof(cam_cpas110_irq_err) /
sizeof(cam_cpas110_irq_err[0]),
- .error_logger = &cam_cpas110_slave_error_logger[0],
- .error_logger_size = sizeof(cam_cpas110_slave_error_logger) /
- sizeof(cam_cpas110_slave_error_logger[0]),
+ .err_logger = &cam170_cpas110_err_logger_offsets,
.errata_wa_list = &cam170_cpas110_errata_wa_list,
};
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index aa8b266..c844ef7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -82,6 +82,183 @@ enum cam_camnoc_irq_type {
};
/**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl : Err logger mainctrl info
+ * @errvld : Err logger errvld info
+ * @errlog0_low : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+ union {
+ struct {
+ uint32_t stall_en : 1; /* bit 0 */
+ uint32_t fault_en : 1; /* bit 1 */
+ uint32_t rsv : 30; /* bits 2-31 */
+ };
+ uint32_t value;
+ } mainctrl;
+ union {
+ struct {
+ uint32_t err_vld : 1; /* bit 0 */
+ uint32_t rsv : 31; /* bits 1-31 */
+ };
+ uint32_t value;
+ } errvld;
+ union {
+ struct {
+ uint32_t loginfo_vld : 1; /* bit 0 */
+ uint32_t word_error : 1; /* bit 1 */
+ uint32_t non_secure : 1; /* bit 2 */
+ uint32_t device : 1; /* bit 3 */
+ uint32_t opc : 3; /* bits 4 - 6 */
+ uint32_t rsv0 : 1; /* bit 7 */
+ uint32_t err_code : 3; /* bits 8 - 10 */
+ uint32_t sizef : 3; /* bits 11 - 13 */
+ uint32_t rsv1 : 2; /* bits 14 - 15 */
+ uint32_t addr_space : 6; /* bits 16 - 21 */
+ uint32_t rsv2 : 10; /* bits 22 - 31 */
+ };
+ uint32_t value;
+ } errlog0_low;
+ union {
+ struct {
+ uint32_t len1 : 10; /* bits 0 - 9 */
+ uint32_t rsv : 22; /* bits 10 - 31 */
+ };
+ uint32_t value;
+ } errlog0_high;
+ union {
+ struct {
+ uint32_t path : 16; /* bits 0 - 15 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog1_low;
+ union {
+ struct {
+ uint32_t extid : 18; /* bits 0 - 17 */
+ uint32_t rsv : 14; /* bits 18 - 31 */
+ };
+ uint32_t value;
+ } errlog1_high;
+ union {
+ struct {
+ uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog2_low;
+ union {
+ struct {
+ uint32_t errlog2_msb : 16; /* bits 0 - 15 */
+ uint32_t rsv : 16; /* bits 16 - 31 */
+ };
+ uint32_t value;
+ } errlog2_high;
+ union {
+ struct {
+ uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_low;
+ union {
+ struct {
+ uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+ };
+ uint32_t value;
+ } errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+ union {
+ struct {
+ uint32_t encerrstatus : 3; /* bits 0 - 2 */
+ uint32_t rsv : 29; /* bits 3 - 31 */
+ };
+ uint32_t value;
+ } encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err : Set to 1 if
+ * At least one of the bflc_len fields in the bit stream exceeds
+ * its threshold value. This error is possible only for
+ * RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err : Set to 1 if
+ * Fast clear with a legal non-RGB format
+ * @len_md_err : Set to 1 if
+ * The calculated burst length does not match burst length
+ * specified by the metadata value
+ * @format_err : Set to 1 if
+ * Illegal format
+ * 1. bad format :2,3,6
+ * 2. For 32B MAL, metadata=6
+ * 3. For 32B MAL RGB565, Metadata != 0,1,7
+ * 4. For 64B MAL RGB565, metadata[3:1] == 1,2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+ union {
+ struct {
+ uint32_t thr_err : 1; /* bit 0 */
+ uint32_t fcl_err : 1; /* bit 1 */
+ uint32_t len_md_err : 1; /* bit 2 */
+ uint32_t format_err : 1; /* bit 3 */
+ uint32_t rsv : 28; /* bits 4 - 31 */
+ };
+ uint32_t value;
+ } decerr_status;
+};
+
+struct cam_camnoc_irq_ahb_timeout_data {
+ uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type : To identify the type of IRQ
+ * @u : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ * Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err : Data for UBWC Encode error.
+ * Valid if type is one of below:
+ * CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ * CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err : Data for UBWC Decode error.
+ * Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err : Data for AHB timeout error.
+ * Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+ enum cam_camnoc_irq_type irq_type;
+ union {
+ struct cam_camnoc_irq_slave_err_data slave_err;
+ struct cam_camnoc_irq_ubwc_enc_data enc_err;
+ struct cam_camnoc_irq_ubwc_dec_data dec_err;
+ struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+ } u;
+};
+
+/**
* struct cam_cpas_register_params : Register params for cpas client
*
* @identifier : Input identifier string which is the device label
@@ -107,11 +284,10 @@ struct cam_cpas_register_params {
uint32_t cell_index;
struct device *dev;
void *userdata;
- void (*cam_cpas_client_cb)(
+ bool (*cam_cpas_client_cb)(
uint32_t client_handle,
void *userdata,
- enum cam_camnoc_irq_type event_type,
- uint32_t event_data);
+ struct cam_cpas_irq_data *irq_data);
uint32_t client_handle;
};
@@ -314,6 +490,7 @@ int cam_cpas_reg_read(
* CAM_FAMILY_CPAS_SS
* @camera_version : Camera platform version
* @cpas_version : Camera cpas version
+ * @cam_caps : Camera capability
*
* @return 0 on success.
*
@@ -321,7 +498,8 @@ int cam_cpas_reg_read(
int cam_cpas_get_hw_info(
uint32_t *camera_family,
struct cam_hw_version *camera_version,
- struct cam_hw_version *cpas_version);
+ struct cam_hw_version *cpas_version,
+ uint32_t *cam_caps);
/**
* cam_cpas_get_cpas_hw_version()
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index f23c4c1..78c1dd3 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -17,6 +17,8 @@
#include "cam_fd_context.h"
#include "cam_trace.h"
+static const char fd_dev_name[] = "fd";
+
/* Functions in Available state */
static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -208,8 +210,8 @@ int cam_fd_context_init(struct cam_fd_context *fd_ctx,
memset(fd_ctx, 0, sizeof(*fd_ctx));
- rc = cam_context_init(base_ctx, NULL, hw_intf, fd_ctx->req_base,
- CAM_CTX_REQ_MAX);
+ rc = cam_context_init(base_ctx, fd_dev_name, NULL, hw_intf,
+ fd_ctx->req_base, CAM_CTX_REQ_MAX);
if (rc) {
CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 37e6954..bff42f4 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -865,6 +865,8 @@ static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
}
hw_device->ready_to_process = false;
+ hw_device->cur_hw_ctx = hw_ctx;
+ hw_device->req_id = frame_req->request_id;
mutex_unlock(&hw_device->lock);
rc = cam_fd_mgr_util_put_frame_req(
@@ -1026,6 +1028,8 @@ static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
*/
mutex_lock(&hw_device->lock);
hw_device->ready_to_process = true;
+ hw_device->req_id = -1;
+ hw_device->cur_hw_ctx = NULL;
CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
mutex_unlock(&hw_device->lock);
@@ -1206,6 +1210,7 @@ static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
if (rc)
CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
+ hw_ctx->ctx_in_use = false;
list_del_init(&hw_ctx->list);
cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
@@ -1261,6 +1266,82 @@ static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
return rc;
}
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+ struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+ int rc = 0;
+ struct cam_fd_mgr_frame_request *frame_req, *req_temp;
+ struct cam_fd_hw_stop_args hw_stop_args;
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_fd_device *hw_device;
+
+ if (!hw_mgr_priv || !hw_ctx) {
+ CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+ hw_mgr_priv, hw_ctx);
+ return -EINVAL;
+ }
+
+ if (!hw_ctx->ctx_in_use) {
+ CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+ return -EPERM;
+ }
+ CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+ hw_ctx->device_index);
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ mutex_lock(&hw_mgr->frame_req_mutex);
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_pending_list_high, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ list_del_init(&frame_req->list);
+ }
+
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_pending_list_normal, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ list_del_init(&frame_req->list);
+ }
+
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_processing_list, list) {
+ if (frame_req->hw_ctx != hw_ctx)
+ continue;
+
+ list_del_init(&frame_req->list);
+ }
+ mutex_unlock(&hw_mgr->frame_req_mutex);
+
+ mutex_lock(&hw_device->lock);
+ if ((hw_device->ready_to_process == true) ||
+ (hw_device->cur_hw_ctx != hw_ctx))
+ goto end;
+
+ if (hw_device->hw_intf->hw_ops.stop) {
+ hw_stop_args.hw_ctx = hw_ctx;
+ rc = hw_device->hw_intf->hw_ops.stop(
+ hw_device->hw_intf->hw_priv, &hw_stop_args,
+ sizeof(hw_stop_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+ goto end;
+ }
+ hw_device->ready_to_process = true;
+ }
+
+end:
+ mutex_unlock(&hw_device->lock);
+ return rc;
+}
+
static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
{
struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
@@ -1268,7 +1349,6 @@ static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
(struct cam_hw_stop_args *)mgr_stop_args;
struct cam_fd_hw_mgr_ctx *hw_ctx;
struct cam_fd_device *hw_device;
- struct cam_fd_hw_stop_args hw_stop_args;
struct cam_fd_hw_deinit_args hw_deinit_args;
int rc = 0;
@@ -1295,21 +1375,9 @@ static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
hw_device->ready_to_process);
- if ((hw_device->hw_intf->hw_ops.stop) &&
- (hw_device->ready_to_process == false)) {
- /*
- * Even if device is in processing state, we should submit
- * stop command only if this ctx is running on hw
- */
- hw_stop_args.hw_ctx = hw_ctx;
- rc = hw_device->hw_intf->hw_ops.stop(
- hw_device->hw_intf->hw_priv, &hw_stop_args,
- sizeof(hw_stop_args));
- if (rc) {
- CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
- return rc;
- }
- }
+ rc = cam_fd_mgr_hw_flush(hw_mgr, hw_ctx);
+ if (rc)
+ CAM_ERR(CAM_FD, "FD failed to flush");
if (hw_device->hw_intf->hw_ops.deinit) {
hw_deinit_args.hw_ctx = hw_ctx;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
index 135e006..db5d100 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -80,14 +80,18 @@ struct cam_fd_hw_mgr_ctx {
* @num_ctxts : Number of context currently running on this device
* @valid : Whether this device is valid
* @lock : Lock used for protectin
+ * @cur_hw_ctx : current hw context running in the device
+ * @req_id : current processing req id
*/
struct cam_fd_device {
- struct cam_fd_hw_caps hw_caps;
- struct cam_hw_intf *hw_intf;
- bool ready_to_process;
- uint32_t num_ctxts;
- bool valid;
- struct mutex lock;
+ struct cam_fd_hw_caps hw_caps;
+ struct cam_hw_intf *hw_intf;
+ bool ready_to_process;
+ uint32_t num_ctxts;
+ bool valid;
+ struct mutex lock;
+ struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
+ int64_t req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index d9be53d..51c8e4a 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -643,6 +643,7 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
struct cam_fd_hw_init_args *init_args =
(struct cam_fd_hw_init_args *)init_hw_args;
int rc = 0;
+ unsigned long flags;
if (!fd_hw || !init_args) {
CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
@@ -671,6 +672,11 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
goto unlock_return;
}
+ spin_lock_irqsave(&fd_core->spin_lock, flags);
+ fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
rc = cam_fd_hw_reset(hw_priv, NULL, 0);
if (rc) {
CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
@@ -679,15 +685,10 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
cam_fd_hw_util_enable_power_on_settings(fd_hw);
- fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
- fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
-
cdm_streamon:
fd_hw->open_count++;
CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
- mutex_unlock(&fd_hw->hw_mutex);
-
if (init_args->ctx_hw_private) {
struct cam_fd_ctx_hw_private *ctx_hw_private =
init_args->ctx_hw_private;
@@ -696,15 +697,24 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
if (rc) {
CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
ctx_hw_private->cdm_handle, rc);
- return rc;
+ fd_hw->open_count--;
+ if (!fd_hw->open_count)
+ goto disable_soc;
}
}
+ mutex_unlock(&fd_hw->hw_mutex);
+
return rc;
disable_soc:
if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
CAM_ERR(CAM_FD, "Error in disable soc resources");
+
+ spin_lock_irqsave(&fd_core->spin_lock, flags);
+ fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
unlock_return:
mutex_unlock(&fd_hw->hw_mutex);
return rc;
@@ -717,6 +727,7 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
struct cam_fd_hw_deinit_args *deinit_args =
(struct cam_fd_hw_deinit_args *)deinit_hw_args;
int rc = 0;
+ unsigned long flags;
if (!fd_hw || !deinit_hw_args) {
CAM_ERR(CAM_FD, "Invalid argument");
@@ -754,8 +765,9 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
/* With the ref_cnt correct, this should never happen */
WARN_ON(!fd_core);
+ spin_lock_irqsave(&fd_core->spin_lock, flags);
fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
-
+ spin_unlock_irqrestore(&fd_core->spin_lock, flags);
positive_ref_cnt:
if (deinit_args->ctx_hw_private) {
struct cam_fd_ctx_hw_private *ctx_hw_private =
@@ -792,7 +804,8 @@ int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
soc_info = &fd_hw->soc_info;
spin_lock_irqsave(&fd_core->spin_lock, flags);
- if (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS) {
+ if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+ (fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
CAM_ERR(CAM_FD, "Reset not allowed in %d state",
fd_core->core_state);
spin_unlock_irqrestore(&fd_core->spin_lock, flags);
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
index 9045dc1..f27d016 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -20,11 +20,16 @@
#include "cam_fd_hw_core.h"
#include "cam_fd_hw_soc.h"
-static void cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
- enum cam_camnoc_irq_type event_type, uint32_t event_data)
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
{
- CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, event=%d, event_data=%d",
- handle, userdata, event_type, event_data);
+ if (!irq_data)
+ return false;
+
+ CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+ handle, userdata, irq_data->irq_type);
+
+ return false;
}
static int cam_fd_hw_soc_util_setup_regbase_indices(
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 15bd98c..0c37994 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -26,6 +26,8 @@
#include "cam_trace.h"
#include "cam_debug_util.h"
+static const char icp_dev_name[] = "icp";
+
static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
{
@@ -171,8 +173,8 @@ int cam_icp_context_init(struct cam_icp_context *ctx,
goto err;
}
- rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
- CAM_CTX_REQ_MAX);
+ rc = cam_context_init(ctx->base, icp_dev_name, NULL, hw_intf,
+ ctx->req_base, CAM_CTX_REQ_MAX);
if (rc) {
CAM_ERR(CAM_ICP, "Camera Context Base init failed");
goto err;
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 905cc97..51499de 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -76,7 +76,7 @@ static int cam_icp_subdev_open(struct v4l2_subdev *sd,
}
hw_mgr_intf = &node->hw_mgr_intf;
- rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
+ rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
if (rc < 0) {
CAM_ERR(CAM_ICP, "FW download failed");
goto end;
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index f74938d..ce7a8b3 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -60,10 +60,12 @@ int hfi_write_cmd(void *cmd_ptr);
* hfi_read_message() - function for hfi read
* @pmsg: buffer to place read message for hfi queue
* @q_id: queue id
+ * @words_read: total number of words read from the queue
+ * returned as output to the caller
*
* Returns success(zero)/failure(non zero)
*/
-int hfi_read_message(uint32_t *pmsg, uint8_t q_id);
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
/**
* hfi_init() - function initialize hfi after firmware download
@@ -109,6 +111,11 @@ void cam_hfi_disable_cpu(void __iomem *icp_base);
* cam_hfi_deinit() - cleanup HFI
*/
void cam_hfi_deinit(void);
+/**
+ * hfi_set_debug_level() - set debug level
+ * @lvl: FW debug message level
+ */
+int hfi_set_debug_level(uint32_t lvl);
/**
* hfi_enable_ipe_bps_pc() - Enable interframe pc
@@ -119,4 +126,10 @@ void cam_hfi_deinit(void);
*/
int hfi_enable_ipe_bps_pc(bool enable);
+/**
+ * hfi_cmd_ubwc_config() - UBWC configuration to firmware
+ * @ubwc_cfg: UBWC configuration parameters
+ */
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
+
#endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
index 04e3c85..eb4b132 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -60,11 +60,11 @@
#define ICP_CMD_Q_SIZE_IN_BYTES 4096
#define ICP_MSG_Q_SIZE_IN_BYTES 4096
-#define ICP_DBG_Q_SIZE_IN_BYTES 8192
+#define ICP_DBG_Q_SIZE_IN_BYTES 102400
#define ICP_SHARED_MEM_IN_BYTES (1024 * 1024)
#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES (2 * 1024 * 1024)
-#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS 128
+#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS 25600
#define ICP_HFI_QTBL_HOSTID1 0x01000000
#define ICP_HFI_QTBL_STATUS_ENABLED 0x00000001
@@ -109,7 +109,8 @@ enum hfi_state {
*/
enum reg_settings {
RESET,
- SET
+ SET,
+ SET_WM = 1024
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
index 65dc4b3..aaa18bb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
@@ -156,6 +156,7 @@
#define HFI_PROPERTY_ICP_COMMON_START (HFI_DOMAIN_BASE_ICP + 0x0)
#define HFI_PROP_SYS_DEBUG_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_UBWC_CFG (HFI_PROPERTY_ICP_COMMON_START + 0x2)
#define HFI_PROP_SYS_IMAGE_VER (HFI_PROPERTY_ICP_COMMON_START + 0x3)
#define HFI_PROP_SYS_SUPPORTED (HFI_PROPERTY_ICP_COMMON_START + 0x4)
#define HFI_PROP_SYS_IPEBPS_PC (HFI_PROPERTY_ICP_COMMON_START + 0x5)
@@ -201,6 +202,8 @@
#define HFI_DEBUG_MODE_QUEUE 0x00000001
#define HFI_DEBUG_MODE_QDSS 0x00000002
+#define HFI_DEV_VERSION_MAX 0x5
+
/**
* start of sys command packet types
* These commands are used to get system level information
@@ -257,6 +260,17 @@ struct hfi_ipe_bps_pc {
} __packed;
/**
+ * struct hfi_cmd_ubwc_cfg
+ * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
+ * @ubwc_fetch_cfg: UBWC configuration for fetch
+ * @ubwc_write_cfg: UBWC configuration for write
+ */
+struct hfi_cmd_ubwc_cfg {
+ uint32_t ubwc_fetch_cfg;
+ uint32_t ubwc_write_cfg;
+};
+
+/**
* struct hfi_cmd_sys_init
* command to initialization of system session
* @size: packet size in bytes
@@ -371,14 +385,30 @@ struct hfi_image_version {
} __packed;
/**
+ * struct hfi_msg_init_done_data
+ * @api_ver: Firmware API version
+ * @dev_ver: Device version
+ * @num_icp_hw: Number of ICP hardware information
+ * @dev_hw_ver: Supported hardware version information
+ * @reserved: Reserved field
+ */
+struct hfi_msg_init_done_data {
+ uint32_t api_ver;
+ uint32_t dev_ver;
+ uint32_t num_icp_hw;
+ uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
+ uint32_t reserved;
+};
+
+/**
* struct hfi_msg_init_done
* system init done message from firmware. Many system level properties
* are returned with the packet
- * @size: packet size in bytes
- * @pkt_type: opcode of a packet
- * @err_type: error code associated with response
- * @num_prop: number of default capability info
- * @prop_data: array of property ids and corresponding structure pairs
+ * @size: Packet size in bytes
+ * @pkt_type: Opcode of a packet
+ * @err_type: Error code associated with response
+ * @num_prop: Number of default capability info
+ * @prop_data: Array of property ids and corresponding structure pairs
*/
struct hfi_msg_init_done {
uint32_t size;
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index cdb0cfa..a8855ae 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -27,6 +27,7 @@
#include "hfi_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_debug_util.h"
+#include "cam_soc_util.h"
#define HFI_VERSION_INFO_MAJOR_VAL 1
#define HFI_VERSION_INFO_MINOR_VAL 1
@@ -39,9 +40,6 @@
#define HFI_VERSION_INFO_STEP_BMSK 0xFF
#define HFI_VERSION_INFO_STEP_SHFT 0
-#define SOC_VERSION_HW1 0x10000
-#define SOC_VERSION_HW2 0x20000
-
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -111,7 +109,19 @@ int hfi_write_cmd(void *cmd_ptr)
new_write_idx << BYTE_WORD_SHIFT);
}
+ /*
+ * To make sure command data in a command queue before
+ * updating write index
+ */
+ wmb();
+
q->qhdr_write_idx = new_write_idx;
+
+ /*
+ * Before raising interrupt make sure command data is ready for
+ * firmware to process
+ */
+ wmb();
cam_io_w((uint32_t)INTR_ENABLE,
g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
@@ -119,12 +129,13 @@ int hfi_write_cmd(void *cmd_ptr)
return rc;
}
-int hfi_read_message(uint32_t *pmsg, uint8_t q_id)
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+ uint32_t *words_read)
{
struct hfi_qtbl *q_tbl_ptr;
struct hfi_q_hdr *q;
- uint32_t new_read_idx, size_in_words, temp;
- uint32_t *read_q, *read_ptr;
+ uint32_t new_read_idx, size_in_words, word_diff, temp;
+ uint32_t *read_q, *read_ptr, *write_ptr;
int rc = 0;
if (!pmsg) {
@@ -168,10 +179,22 @@ int hfi_read_message(uint32_t *pmsg, uint8_t q_id)
read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
- size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
+ write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
+
+ if (write_ptr > read_ptr)
+ size_in_words = write_ptr - read_ptr;
+ else {
+ word_diff = read_ptr - write_ptr;
+ if (q_id == Q_MSG)
+ size_in_words = (ICP_MSG_Q_SIZE_IN_BYTES >>
+ BYTE_WORD_SHIFT) - word_diff;
+ else
+ size_in_words = (ICP_DBG_Q_SIZE_IN_BYTES >>
+ BYTE_WORD_SHIFT) - word_diff;
+ }
if ((size_in_words == 0) ||
- (size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
+ (size_in_words > ICP_HFI_MAX_PKT_SIZE_IN_WORDS)) {
CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
size_in_words << BYTE_WORD_SHIFT);
q->qhdr_read_idx = q->qhdr_write_idx;
@@ -192,11 +215,39 @@ int hfi_read_message(uint32_t *pmsg, uint8_t q_id)
}
q->qhdr_read_idx = new_read_idx;
+ *words_read = size_in_words;
err:
mutex_unlock(&hfi_msg_q_mutex);
return rc;
}
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
+{
+ uint8_t *prop;
+ struct hfi_cmd_prop *dbg_prop;
+ uint32_t size = 0;
+
+ size = sizeof(struct hfi_cmd_prop) +
+ sizeof(struct hfi_cmd_ubwc_cfg);
+
+ prop = kzalloc(size, GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ dbg_prop = (struct hfi_cmd_prop *)prop;
+ dbg_prop->size = size;
+ dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ dbg_prop->num_prop = 1;
+ dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
+ dbg_prop->prop_data[1] = ubwc_cfg[0];
+ dbg_prop->prop_data[2] = ubwc_cfg[1];
+
+ hfi_write_cmd(prop);
+ kfree(prop);
+
+ return 0;
+}
+
int hfi_enable_ipe_bps_pc(bool enable)
{
uint8_t *prop;
@@ -223,6 +274,45 @@ int hfi_enable_ipe_bps_pc(bool enable)
return 0;
}
+int hfi_set_debug_level(uint32_t lvl)
+{
+ uint8_t *prop;
+ struct hfi_cmd_prop *dbg_prop;
+ uint32_t size = 0, val;
+
+ val = HFI_DEBUG_MSG_LOW |
+ HFI_DEBUG_MSG_MEDIUM |
+ HFI_DEBUG_MSG_HIGH |
+ HFI_DEBUG_MSG_ERROR |
+ HFI_DEBUG_MSG_FATAL |
+ HFI_DEBUG_MSG_PERF |
+ HFI_DEBUG_CFG_WFI |
+ HFI_DEBUG_CFG_ARM9WD;
+
+ if (lvl > val)
+ return -EINVAL;
+
+ size = sizeof(struct hfi_cmd_prop) +
+ sizeof(struct hfi_debug);
+
+ prop = kzalloc(size, GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ dbg_prop = (struct hfi_cmd_prop *)prop;
+ dbg_prop->size = size;
+ dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ dbg_prop->num_prop = 1;
+ dbg_prop->prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+ dbg_prop->prop_data[1] = lvl;
+ dbg_prop->prop_data[2] = HFI_DEBUG_MODE_QUEUE;
+
+ hfi_write_cmd(prop);
+ kfree(prop);
+
+ return 0;
+}
+
void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
{
switch (type) {
@@ -456,8 +546,8 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
dbg_q_hdr->qhdr_type = Q_DBG;
dbg_q_hdr->qhdr_rx_wm = SET;
- dbg_q_hdr->qhdr_tx_wm = SET;
- dbg_q_hdr->qhdr_rx_req = SET;
+ dbg_q_hdr->qhdr_tx_wm = SET_WM;
+ dbg_q_hdr->qhdr_rx_req = RESET;
dbg_q_hdr->qhdr_tx_req = RESET;
dbg_q_hdr->qhdr_rx_irq_status = RESET;
dbg_q_hdr->qhdr_tx_irq_status = RESET;
@@ -495,8 +585,8 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
RX_EVENT_DRIVEN_MODE_2;
dbg_q_hdr->qhdr_rx_wm = SET;
- dbg_q_hdr->qhdr_tx_wm = SET;
- dbg_q_hdr->qhdr_rx_req = SET;
+ dbg_q_hdr->qhdr_tx_wm = SET_WM;
+ dbg_q_hdr->qhdr_rx_req = RESET;
dbg_q_hdr->qhdr_tx_req = RESET;
dbg_q_hdr->qhdr_rx_irq_status = RESET;
dbg_q_hdr->qhdr_tx_irq_status = RESET;
@@ -574,17 +664,3 @@ void cam_hfi_deinit(void)
mutex_unlock(&hfi_cmd_q_mutex);
mutex_unlock(&hfi_msg_q_mutex);
}
-
-void icp_enable_fw_debug(void)
-{
- hfi_send_system_cmd(HFI_CMD_SYS_SET_PROPERTY,
- (uint64_t)HFI_PROP_SYS_DEBUG_CFG, 0);
-}
-
-int icp_ping_fw(void)
-{
- hfi_send_system_cmd(HFI_CMD_SYS_PING,
- (uint64_t)0x12123434, 0);
-
- return 0;
-}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index e200f6f..635d0df 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -266,8 +266,8 @@ int cam_a5_init_hw(void *device_priv,
cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
- cpas_vote.axi_vote.compressed_bw = ICP_TURBO_VOTE;
- cpas_vote.axi_vote.uncompressed_bw = ICP_TURBO_VOTE;
+ cpas_vote.axi_vote.compressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+ cpas_vote.axi_vote.uncompressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
rc = cam_cpas_start(core_info->cpas_handle,
&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
@@ -367,6 +367,7 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
struct cam_hw_soc_info *soc_info = NULL;
struct cam_a5_device_core_info *core_info = NULL;
struct cam_a5_device_hw_info *hw_info = NULL;
+ struct a5_soc_info *a5_soc = NULL;
int rc = 0;
if (!device_priv) {
@@ -456,6 +457,14 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
core_info->cpas_start = false;
}
break;
+ case CAM_ICP_A5_CMD_UBWC_CFG:
+ a5_soc = soc_info->soc_private;
+ if (!a5_soc) {
+ CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
+ return -EINVAL;
+ }
+ rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
+ break;
default:
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index 99e2e79..14c3c9c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -50,6 +50,40 @@ struct cam_a5_device_hw_info cam_a5_hw_info = {
};
EXPORT_SYMBOL(cam_a5_hw_info);
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+ irq_data->irq_type,
+ irq_data->u.dec_err.decerr_status.value,
+ irq_data->u.dec_err.decerr_status.thr_err,
+ irq_data->u.dec_err.decerr_status.fcl_err,
+ irq_data->u.dec_err.decerr_status.len_md_err,
+ irq_data->u.dec_err.decerr_status.format_err);
+ error_handled = true;
+ break;
+ case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ICP,
+ "IPE/BPS UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
struct cam_a5_device_core_info *core_info,
uint32_t hw_idx)
@@ -59,7 +93,7 @@ int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
cpas_register_params.dev = &soc_info->pdev->dev;
memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
- cpas_register_params.cam_cpas_client_cb = NULL;
+ cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
cpas_register_params.cell_index = hw_idx;
cpas_register_params.userdata = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index f252931..3177513 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -22,11 +22,12 @@
static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
- int rc = 0;
+ int rc = 0, i;
const char *fw_name;
struct a5_soc_info *camp_a5_soc_info;
struct device_node *of_node = NULL;
struct platform_device *pdev = NULL;
+ int num_ubwc_cfg;
pdev = soc_info->pdev;
of_node = pdev->dev.of_node;
@@ -41,9 +42,28 @@ static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
fw_name = camp_a5_soc_info->fw_name;
rc = of_property_read_string(of_node, "fw_name", &fw_name);
- if (rc < 0)
+ if (rc < 0) {
CAM_ERR(CAM_ICP, "fw_name read failed");
+ goto end;
+ }
+ num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
+ if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+ CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
+		rc = (num_ubwc_cfg < 0) ? num_ubwc_cfg : -EINVAL;
+ goto end;
+ }
+
+ for (i = 0; i < num_ubwc_cfg; i++) {
+ rc = of_property_read_u32_index(of_node, "ubwc-cfg",
+ i, &camp_a5_soc_info->ubwc_cfg[i]);
+ if (rc < 0) {
+ CAM_ERR(CAM_ICP, "unable to read ubwc cfg values");
+ break;
+ }
+ }
+
+end:
return rc;
}
@@ -81,7 +101,7 @@ int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
int rc = 0;
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, true);
+ CAM_SVS_VOTE, true);
if (rc)
CAM_ERR(CAM_ICP, "enable platform failed");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
index 916143d..3593cfb 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
@@ -15,8 +15,11 @@
#include "cam_soc_util.h"
+#define ICP_UBWC_MAX 2
+
struct a5_soc_info {
char *fw_name;
+ uint32_t ubwc_cfg[ICP_UBWC_MAX];
};
int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 2477e7d..400e1e7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -65,7 +65,7 @@ int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
int rc = 0;
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, false);
+ CAM_SVS_VOTE, false);
if (rc)
CAM_ERR(CAM_ICP, "enable platform failed");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 3844673..340a1e2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <media/cam_defs.h>
#include <media/cam_icp.h>
+#include <media/cam_cpas.h>
#include "cam_sync_api.h"
#include "cam_packet_util.h"
@@ -46,12 +47,34 @@
#include "hfi_sys_defs.h"
#include "cam_debug_util.h"
#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_cpas_api.h"
#define ICP_WORKQ_TASK_CMD_TYPE 1
#define ICP_WORKQ_TASK_MSG_TYPE 2
static struct cam_icp_hw_mgr icp_hw_mgr;
+static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
+{
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ int rc;
+
+ a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+ if (!a5_dev_intf) {
+ CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+ return -EINVAL;
+ }
+
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_UBWC_CFG, NULL, 0);
+ if (rc)
+		CAM_ERR(CAM_ICP, "CAM_ICP_A5_CMD_UBWC_CFG failed");
+
+ return rc;
+}
+
static void cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_mgr *hw_mgr,
struct cam_icp_hw_ctx_data *ctx_data)
{
@@ -72,13 +95,13 @@ static void cam_icp_hw_mgr_reset_clk_info(struct cam_icp_hw_mgr *hw_mgr)
for (i = 0; i < ICP_CLK_HW_MAX; i++) {
hw_mgr->clk_info[i].base_clk = 0;
- hw_mgr->clk_info[i].curr_clk = ICP_TURBO_VOTE;
+ hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
hw_mgr->clk_info[i].over_clked = 0;
hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
}
- hw_mgr->icp_default_clk = ICP_SVS_VOTE;
+ hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
}
static int cam_icp_get_actual_clk_rate_idx(
@@ -208,14 +231,14 @@ static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
int i;
for (i = 0; i < ICP_CLK_HW_MAX; i++) {
- hw_mgr->clk_info[i].base_clk = ICP_TURBO_VOTE;
- hw_mgr->clk_info[i].curr_clk = ICP_TURBO_VOTE;
+ hw_mgr->clk_info[i].base_clk = ICP_CLK_SVS_HZ;
+ hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
hw_mgr->clk_info[i].over_clked = 0;
hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
}
- hw_mgr->icp_default_clk = ICP_SVS_VOTE;
+ hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
return 0;
}
@@ -443,7 +466,7 @@ static bool cam_icp_update_clk_free(struct cam_icp_hw_mgr *hw_mgr,
static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
{
- if (icp_hw_mgr.icp_debug_clk < ICP_TURBO_VOTE &&
+ if (icp_hw_mgr.icp_debug_clk < ICP_CLK_TURBO_HZ &&
icp_hw_mgr.icp_debug_clk &&
icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
mutex_lock(&icp_hw_mgr.hw_mgr_mutex);
@@ -790,6 +813,21 @@ DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_default_clk,
cam_icp_get_dbg_default_clk,
cam_icp_set_dbg_default_clk, "%16llu");
+static int cam_icp_set_a5_dbg_lvl(void *data, u64 val)
+{
+ icp_hw_mgr.a5_dbg_lvl = val;
+ return 0;
+}
+
+static int cam_icp_get_a5_dbg_lvl(void *data, u64 *val)
+{
+ *val = icp_hw_mgr.a5_dbg_lvl;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fs, cam_icp_get_a5_dbg_lvl,
+ cam_icp_set_a5_dbg_lvl, "%08llu");
+
static int cam_icp_hw_mgr_create_debugfs_entry(void)
{
int rc = 0;
@@ -798,15 +836,6 @@ static int cam_icp_hw_mgr_create_debugfs_entry(void)
if (!icp_hw_mgr.dentry)
return -ENOMEM;
- if (!debugfs_create_bool("a5_debug",
- 0644,
- icp_hw_mgr.dentry,
- &icp_hw_mgr.a5_debug)) {
- debugfs_remove_recursive(icp_hw_mgr.dentry);
- rc = -ENOMEM;
- goto err;
- }
-
if (!debugfs_create_bool("icp_pc",
0644,
icp_hw_mgr.dentry,
@@ -825,6 +854,32 @@ static int cam_icp_hw_mgr_create_debugfs_entry(void)
goto err;
}
+ if (!debugfs_create_bool("a5_jtag_debug",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.a5_jtag_debug)) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_bool("a5_debug_q",
+ 0644,
+ icp_hw_mgr.dentry,
+ &icp_hw_mgr.a5_debug_q)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_debug_q");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ if (!debugfs_create_file("a5_debug_lvl",
+ 0644,
+ icp_hw_mgr.dentry,
+ NULL, &cam_icp_debug_fs)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_debug_lvl");
+ rc = -ENOMEM;
+ goto err;
+ }
+
return rc;
err:
debugfs_remove_recursive(icp_hw_mgr.dentry);
@@ -1091,13 +1146,103 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
return rc;
}
-static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+static void cam_icp_mgr_process_dbg_buf(void)
+{
+ uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
+ struct hfi_msg_debug *dbg_msg;
+ uint32_t read_len, size_processed = 0;
+ char *dbg_buf;
+ int rc = 0;
+
+ rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+ if (rc)
+ return;
+
+ msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
+ while (true) {
+ pkt_ptr = msg_ptr;
+ if (pkt_ptr[ICP_PACKET_TYPE] == HFI_MSG_SYS_DEBUG) {
+ dbg_msg = (struct hfi_msg_debug *)pkt_ptr;
+ dbg_buf = (char *)&dbg_msg->msg_data;
+ trace_cam_icp_fw_dbg(dbg_buf);
+ }
+ size_processed += (pkt_ptr[ICP_PACKET_SIZE] >>
+ BYTE_WORD_SHIFT);
+ if (size_processed >= read_len)
+ return;
+ msg_ptr += (pkt_ptr[ICP_PACKET_SIZE] >>
+ BYTE_WORD_SHIFT);
+ pkt_ptr = NULL;
+ dbg_msg = NULL;
+ dbg_buf = NULL;
+ }
+}
+
+static int cam_icp_process_msg_pkt_type(
+ struct cam_icp_hw_mgr *hw_mgr,
+ uint32_t *msg_ptr,
+ uint32_t *msg_processed_len)
{
int rc = 0;
+ int size_processed = 0;
+ struct hfi_msg_ipebps_async_ack *async_ack = NULL;
+
+ switch (msg_ptr[ICP_PACKET_TYPE]) {
+ case HFI_MSG_SYS_INIT_DONE:
+ CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
+ complete(&hw_mgr->a5_complete);
+ size_processed = sizeof(struct hfi_msg_init_done);
+ break;
+
+ case HFI_MSG_SYS_PING_ACK:
+ CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
+ rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
+ size_processed = sizeof(struct hfi_msg_ping_ack);
+ break;
+
+ case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
+ CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
+ rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
+ size_processed = sizeof(struct hfi_msg_create_handle_ack);
+ break;
+
+ case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
+ CAM_DBG(CAM_ICP, "received ASYNC_INDIRECT_ACK");
+ rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
+ async_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+ size_processed = async_ack->size;
+ async_ack = NULL;
+ break;
+
+ case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
+ CAM_DBG(CAM_ICP, "received ASYNC_DIRECT_ACK");
+ rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
+ size_processed = sizeof(struct hfi_msg_ipebps_async_ack);
+ break;
+
+ case HFI_MSG_EVENT_NOTIFY:
+ CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
+ size_processed = sizeof(struct hfi_msg_event_notify);
+ break;
+
+ default:
+ CAM_ERR(CAM_ICP, "invalid msg : %u",
+ msg_ptr[ICP_PACKET_TYPE]);
+ rc = -EINVAL;
+ break;
+ }
+
+ *msg_processed_len = size_processed;
+ return rc;
+}
+
+static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+{
+ uint32_t read_len, msg_processed_len;
uint32_t *msg_ptr = NULL;
struct hfi_msg_work_data *task_data;
struct cam_icp_hw_mgr *hw_mgr;
- int read_len;
+ int rc = 0;
if (!data || !priv) {
CAM_ERR(CAM_ICP, "Invalid data");
@@ -1107,48 +1252,31 @@ static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
task_data = data;
hw_mgr = priv;
- read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
- if (read_len < 0) {
+ rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+ if (rc) {
CAM_DBG(CAM_ICP, "Unable to read msg q");
- return read_len;
+ } else {
+ read_len = read_len << BYTE_WORD_SHIFT;
+ msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
+ while (true) {
+ rc = cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+ &msg_processed_len);
+ if (rc)
+ return rc;
+
+ read_len -= msg_processed_len;
+ if (read_len > 0) {
+ msg_ptr += (msg_processed_len >>
+ BYTE_WORD_SHIFT);
+ msg_processed_len = 0;
+			} else {
+				break;
+			}
+ }
}
- msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
-
- switch (msg_ptr[ICP_PACKET_TYPE]) {
- case HFI_MSG_SYS_INIT_DONE:
- CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
- complete(&hw_mgr->a5_complete);
- break;
-
- case HFI_MSG_SYS_PING_ACK:
- CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
- rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
- break;
-
- case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
- CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
- rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
- break;
-
- case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
- rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
- break;
-
- case HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
- rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
- break;
-
- case HFI_MSG_EVENT_NOTIFY:
- CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
- break;
-
- default:
- CAM_ERR(CAM_ICP, "invalid msg : %u",
- msg_ptr[ICP_PACKET_TYPE]);
- rc = -EINVAL;
- break;
- }
+ if (icp_hw_mgr.a5_debug_q)
+ cam_icp_mgr_process_dbg_buf();
return rc;
}
@@ -1183,12 +1311,52 @@ int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data)
static void cam_icp_free_hfi_mem(void)
{
+ int rc;
cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
- cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+ rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+ if (rc)
+ CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+}
+
+static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
+{
+ int rc;
+ struct cam_mem_mgr_request_desc alloc;
+ struct cam_mem_mgr_memory_desc out;
+ struct cam_smmu_region_info secheap_info;
+
+ memset(&alloc, 0, sizeof(alloc));
+ memset(&out, 0, sizeof(out));
+
+ rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+ CAM_SMMU_REGION_SECHEAP,
+ &secheap_info);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to get secheap memory info");
+ return rc;
+ }
+
+ alloc.size = secheap_info.iova_len;
+ alloc.align = 0;
+ alloc.flags = 0;
+ alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+ rc = cam_mem_mgr_reserve_memory_region(&alloc,
+ CAM_SMMU_REGION_SECHEAP,
+ &out);
+ if (rc) {
+ CAM_ERR(CAM_ICP, "Unable to reserve secheap memory");
+ return rc;
+ }
+
+ *secheap = out;
+ CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+ out.kva, out.iova, out.mem_handle, out.len);
+
+ return rc;
}
static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
@@ -1280,9 +1448,9 @@ static int cam_icp_allocate_hfi_mem(void)
goto dbg_q_alloc_failed;
}
- rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+ rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
if (rc) {
- CAM_ERR(CAM_ICP, "Unable to allocate sec heap q memory");
+ CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
goto sec_heap_alloc_failed;
}
@@ -1476,6 +1644,7 @@ static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+ hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap = NULL;
cam_icp_hw_mgr_clk_info_update(hw_mgr, &hw_mgr->ctx_data[ctx_id]);
hw_mgr->ctx_data[ctx_id].clk_info.curr_fc = 0;
hw_mgr->ctx_data[ctx_id].clk_info.base_clk = 0;
@@ -1534,15 +1703,6 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
return -EINVAL;
}
- irq_cb.icp_hw_mgr_cb = NULL;
- irq_cb.data = NULL;
- rc = a5_dev_intf->hw_ops.process_cmd(
- a5_dev_intf->hw_priv,
- CAM_ICP_A5_SET_IRQ_CB,
- &irq_cb, sizeof(irq_cb));
- if (rc)
- CAM_ERR(CAM_ICP, "deregister irq call back failed");
-
fw_buf_info.kva = 0;
fw_buf_info.iova = 0;
fw_buf_info.len = 0;
@@ -1561,6 +1721,16 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
mutex_lock(&hw_mgr->hw_mgr_mutex);
cam_hfi_deinit();
cam_icp_mgr_device_deinit(hw_mgr);
+
+ irq_cb.icp_hw_mgr_cb = NULL;
+ irq_cb.data = NULL;
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_SET_IRQ_CB,
+ &irq_cb, sizeof(irq_cb));
+ if (rc)
+ CAM_ERR(CAM_ICP, "deregister irq call back failed");
+
cam_icp_free_hfi_mem();
hw_mgr->fw_download = false;
hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
@@ -1706,7 +1876,7 @@ static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
return cam_hfi_init(0, &hfi_mem,
a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
- hw_mgr->a5_debug);
+ hw_mgr->a5_jtag_debug);
}
static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
@@ -1742,7 +1912,7 @@ static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
return rc;
}
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
{
struct cam_hw_intf *a5_dev_intf = NULL;
struct cam_hw_info *a5_dev = NULL;
@@ -1804,10 +1974,15 @@ static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args)
NULL, 0);
hw_mgr->fw_download = true;
hw_mgr->ctxt_cnt = 0;
- mutex_unlock(&hw_mgr->hw_mgr_mutex);
CAM_DBG(CAM_ICP, "FW download done successfully");
+
+ if (icp_hw_mgr.a5_debug_q)
+ hfi_set_debug_level(icp_hw_mgr.a5_dbg_lvl);
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
if (!download_fw_args)
cam_icp_mgr_hw_close(hw_mgr, NULL);
+
return rc;
fw_init_failed:
@@ -1894,6 +2069,7 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
ctx_data = config_args->ctxt_to_hw_map;
mutex_lock(&ctx_data->ctx_mutex);
if (!ctx_data->in_use) {
+ mutex_unlock(&ctx_data->ctx_mutex);
CAM_ERR(CAM_ICP, "ctx is not in use");
return -EINVAL;
}
@@ -1995,7 +2171,7 @@ static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
{
int i, j, k, rc = 0;
struct cam_buf_io_cfg *io_cfg_ptr = NULL;
- int32_t sync_in_obj[CAM_MAX_OUT_RES];
+ int32_t sync_in_obj[CAM_MAX_IN_RES];
int32_t merged_sync_in_obj;
io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
@@ -2273,9 +2449,16 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
}
ctx_data = release_hw->ctxt_to_hw_map;
+ if (!ctx_data) {
+ CAM_ERR(CAM_ICP, "NULL ctx");
+ return -EINVAL;
+ }
+
ctx_id = ctx_data->ctx_id;
- if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX)
+ if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
+ return -EINVAL;
+ }
mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
if (!hw_mgr->ctx_data[ctx_id].in_use) {
@@ -2571,12 +2754,16 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
if (rc)
goto get_io_buf_failed;
- rc = cam_icp_mgr_download_fw(hw_mgr, ctx_data);
+ rc = cam_icp_mgr_hw_open(hw_mgr, ctx_data);
if (rc)
goto get_io_buf_failed;
rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
if (rc)
goto ipe_bps_resume_failed;
+
+ rc = cam_icp_send_ubwc_cfg(hw_mgr);
+ if (rc)
+ goto ubwc_cfg_failed;
mutex_lock(&hw_mgr->hw_mgr_mutex);
}
mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -2634,6 +2821,7 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
cam_icp_mgr_destroy_handle(ctx_data);
create_handle_failed:
send_ping_failed:
+ubwc_cfg_failed:
cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
ipe_bps_resume_failed:
if (!hw_mgr->ctxt_cnt)
@@ -2709,6 +2897,9 @@ static int cam_icp_mgr_alloc_devs(struct device_node *of_node)
goto num_ipe_failed;
}
+ if (!icp_hw_mgr.ipe1_enable)
+ num_dev = 1;
+
icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
@@ -2787,9 +2978,10 @@ static int cam_icp_mgr_init_devs(struct device_node *of_node)
if (!child_dev_intf) {
CAM_ERR(CAM_ICP, "no child device");
of_node_put(child_node);
+ if (!icp_hw_mgr.ipe1_enable)
+ continue;
goto compat_hw_name_failed;
}
-
icp_hw_mgr.devices[child_dev_intf->hw_type]
[child_dev_intf->hw_idx] = child_dev_intf;
@@ -2865,6 +3057,8 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
{
int i, rc = 0;
struct cam_hw_mgr_intf *hw_mgr_intf;
+ struct cam_cpas_query_cap query;
+ uint32_t cam_caps;
hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
if (!of_node || !hw_mgr_intf) {
@@ -2879,7 +3073,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
- hw_mgr_intf->download_fw = cam_icp_mgr_download_fw;
+ hw_mgr_intf->hw_open = cam_icp_mgr_hw_open;
hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
@@ -2889,6 +3083,15 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
for (i = 0; i < CAM_ICP_CTX_MAX; i++)
mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+ cam_cpas_get_hw_info(&query.camera_family,
+ &query.camera_version, &query.cpas_version, &cam_caps);
+ if (cam_caps & CPAS_IPE0_BIT)
+ icp_hw_mgr.ipe0_enable = true;
+ if (cam_caps & CPAS_IPE1_BIT)
+ icp_hw_mgr.ipe1_enable = true;
+ if (cam_caps & CPAS_BPS_BIT)
+ icp_hw_mgr.bps_enable = true;
+
rc = cam_icp_mgr_init_devs(of_node);
if (rc)
goto dev_init_failed;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index c4a483f..321f10c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -32,6 +32,7 @@
#define CAM_FRAME_CMD_MAX 20
#define CAM_MAX_OUT_RES 6
+#define CAM_MAX_IN_RES 8
#define ICP_WORKQ_NUM_TASK 100
#define ICP_WORKQ_TASK_CMD_TYPE 1
@@ -44,6 +45,8 @@
#define ICP_FRAME_PROCESS_SUCCESS 0
#define ICP_FRAME_PROCESS_FAILURE 1
+#define ICP_MSG_BUF_SIZE 256
+#define ICP_DBG_BUF_SIZE 102400
#define ICP_CLK_HW_IPE 0x0
#define ICP_CLK_HW_BPS 0x1
@@ -51,6 +54,10 @@
#define ICP_OVER_CLK_THRESHOLD 15
+#define CPAS_IPE0_BIT 0x1000
+#define CPAS_IPE1_BIT 0x2000
+#define CPAS_BPS_BIT 0x400
+
/**
* struct icp_hfi_mem_info
* @qtbl: Memory info of queue table
@@ -230,6 +237,12 @@ struct cam_icp_clk_info {
* @icp_default_clk: Set this clok if user doesn't supply
* @clk_info: Clock info of hardware
* @secure_mode: Flag to enable/disable secure camera
+ * @a5_jtag_debug: entry to enable A5 JTAG debugging
+ * @a5_debug_q : entry to enable FW debug message
+ * @a5_dbg_lvl : debug level set to FW.
+ * @ipe0_enable: Flag for IPE0
+ * @ipe1_enable: Flag for IPE1
+ * @bps_enable: Flag for BPS
*/
struct cam_icp_hw_mgr {
struct mutex hw_mgr_mutex;
@@ -245,8 +258,8 @@ struct cam_icp_hw_mgr {
struct icp_hfi_mem_info hfi_mem;
struct cam_req_mgr_core_workq *cmd_work;
struct cam_req_mgr_core_workq *msg_work;
- uint32_t msg_buf[256];
- uint32_t dbg_buf[256];
+ uint32_t msg_buf[ICP_MSG_BUF_SIZE];
+ uint32_t dbg_buf[ICP_DBG_BUF_SIZE];
struct completion a5_complete;
struct hfi_cmd_work_data *cmd_work_data;
struct hfi_msg_work_data *msg_work_data;
@@ -260,8 +273,15 @@ struct cam_icp_hw_mgr {
uint64_t icp_default_clk;
struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
bool secure_mode;
+ bool a5_jtag_debug;
+ bool a5_debug_q;
+ u64 a5_dbg_lvl;
+ bool ipe0_enable;
+ bool ipe1_enable;
+ bool bps_enable;
};
static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
-static int cam_icp_mgr_download_fw(void *hw_mgr_priv, void *download_fw_args);
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
+
#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
index 2686877..dad7736 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -31,6 +31,7 @@ enum cam_icp_a5_cmd_type {
CAM_ICP_A5_CMD_VOTE_CPAS,
CAM_ICP_A5_CMD_CPAS_START,
CAM_ICP_A5_CMD_CPAS_STOP,
+ CAM_ICP_A5_CMD_UBWC_CFG,
CAM_ICP_A5_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index 4f6fce8..d2e04ef 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -18,10 +18,15 @@
#include <linux/of.h>
#include "cam_cpas_api.h"
-#define ICP_TURBO_VOTE 600000000
-#define ICP_SVS_VOTE 400000000
+#define ICP_CLK_TURBO_HZ 600000000
+#define ICP_CLK_SVS_HZ 400000000
+
+#define CAM_ICP_A5_BW_BYTES_VOTE 100000000
+
#define CAM_ICP_CTX_MAX 36
+#define CPAS_IPE1_BIT 0x2000
+
int cam_icp_hw_mgr_init(struct device_node *of_node,
uint64_t *hw_mgr_hdl);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
index cbd9d84..cc2b1b1 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -72,14 +72,25 @@ int cam_ipe_probe(struct platform_device *pdev)
struct cam_ipe_device_core_info *core_info = NULL;
struct cam_ipe_device_hw_info *hw_info = NULL;
int rc = 0;
+ struct cam_cpas_query_cap query;
+ uint32_t cam_caps;
+ uint32_t hw_idx;
+
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &hw_idx);
+
+ cam_cpas_get_hw_info(&query.camera_family,
+ &query.camera_version, &query.cpas_version, &cam_caps);
+ if ((!(cam_caps & CPAS_IPE1_BIT)) && (hw_idx)) {
+		CAM_ERR(CAM_ICP, "IPE1 hw idx = %u", hw_idx);
+ return -EINVAL;
+ }
ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
if (!ipe_dev_intf)
return -ENOMEM;
- of_property_read_u32(pdev->dev.of_node,
- "cell-index", &ipe_dev_intf->hw_idx);
-
+ ipe_dev_intf->hw_idx = hw_idx;
ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
if (!ipe_dev) {
kfree(ipe_dev_intf);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index 49176b5..71af1a2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -116,7 +116,7 @@ int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
int rc = 0;
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, false);
+ CAM_SVS_VOTE, false);
if (rc) {
CAM_ERR(CAM_ICP, "enable platform failed");
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 16c02d8..cfe5071 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -24,6 +24,8 @@
#include "cam_trace.h"
#include "cam_debug_util.h"
+static const char isp_dev_name[] = "isp";
+
static int __cam_isp_ctx_enqueue_request_in_order(
struct cam_context *ctx, struct cam_ctx_request *req)
{
@@ -302,7 +304,7 @@ static void __cam_isp_ctx_send_sof_timestamp(
ctx_isp->sof_timestamp_val);
CAM_DBG(CAM_ISP, " sof status:%d", sof_event_status);
- if (cam_req_mgr_notify_frame_message(&req_msg,
+ if (cam_req_mgr_notify_message(&req_msg,
V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
CAM_ERR(CAM_ISP,
"Error in notifying the sof time for req id:%lld",
@@ -425,6 +427,13 @@ static int __cam_isp_ctx_notify_eof_in_actived_state(
return rc;
}
+static int __cam_isp_ctx_reg_upd_in_hw_error(
+ struct cam_isp_context *ctx_isp, void *evt_data)
+{
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+ return 0;
+}
+
static int __cam_isp_ctx_sof_in_activated_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -687,8 +696,13 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
void *evt_data)
{
int rc = 0;
- struct cam_ctx_request *req;
+ uint32_t i = 0;
+ bool found = 0;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp;
+ struct cam_isp_ctx_req *req_isp = NULL;
struct cam_req_mgr_error_notify notify;
+ uint64_t error_request_id;
struct cam_context *ctx = ctx_isp->base;
struct cam_isp_hw_error_event_data *error_event_data =
@@ -699,7 +713,7 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
- notify.error = CRM_KMD_ERR_FATAL;
+ notify.error = CRM_KMD_ERR_OVERFLOW;
/*
* Need to check the active req
@@ -710,31 +724,92 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
if (list_empty(&ctx->active_req_list)) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"handling error with no active request");
- rc = -EINVAL;
- goto end;
+ } else {
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ if (!req_isp->bubble_report) {
+ for (i = 0; i < req_isp->num_fence_map_out;
+ i++) {
+ CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+ req->request_id,
+ req_isp->fence_map_out[i].
+ sync_id);
+ if (req_isp->fence_map_out[i].sync_id
+ != -1) {
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].
+ sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ req_isp->fence_map_out[i].
+ sync_id = -1;
+ }
+ }
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->free_req_list);
+ ctx_isp->active_req_cnt--;
+ } else {
+ found = 1;
+ break;
+ }
+ }
}
- req = list_first_entry(&ctx->active_req_list,
- struct cam_ctx_request, list);
+ if (found) {
+ list_for_each_entry_safe_reverse(req, req_temp,
+ &ctx->active_req_list, list) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ list_del_init(&req->list);
+ list_add(&req->list, &ctx->pending_req_list);
+ ctx_isp->active_req_cnt--;
+ }
+ }
+
+ do {
+ if (list_empty(&ctx->pending_req_list)) {
+ error_request_id = ctx_isp->last_applied_req_id + 1;
+ req_isp = NULL;
+ break;
+ }
+ req = list_first_entry(&ctx->pending_req_list,
+ struct cam_ctx_request, list);
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ error_request_id = ctx_isp->last_applied_req_id;
+
+ if (req_isp->bubble_report)
+ break;
+
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1)
+ rc = cam_sync_signal(
+ req_isp->fence_map_out[i].sync_id,
+ CAM_SYNC_STATE_SIGNALED_ERROR);
+ req_isp->fence_map_out[i].sync_id = -1;
+ }
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->free_req_list);
+
+ } while (req->request_id < ctx_isp->last_applied_req_id);
+
if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
- notify.req_id = req->request_id;
+ notify.req_id = error_request_id;
+
+ if (req_isp && req_isp->bubble_report)
+ notify.error = CRM_KMD_ERR_BUBBLE;
+
+ CAM_WARN(CAM_ISP, "Notify CRM: req %lld, frame %lld\n",
+ error_request_id, ctx_isp->frame_id);
ctx->ctx_crm_intf->notify_err(¬ify);
- CAM_ERR_RATE_LIMIT(CAM_ISP, "Notify CRM about ERROR frame %lld",
- ctx_isp->frame_id);
+ ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
} else {
CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
rc = -EFAULT;
}
- list_del_init(&req->list);
- list_add(&req->list, &ctx->pending_req_list);
- /* might need to check if active list is empty */
-
-end:
CAM_DBG(CAM_ISP, "Exit");
return rc;
}
@@ -744,7 +819,7 @@ static struct cam_isp_ctx_irq_ops
/* SOF */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_sof,
__cam_isp_ctx_notify_sof_in_actived_state,
@@ -777,7 +852,7 @@ static struct cam_isp_ctx_irq_ops
/* BUBBLE */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
NULL,
__cam_isp_ctx_notify_sof_in_actived_state,
@@ -788,7 +863,7 @@ static struct cam_isp_ctx_irq_ops
/* Bubble Applied */
{
.irq_ops = {
- NULL,
+ __cam_isp_ctx_handle_error,
__cam_isp_ctx_sof_in_activated_state,
__cam_isp_ctx_reg_upd_in_activated_state,
__cam_isp_ctx_epoch_in_bubble_applied,
@@ -796,6 +871,17 @@ static struct cam_isp_ctx_irq_ops
__cam_isp_ctx_buf_done_in_bubble_applied,
},
},
+ /* HW ERROR */
+ {
+ .irq_ops = {
+ NULL,
+ __cam_isp_ctx_sof_in_activated_state,
+ __cam_isp_ctx_reg_upd_in_hw_error,
+ NULL,
+ NULL,
+ NULL,
+ },
+ },
/* HALT */
{
},
@@ -876,7 +962,9 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
} else {
spin_lock_bh(&ctx->lock);
ctx_isp->substate_activated = next_state;
- CAM_DBG(CAM_ISP, "new state %d", next_state);
+ ctx_isp->last_applied_req_id = apply->request_id;
+ CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
+ next_state, ctx_isp->last_applied_req_id);
spin_unlock_bh(&ctx->lock);
}
end:
@@ -1611,7 +1699,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
req->request_id = packet->header.request_id;
req->status = 1;
- if (ctx->state == CAM_CTX_ACTIVATED && ctx->ctx_crm_intf->add_req) {
+ if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
add_req.link_hdl = ctx->link_hdl;
add_req.dev_hdl = ctx->dev_hdl;
add_req.req_id = req->request_id;
@@ -1781,7 +1869,7 @@ static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
- if (!rc && ctx->link_hdl) {
+ if (!rc && (ctx->link_hdl >= 0)) {
ctx->state = CAM_CTX_READY;
trace_cam_context_state("ISP", ctx);
}
@@ -1819,7 +1907,7 @@ static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
{
int rc = 0;
- ctx->link_hdl = 0;
+ ctx->link_hdl = -1;
ctx->ctx_crm_intf = NULL;
return rc;
@@ -1905,7 +1993,7 @@ static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
{
int rc = 0;
- ctx->link_hdl = 0;
+ ctx->link_hdl = -1;
ctx->ctx_crm_intf = NULL;
ctx->state = CAM_CTX_ACQUIRED;
trace_cam_context_state("ISP", ctx);
@@ -2003,6 +2091,24 @@ static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
return rc;
}
+static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
+ struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+ int rc = 0;
+
+ CAM_WARN(CAM_ISP,
+ "Received unlink in activated state. It's unexpected");
+ rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+ if (rc)
+ CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);
+
+ rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
+
+ return rc;
+}
+
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
struct cam_req_mgr_apply_request *apply)
{
@@ -2114,6 +2220,7 @@ static struct cam_ctx_ops
.config_dev = __cam_isp_ctx_config_dev_in_top_state,
},
.crm_ops = {
+ .unlink = __cam_isp_ctx_unlink_in_activated,
.apply_req = __cam_isp_ctx_apply_req,
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
@@ -2154,8 +2261,8 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
}
/* camera context setup */
- rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
- CAM_CTX_REQ_MAX);
+ rc = cam_context_init(ctx_base, isp_dev_name, crm_node_intf, hw_intf,
+ ctx->req_base, CAM_CTX_REQ_MAX);
if (rc) {
CAM_ERR(CAM_ISP, "Camera Context Base init failed");
goto err;
@@ -2182,4 +2289,3 @@ int cam_isp_context_deinit(struct cam_isp_context *ctx)
memset(ctx, 0, sizeof(*ctx));
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 621d652..347290c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -31,7 +31,7 @@
* Maxiimum configuration entry size - This is based on the
* worst case DUAL IFE use case plus some margin.
*/
-#define CAM_ISP_CTX_CFG_MAX 20
+#define CAM_ISP_CTX_CFG_MAX 22
/* forward declaration */
struct cam_isp_context;
@@ -50,6 +50,7 @@ enum cam_isp_ctx_activated_substate {
CAM_ISP_CTX_ACTIVATED_EPOCH,
CAM_ISP_CTX_ACTIVATED_BUBBLE,
CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+ CAM_ISP_CTX_ACTIVATED_HW_ERROR,
CAM_ISP_CTX_ACTIVATED_HALT,
CAM_ISP_CTX_ACTIVATED_MAX,
};
@@ -111,6 +112,7 @@ struct cam_isp_ctx_req {
* @reported_req_id: Last reported request id
* @subscribe_event: The irq event mask that CRM subscribes to, IFE will
* invoke CRM cb at those event.
+ * @last_applied_req_id: Last applied request id
*
*/
struct cam_isp_context {
@@ -129,6 +131,7 @@ struct cam_isp_context {
int32_t active_req_cnt;
int64_t reported_req_id;
uint32_t subscribe_event;
+ int64_t last_applied_req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
index 7e3c353..1f7dc76 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -8,6 +8,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index f7b40a4..0362758 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <soc/qcom/scm.h>
#include <uapi/media/cam_isp.h>
#include "cam_smmu_api.h"
#include "cam_req_mgr_workq.h"
@@ -26,11 +27,62 @@
#include "cam_cdm_intf_api.h"
#include "cam_packet_util.h"
#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
#define CAM_IFE_HW_ENTRIES_MAX 20
+#define TZ_SVC_SMMU_PROGRAM 0x15
+#define TZ_SAFE_SYSCALL_ID 0x3
+#define CAM_IFE_SAFE_DISABLE 0
+#define CAM_IFE_SAFE_ENABLE 1
+#define SMMU_SE_IFE 0
+
+#define CAM_ISP_PACKET_META_MAX \
+ (CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
+
+#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
+ (CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG + 1)
+
+static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
+ CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+};
+
static struct cam_ife_hw_mgr g_ife_hw_mgr;
+static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
+{
+ uint32_t camera_hw_version, rc = 0;
+ struct scm_desc desc = {0};
+
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (!rc) {
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_175_V100:
+
+ desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+ desc.args[0] = SMMU_SE_IFE;
+ desc.args[1] = safe_trigger;
+
+ CAM_DBG(CAM_ISP, "Safe scm call %d", safe_trigger);
+ if (scm_call2(SCM_SIP_FNID(TZ_SVC_SMMU_PROGRAM,
+ TZ_SAFE_SYSCALL_ID), &desc)) {
+ CAM_ERR(CAM_ISP,
+ "scm call to Enable Safe failed");
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
void *hw_caps_args)
{
@@ -88,6 +140,39 @@ static int cam_ife_hw_mgr_is_rdi_res(uint32_t res_id)
return rc;
}
+static int cam_ife_hw_mgr_reset_csid_res(
+ struct cam_ife_hw_mgr_res *isp_hw_res)
+{
+ int i;
+ int rc = 0;
+ struct cam_hw_intf *hw_intf;
+ struct cam_csid_reset_cfg_args csid_reset_args;
+
+ csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!isp_hw_res->hw_res[i])
+ continue;
+ csid_reset_args.node_res = isp_hw_res->hw_res[i];
+ hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+ CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+ hw_intf->hw_idx);
+ if (hw_intf->hw_ops.reset) {
+ rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+ &csid_reset_args,
+ sizeof(struct cam_csid_reset_cfg_args));
+ if (rc <= 0)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+ isp_hw_res->res_type, isp_hw_res->res_id);
+ return rc;
+}
+
static int cam_ife_hw_mgr_init_hw_res(
struct cam_ife_hw_mgr_res *isp_hw_res)
{
@@ -118,7 +203,8 @@ static int cam_ife_hw_mgr_init_hw_res(
}
static int cam_ife_hw_mgr_start_hw_res(
- struct cam_ife_hw_mgr_res *isp_hw_res)
+ struct cam_ife_hw_mgr_res *isp_hw_res,
+ struct cam_ife_hw_mgr_ctx *ctx)
{
int i;
int rc = -1;
@@ -129,6 +215,8 @@ static int cam_ife_hw_mgr_start_hw_res(
continue;
hw_intf = isp_hw_res->hw_res[i]->hw_intf;
if (hw_intf->hw_ops.start) {
+ isp_hw_res->hw_res[i]->rdi_only_ctx =
+ ctx->is_rdi_only_context;
rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
isp_hw_res->hw_res[i],
sizeof(struct cam_isp_resource_node));
@@ -783,7 +871,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
struct cam_ife_hw_mgr *ife_hw_mgr;
struct cam_ife_hw_mgr_res *csid_res;
struct cam_ife_hw_mgr_res *cid_res;
- struct cam_hw_intf *hw_intf;
+ struct cam_hw_intf *hw_intf;
struct cam_csid_hw_reserve_resource_args csid_acquire;
ife_hw_mgr = ife_ctx->hw_mgr;
@@ -1271,16 +1359,17 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
cdm_acquire.id = CAM_CDM_VIRTUAL;
cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
- if (!cam_cdm_acquire(&cdm_acquire)) {
- CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
- cdm_acquire.handle);
- ife_ctx->cdm_handle = cdm_acquire.handle;
- ife_ctx->cdm_ops = cdm_acquire.ops;
- } else {
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
- goto err;
+ goto free_ctx;
}
+ CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+ cdm_acquire.handle);
+ ife_ctx->cdm_handle = cdm_acquire.handle;
+ ife_ctx->cdm_ops = cdm_acquire.ops;
+
isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
/* acquire HW resources */
@@ -1325,7 +1414,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
rc = cam_ife_mgr_process_base_info(ife_ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Process base info failed");
- return -EINVAL;
+ goto free_res;
}
acquire_args->ctxt_to_hw_map = ife_ctx;
@@ -1338,6 +1427,8 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
return 0;
free_res:
cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+ cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
err:
CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
@@ -1371,6 +1462,8 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
CAM_ERR(CAM_ISP, "Invalid context parameters");
return -EPERM;
}
+ if (atomic_read(&ctx->overflow_pending))
+ return -EINVAL;
CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
ctx->ctx_index, cfg->num_hw_update_entries);
@@ -1402,8 +1495,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
return rc;
}
-static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
- void *stop_hw_args)
+static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
{
int rc = 0;
struct cam_hw_stop_args *stop_args = stop_hw_args;
@@ -1411,7 +1503,7 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
struct cam_ife_hw_mgr_ctx *ctx;
uint32_t i, master_base_idx = 0;
- if (!hw_mgr_priv || !stop_hw_args) {
+ if (!stop_hw_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1424,7 +1516,6 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
ctx->ctx_index);
- /* stop resource will remove the irq mask from the hardware */
if (!ctx->num_base) {
CAM_ERR(CAM_ISP, "Number of bases are zero");
return -EINVAL;
@@ -1438,17 +1529,13 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
}
}
- /*
- * if Context does not have PIX resources and has only RDI resource
- * then take the first base index.
- */
-
if (i == ctx->num_base)
master_base_idx = ctx->base[0].idx;
+
/* stop the master CIDs first */
cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
- master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
/* stop rest of the CIDs */
for (i = 0; i < ctx->num_base; i++) {
@@ -1460,7 +1547,7 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
/* stop the master CSID path first */
cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
- master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+ master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
/* Stop rest of the CSID paths */
for (i = 0; i < ctx->num_base; i++) {
@@ -1480,8 +1567,9 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *hw_mgr_priv,
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
- /* update vote bandwidth should be done at the HW layer */
+ /* Stop tasklet for context */
+ cam_tasklet_stop(ctx->common.tasklet_info);
CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
ctx->ctx_index, rc);
@@ -1597,43 +1685,41 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d", ctx->ctx_index, rc);
+ mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+ if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
+ rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "SAFE SCM call failed:Check TZ/HYP dependency");
+ rc = 0;
+ }
+ }
+ mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
return rc;
}
-static int cam_ife_mgr_reset_hw(struct cam_ife_hw_mgr *hw_mgr,
+static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
uint32_t hw_idx)
{
uint32_t i = 0;
- struct cam_hw_intf *csid_hw_intf;
struct cam_hw_intf *vfe_hw_intf;
- struct cam_csid_reset_cfg_args csid_reset_args;
+ uint32_t vfe_reset_type;
if (!hw_mgr) {
CAM_DBG(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
-
- /* Reset IFE CSID HW */
- csid_reset_args.reset_type = CAM_IFE_CSID_RESET_GLOBAL;
-
- for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
- if (hw_idx != hw_mgr->csid_devices[i]->hw_idx)
- continue;
-
- csid_hw_intf = hw_mgr->csid_devices[i];
- csid_hw_intf->hw_ops.reset(csid_hw_intf->hw_priv,
- &csid_reset_args,
- sizeof(struct cam_csid_reset_cfg_args));
- break;
- }
-
/* Reset VFE HW*/
+ vfe_reset_type = CAM_VFE_HW_RESET_HW;
+
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
continue;
CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
vfe_hw_intf = hw_mgr->ife_devices[i];
- vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
+ vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
+ &vfe_reset_type, sizeof(vfe_reset_type));
break;
}
@@ -1641,8 +1727,7 @@ static int cam_ife_mgr_reset_hw(struct cam_ife_hw_mgr *hw_mgr,
return 0;
}
-static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
- void *start_hw_args)
+static int cam_ife_mgr_restart_hw(void *start_hw_args)
{
int rc = -1;
struct cam_hw_start_args *start_args = start_hw_args;
@@ -1650,7 +1735,7 @@ static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
struct cam_ife_hw_mgr_res *hw_mgr_res;
uint32_t i;
- if (!hw_mgr_priv || !start_hw_args) {
+ if (!start_hw_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
@@ -1661,12 +1746,14 @@ static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
return -EPERM;
}
- CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
-
CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
+
+ cam_tasklet_start(ctx->common.tasklet_info);
+
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
- rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+ rc = cam_ife_hw_mgr_start_hw_res(
+ &ctx->res_list_ife_out[i], ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
goto err;
@@ -1676,7 +1763,7 @@ static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
hw_mgr_res->res_id);
@@ -1687,7 +1774,7 @@ static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
hw_mgr_res->res_id);
@@ -1696,22 +1783,12 @@ static int cam_ife_mgr_restart_hw(void *hw_mgr_priv,
}
CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
- /* Start the IFE CID HW devices */
- list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
- if (rc) {
- CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
- hw_mgr_res->res_id);
- goto err;
- }
- }
-
/* Start IFE root node: do nothing */
CAM_DBG(CAM_ISP, "Exit...(success)");
return 0;
err:
- cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
+ cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
return rc;
}
@@ -1805,6 +1882,17 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
}
}
+ mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+ if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
+ rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "SAFE SCM call failed:Check TZ/HYP dependency");
+ rc = -1;
+ }
+ }
+ mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
CAM_DBG(CAM_ISP, "start cdm interface");
rc = cam_cdm_stream_on(ctx->cdm_handle);
if (rc) {
@@ -1825,7 +1913,8 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
ctx->ctx_index);
/* start the IFE out devices */
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
- rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
+ rc = cam_ife_hw_mgr_start_hw_res(
+ &ctx->res_list_ife_out[i], ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
i);
@@ -1837,7 +1926,7 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
ctx->ctx_index);
/* Start the IFE mux in devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
hw_mgr_res->res_id);
@@ -1849,7 +1938,7 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
ctx->ctx_index);
/* Start the IFE CSID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
hw_mgr_res->res_id);
@@ -1861,10 +1950,10 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
ctx->ctx_index);
/* Start the IFE CID HW devices */
list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
- rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
+ rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
- hw_mgr_res->res_id);
+ hw_mgr_res->res_id);
goto err;
}
}
@@ -1929,6 +2018,331 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
return rc;
}
+static int cam_isp_blob_hfr_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_resource_hfr_config *hfr_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_isp_port_hfr_config *port_hfr_config;
+ struct cam_kmd_buf_info *kmd_buf_info;
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ uint32_t res_id_out, i;
+ uint32_t total_used_bytes = 0;
+ uint32_t kmd_buf_remain_size;
+ uint32_t *cmd_buf_addr;
+ uint32_t bytes_used = 0;
+ int num_ent, rc = 0;
+
+ ctx = prepare->ctxt_to_hw_map;
+ CAM_DBG(CAM_ISP, "num_ports= %d",
+ hfr_config->num_ports);
+
+ /* Max one hw entries required for hfr config update */
+ if (prepare->num_hw_update_entries + 1 >=
+ prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
+ prepare->num_hw_update_entries,
+ prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ kmd_buf_info = blob_info->kmd_buf_info;
+ for (i = 0; i < hfr_config->num_ports; i++) {
+ port_hfr_config = &hfr_config->port_hfr_config[i];
+ res_id_out = port_hfr_config->resource_type & 0xFF;
+
+ CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
+ res_id_out);
+
+ if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "invalid out restype:%x",
+ port_hfr_config->resource_type);
+ return -EINVAL;
+ }
+
+ if ((kmd_buf_info->used_bytes
+ + total_used_bytes) < kmd_buf_info->size) {
+ kmd_buf_remain_size = kmd_buf_info->size -
+ (kmd_buf_info->used_bytes +
+ total_used_bytes);
+ } else {
+ CAM_ERR(CAM_ISP,
+ "no free kmd memory for base %d",
+ blob_info->base_info->idx);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ cmd_buf_addr = kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes/4 +
+ total_used_bytes/4;
+ hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+ rc = cam_isp_add_cmd_buf_update(
+ hw_mgr_res, blob_type,
+ blob_type_hw_cmd_map[blob_type],
+ blob_info->base_info->idx,
+ (void *)cmd_buf_addr,
+ kmd_buf_remain_size,
+ (void *)port_hfr_config,
+ &bytes_used);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP,
+ "Failed cmd_update, base_idx=%d, rc=%d",
+ blob_info->base_info->idx, bytes_used);
+ return rc;
+ }
+
+ total_used_bytes += bytes_used;
+ }
+
+ if (total_used_bytes) {
+ /* Update the HW entries */
+ num_ent = prepare->num_hw_update_entries;
+ prepare->hw_update_entries[num_ent].handle =
+ kmd_buf_info->handle;
+ prepare->hw_update_entries[num_ent].len = total_used_bytes;
+ prepare->hw_update_entries[num_ent].offset =
+ kmd_buf_info->offset;
+ num_ent++;
+
+ kmd_buf_info->used_bytes += total_used_bytes;
+ kmd_buf_info->offset += total_used_bytes;
+ prepare->num_hw_update_entries = num_ent;
+ }
+
+ return rc;
+}
+
+static int cam_isp_blob_clock_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_clock_config *clock_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_clock_update_args clock_upd_args;
+ uint64_t clk_rate = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+ uint32_t j;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ CAM_DBG(CAM_ISP,
+ "usage=%u left_clk= %lu right_clk=%lu",
+ clock_config->usage_type,
+ clock_config->left_pix_hz,
+ clock_config->right_pix_hz);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ clk_rate = 0;
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT)
+ clk_rate =
+ clock_config->left_pix_hz;
+ else
+ clk_rate =
+ clock_config->right_pix_hz;
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3))
+ for (j = 0; j < clock_config->num_rdi; j++)
+ clk_rate = max(clock_config->rdi_hz[j],
+ clk_rate);
+ else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ clock_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+ CAM_DBG(CAM_ISP,
+ "res_id=%u i= %d clk=%llu\n",
+ hw_mgr_res->res_id, i, clk_rate);
+
+ clock_upd_args.clk_rate = clk_rate;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ &clock_upd_args,
+ sizeof(
+ struct cam_vfe_clock_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "Clock Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
+static int cam_isp_blob_bw_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_bw_config *bw_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_vfe_bw_update_args bw_upd_args;
+ uint64_t cam_bw_bps = 0;
+ uint64_t ext_bw_bps = 0;
+ int rc = -EINVAL;
+ uint32_t i;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ CAM_DBG(CAM_ISP,
+ "usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
+ "right cam_bw_bps=%llu ext_bw_bps=%llu",
+ bw_config->usage_type,
+ bw_config->left_pix_vote.cam_bw_bps,
+ bw_config->left_pix_vote.ext_bw_bps,
+ bw_config->right_pix_vote.cam_bw_bps,
+ bw_config->right_pix_vote.ext_bw_bps);
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
+ if (i == CAM_ISP_HW_SPLIT_LEFT) {
+ cam_bw_bps =
+ bw_config->left_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->left_pix_vote.ext_bw_bps;
+ } else {
+ cam_bw_bps =
+ bw_config->right_pix_vote.cam_bw_bps;
+ ext_bw_bps =
+ bw_config->right_pix_vote.ext_bw_bps;
+ }
+ else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+ && (hw_mgr_res->res_id <=
+ CAM_ISP_HW_VFE_IN_RDI3)) {
+ uint32_t idx = hw_mgr_res->res_id -
+ CAM_ISP_HW_VFE_IN_RDI0;
+ if (idx >= bw_config->num_rdi)
+ continue;
+
+ cam_bw_bps =
+ bw_config->rdi_vote[idx].cam_bw_bps;
+ ext_bw_bps =
+ bw_config->rdi_vote[idx].ext_bw_bps;
+ } else
+ if (hw_mgr_res->hw_res[i]) {
+ CAM_ERR(CAM_ISP, "Invalid res_id %u",
+ hw_mgr_res->res_id);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ bw_upd_args.node_res =
+ hw_mgr_res->hw_res[i];
+
+ bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
+ bw_upd_args.external_bw_bytes = ext_bw_bps;
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ &bw_upd_args,
+ sizeof(struct cam_vfe_bw_update_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "BW Update failed");
+ } else
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+
+ return rc;
+}
+
+static int cam_isp_packet_generic_blob_handler(void *user_data,
+ uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+ int rc = 0;
+ struct cam_isp_generic_blob_info *blob_info = user_data;
+ struct cam_hw_prepare_update_args *prepare = NULL;
+
+ if (!blob_data || (blob_size == 0) || !blob_info) {
+ CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
+ blob_data, blob_size, prepare);
+ return -EINVAL;
+ }
+
+ if (blob_type >= CAM_ISP_GENERIC_BLOB_TYPE_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid Blob Type %d Max %d", blob_type,
+ CAM_ISP_GENERIC_BLOB_TYPE_MAX);
+ return -EINVAL;
+ }
+
+ prepare = blob_info->prepare;
+ if (!prepare) {
+ CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
+ blob_type);
+ return -EINVAL;
+ }
+
+ switch (blob_type) {
+ case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
+ struct cam_isp_resource_hfr_config *hfr_config =
+ (struct cam_isp_resource_hfr_config *)blob_data;
+
+ rc = cam_isp_blob_hfr_update(blob_type, blob_info,
+ hfr_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "HFR Update Failed");
+ }
+ break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
+ struct cam_isp_clock_config *clock_config =
+ (struct cam_isp_clock_config *)blob_data;
+
+ rc = cam_isp_blob_clock_update(blob_type, blob_info,
+ clock_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Clock Update Failed");
+ }
+ break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
+ struct cam_isp_bw_config *bw_config =
+ (struct cam_isp_bw_config *)blob_data;
+
+ rc = cam_isp_blob_bw_update(blob_type, blob_info,
+ bw_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+ }
+ break;
+ default:
+ CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
+ break;
+ }
+
+ return rc;
+}
+
static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
void *prepare_hw_update_args)
{
@@ -1940,7 +2354,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
struct cam_kmd_buf_info kmd_buf;
uint32_t i;
bool fill_fence = true;
- struct cam_isp_generic_blob_info blob_info;
if (!hw_mgr_priv || !prepare_hw_update_args) {
CAM_ERR(CAM_ISP, "Invalid args");
@@ -1969,14 +2382,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
return rc;
}
- memset(&blob_info, 0x0, sizeof(struct cam_isp_generic_blob_info));
- rc = cam_isp_process_generic_cmd_buffer(prepare, &blob_info);
- if (rc) {
- CAM_ERR(CAM_ISP, "Failed in generic blob cmd buffer, rc=%d",
- rc);
- goto end;
- }
-
prepare->num_hw_update_entries = 0;
prepare->num_in_map_entries = 0;
prepare->num_out_map_entries = 0;
@@ -1997,26 +2402,14 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
/* get command buffers */
if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
- rc = cam_isp_add_command_buffers(prepare,
- ctx->base[i].split_id, ctx->base[i].idx,
- ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
- if (rc) {
- CAM_ERR(CAM_ISP,
- "Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
- i, ctx->base[i].split_id, rc);
- goto end;
- }
- }
-
- if (blob_info.hfr_config) {
- rc = cam_isp_add_hfr_config_hw_update(
- blob_info.hfr_config, prepare,
- ctx->base[i].idx, &kmd_buf,
+ rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
+ &ctx->base[i],
+ cam_isp_packet_generic_blob_handler,
ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
if (rc) {
CAM_ERR(CAM_ISP,
- "Failed in hfr config, i=%d, rc=%d",
- i, rc);
+ "Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+ i, ctx->base[i].split_id, rc);
goto end;
}
}
@@ -2047,7 +2440,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
* of op_code has some difference from KMD.
*/
if (((prepare->packet->header.op_code + 1) & 0xF) ==
- CAM_ISP_PACKET_INIT_DEV)
+ CAM_ISP_PACKET_INIT_DEV)
goto end;
/* add reg update commands */
@@ -2074,7 +2467,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
}
end:
- kfree(blob_info.hfr_config);
return rc;
}
@@ -2167,11 +2559,12 @@ static int cam_ife_mgr_cmd_get_sof_timestamp(
static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
{
int32_t rc = 0;
- struct cam_hw_event_recovery_data *recovery_data = priv;
- struct cam_hw_start_args start_args;
- struct cam_ife_hw_mgr *ife_hw_mgr = NULL;
- uint32_t hw_mgr_priv;
- uint32_t i = 0;
+ struct cam_hw_event_recovery_data *recovery_data = data;
+ struct cam_hw_start_args start_args;
+ struct cam_hw_stop_args stop_args;
+ struct cam_ife_hw_mgr *ife_hw_mgr = priv;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ uint32_t i = 0;
uint32_t error_type = recovery_data->error_type;
struct cam_ife_hw_mgr_ctx *ctx = NULL;
@@ -2188,20 +2581,57 @@ static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
kfree(recovery_data);
return 0;
}
+ /* stop resources here */
+ CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+ recovery_data->no_of_context);
+ for (i = 0; i < recovery_data->no_of_context; i++) {
+ stop_args.ctxt_to_hw_map =
+ recovery_data->affected_ctx[i];
+ rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
+ return rc;
+ }
+ }
- ctx = recovery_data->affected_ctx[0];
- ife_hw_mgr = ctx->hw_mgr;
+ CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+ for (i = 0; i < recovery_data->no_of_context; i++) {
+ ctx = recovery_data->affected_ctx[i];
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
+ list) {
+ rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+ hw_mgr_res->res_id);
+ return rc;
+ }
+ }
+ }
+
+ CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
if (recovery_data->affected_core[i])
- rc = cam_ife_mgr_reset_hw(ife_hw_mgr, i);
+ cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
}
+ CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+ recovery_data->no_of_context);
+
for (i = 0; i < recovery_data->no_of_context; i++) {
- start_args.ctxt_to_hw_map =
- recovery_data->affected_ctx[i];
- rc = cam_ife_mgr_restart_hw(&hw_mgr_priv, &start_args);
+ ctx = recovery_data->affected_ctx[i];
+ start_args.ctxt_to_hw_map = ctx;
+
+ atomic_set(&ctx->overflow_pending, 0);
+
+ rc = cam_ife_mgr_restart_hw(&start_args);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+ return rc;
+ }
+ CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
}
+ CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
break;
@@ -2227,8 +2657,6 @@ static int cam_ife_hw_mgr_do_error_recovery(
struct crm_workq_task *task = NULL;
struct cam_hw_event_recovery_data *recovery_data = NULL;
- return 0;
-
recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
GFP_ATOMIC);
if (!recovery_data)
@@ -2247,7 +2675,9 @@ static int cam_ife_hw_mgr_do_error_recovery(
}
task->process_cb = &cam_ife_mgr_process_recovery_cb;
- rc = cam_req_mgr_workq_enqueue_task(task, recovery_data,
+ task->payload = recovery_data;
+ rc = cam_req_mgr_workq_enqueue_task(task,
+ recovery_data->affected_ctx[0]->hw_mgr,
CRM_TASK_PRIORITY_0);
return rc;
@@ -2260,9 +2690,9 @@ static int cam_ife_hw_mgr_do_error_recovery(
* affected_core[]
* b. Return 0 i.e.SUCCESS
*/
-static int cam_ife_hw_mgr_match_hw_idx(
+static int cam_ife_hw_mgr_is_ctx_affected(
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx,
- uint32_t *affected_core)
+ uint32_t *affected_core, uint32_t size)
{
int32_t rc = -EPERM;
@@ -2272,22 +2702,25 @@ static int cam_ife_hw_mgr_match_hw_idx(
CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
- while (i < max_idx) {
+ if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
+ (size > CAM_IFE_HW_NUM_MAX)) {
+ CAM_ERR(CAM_ISP, "invalid parameter = %d", max_idx);
+ return rc;
+ }
+
+ for (i = 0; i < max_idx; i++) {
if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
rc = 0;
else {
ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
j = j + 1;
}
-
- i = i + 1;
}
if (rc == 0) {
while (j) {
if (affected_core[ctx_affected_core_idx[j-1]] != 1)
affected_core[ctx_affected_core_idx[j-1]] = 1;
-
j = j - 1;
}
}
@@ -2303,7 +2736,7 @@ static int cam_ife_hw_mgr_match_hw_idx(
* d. For any dual VFE context, if copanion VFE is also serving
* other context it should also notify the CRM with fatal error
*/
-static int cam_ife_hw_mgr_handle_overflow(
+static int cam_ife_hw_mgr_process_overflow(
struct cam_ife_hw_mgr_ctx *curr_ife_hwr_mgr_ctx,
struct cam_isp_hw_error_event_data *error_event_data,
uint32_t curr_core_idx,
@@ -2313,12 +2746,10 @@ static int cam_ife_hw_mgr_handle_overflow(
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx = NULL;
cam_hw_event_cb_func ife_hwr_irq_err_cb;
struct cam_ife_hw_mgr *ife_hwr_mgr = NULL;
- uint32_t hw_mgr_priv = 1;
struct cam_hw_stop_args stop_args;
uint32_t i = 0;
CAM_DBG(CAM_ISP, "Enter");
- return 0;
if (!recovery_data) {
CAM_ERR(CAM_ISP, "recovery_data parameter is NULL",
@@ -2339,9 +2770,12 @@ static int cam_ife_hw_mgr_handle_overflow(
* with this context
*/
CAM_DBG(CAM_ISP, "Calling match Hw idx");
- if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
+ if (cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
+ affected_core, CAM_IFE_HW_NUM_MAX))
continue;
+ atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
+
ife_hwr_irq_err_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_ERROR];
@@ -2355,16 +2789,13 @@ static int cam_ife_hw_mgr_handle_overflow(
ife_hwr_mgr_ctx;
/*
- * Stop the hw resources associated with this context
- * and call the error callback. In the call back function
- * corresponding ISP context will update CRM about fatal Error
+ * In the call back function corresponding ISP context
+ * will update CRM about fatal Error
*/
- if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
- &stop_args)) {
- CAM_DBG(CAM_ISP, "Calling Error handler CB");
- ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
- CAM_ISP_HW_EVENT_ERROR, error_event_data);
- }
+
+ ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+ CAM_ISP_HW_EVENT_ERROR, error_event_data);
+
}
/* fill the affected_core in recovery data */
for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
@@ -2376,11 +2807,85 @@ static int cam_ife_hw_mgr_handle_overflow(
return 0;
}
+static int cam_ife_hw_mgr_get_err_type(
+ void *handler_priv,
+ void *payload)
+{
+ struct cam_isp_resource_node *hw_res_l = NULL;
+ struct cam_isp_resource_node *hw_res_r = NULL;
+ struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_ife_hw_mgr_res *isp_ife_camif_res = NULL;
+ uint32_t status = 0;
+ uint32_t core_idx;
+
+ ife_hwr_mgr_ctx = handler_priv;
+ evt_payload = payload;
+
+ if (!evt_payload) {
+ CAM_ERR(CAM_ISP, "No payload");
+ return IRQ_HANDLED;
+ }
+
+ core_idx = evt_payload->core_index;
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
+
+ list_for_each_entry(isp_ife_camif_res,
+ &ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+ if ((isp_ife_camif_res->res_type ==
+ CAM_IFE_HW_MGR_RES_UNINIT) ||
+ (isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+ continue;
+
+ hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+ hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+
+ CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
+ isp_ife_camif_res->is_dual_vfe);
+
+ /* ERROR check for Left VFE */
+ if (!hw_res_l) {
+ CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+ hw_res_l->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_l->hw_intf->hw_idx) {
+ status = hw_res_l->bottom_half_handler(
+ hw_res_l, evt_payload);
+ }
+
+ if (status)
+ break;
+
+ /* ERROR check for Right VFE */
+ if (!hw_res_r) {
+ CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
+ continue;
+ }
+ CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+ hw_res_r->hw_intf->hw_idx);
+
+ if (core_idx == hw_res_r->hw_intf->hw_idx) {
+ status = hw_res_r->bottom_half_handler(
+ hw_res_r, evt_payload);
+ }
+
+ if (status)
+ break;
+ }
+ CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
+ return status;
+}
+
static int cam_ife_hw_mgr_handle_camif_error(
void *handler_priv,
void *payload)
{
- int32_t rc = 0;
+ int32_t error_status = CAM_ISP_HW_ERROR_NONE;
uint32_t core_idx;
struct cam_ife_hw_mgr_ctx *ife_hwr_mgr_ctx;
struct cam_vfe_top_irq_evt_payload *evt_payload;
@@ -2391,17 +2896,22 @@ static int cam_ife_hw_mgr_handle_camif_error(
evt_payload = payload;
core_idx = evt_payload->core_index;
- rc = evt_payload->error_type;
- CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
- switch (evt_payload->error_type) {
+ error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
+ evt_payload);
+
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ return error_status;
+
+ switch (error_status) {
case CAM_ISP_HW_ERROR_OVERFLOW:
case CAM_ISP_HW_ERROR_P2I_ERROR:
case CAM_ISP_HW_ERROR_VIOLATION:
+ CAM_DBG(CAM_ISP, "Enter: error_type (%d)", error_status);
error_event_data.error_type =
CAM_ISP_HW_ERROR_OVERFLOW;
- cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+ cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
&error_event_data,
core_idx,
&recovery_data);
@@ -2411,12 +2921,10 @@ static int cam_ife_hw_mgr_handle_camif_error(
cam_ife_hw_mgr_do_error_recovery(&recovery_data);
break;
default:
- CAM_DBG(CAM_ISP, "None error. Error type (%d)",
- evt_payload->error_type);
+ CAM_DBG(CAM_ISP, "None error (%d)", error_status);
}
- CAM_DBG(CAM_ISP, "Exit (%d)", rc);
- return rc;
+ return error_status;
}
/*
@@ -2481,6 +2989,8 @@ static int cam_ife_hw_mgr_handle_reg_update(
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
}
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rup_status) {
ife_hwr_irq_rup_cb(
@@ -2512,6 +3022,8 @@ static int cam_ife_hw_mgr_handle_reg_update(
rup_status = hw_res->bottom_half_handler(
hw_res, evt_payload);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rup_status) {
/* Send the Reg update hw event */
ife_hwr_irq_rup_cb(
@@ -2633,6 +3145,9 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
if (core_idx == hw_res_l->hw_intf->hw_idx) {
epoch_status = hw_res_l->bottom_half_handler(
hw_res_l, evt_payload);
+ if (atomic_read(
+ &ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!epoch_status)
ife_hwr_irq_epoch_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -2680,6 +3195,8 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
core_index1,
evt_payload->evt_id);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!rc)
ife_hwr_irq_epoch_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -2740,6 +3257,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
if (core_idx == hw_res_l->hw_intf->hw_idx) {
sof_status = hw_res_l->bottom_half_handler(hw_res_l,
evt_payload);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!sof_status) {
cam_ife_mgr_cmd_get_sof_timestamp(
ife_hwr_mgr_ctx,
@@ -2795,12 +3314,20 @@ static int cam_ife_hw_mgr_process_camif_sof(
core_index0 = hw_res_l->hw_intf->hw_idx;
core_index1 = hw_res_r->hw_intf->hw_idx;
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
+
rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
core_index0, core_index1, evt_payload->evt_id);
- if (!rc)
+ if (!rc) {
+ cam_ife_mgr_cmd_get_sof_timestamp(
+ ife_hwr_mgr_ctx,
+ &sof_done_event_data.timestamp);
+
ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+ }
break;
@@ -2948,6 +3475,9 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
if (core_idx == hw_res_l->hw_intf->hw_idx) {
eof_status = hw_res_l->bottom_half_handler(
hw_res_l, evt_payload);
+ if (atomic_read(
+ &ife_hwr_mgr_ctx->overflow_pending))
+ break;
if (!eof_status)
ife_hwr_irq_eof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -2992,6 +3522,9 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
core_index1,
evt_payload->evt_id);
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
+
if (!rc)
ife_hwr_irq_eof_cb(
ife_hwr_mgr_ctx->common.cb_priv,
@@ -3036,6 +3569,8 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
ife_hwr_irq_wm_done_cb =
ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+ evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+
for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
@@ -3092,6 +3627,8 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
buf_done_event_data.resource_handle[0] =
isp_ife_out_res->res_id;
+ if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+ break;
/* Report for Successful buf_done event if any */
if (buf_done_event_data.num_handles > 0 &&
ife_hwr_irq_wm_done_cb) {
@@ -3129,7 +3666,7 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
* the affected context and any successful buf_done event is not
* reported.
*/
- rc = cam_ife_hw_mgr_handle_overflow(ife_hwr_mgr_ctx,
+ rc = cam_ife_hw_mgr_process_overflow(ife_hwr_mgr_ctx,
&error_event_data, evt_payload->core_index,
&recovery_data);
@@ -3168,8 +3705,6 @@ int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
evt_payload->irq_reg_val[5]);
CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
evt_payload->irq_reg_val[6]);
-
- CAM_DBG(CAM_ISP, "Calling Buf_done");
/* WM Done */
return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
@@ -3200,14 +3735,25 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
* for this context it needs to be handled remaining
* interrupts are ignored.
*/
- rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
- evt_payload_priv);
+ if (g_ife_hw_mgr.debug_cfg.enable_recovery) {
+ CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+ rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+ evt_payload_priv);
+ } else {
+ CAM_DBG(CAM_ISP, "recovery is not enabled");
+ rc = 0;
+ }
+
if (rc) {
CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
rc);
return IRQ_HANDLED;
}
+ CAM_DBG(CAM_ISP, "Calling EOF");
+ cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
+ evt_payload_priv);
+
CAM_DBG(CAM_ISP, "Calling SOF");
/* SOF IRQ */
cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
@@ -3222,8 +3768,6 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
/* EPOCH IRQ */
cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
evt_payload_priv);
- cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
- evt_payload_priv);
return IRQ_HANDLED;
}
@@ -3298,6 +3842,15 @@ static int cam_ife_hw_mgr_debug_register(void)
goto err;
}
+ if (!debugfs_create_u32("enable_recovery",
+ 0644,
+ g_ife_hw_mgr.debug_cfg.dentry,
+ &g_ife_hw_mgr.debug_cfg.enable_recovery)) {
+ CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+ goto err;
+ }
+ g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
+
return 0;
err:
@@ -3404,6 +3957,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
}
+ atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
for (i = 0; i < CAM_CTX_MAX; i++) {
memset(&g_ife_hw_mgr.ctx_pool[i], 0,
sizeof(g_ife_hw_mgr.ctx_pool[i]));
@@ -3496,4 +4050,3 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 2e66210..4d26138 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -69,10 +69,10 @@ struct cam_ife_hw_mgr_res {
/**
- * struct ctx_base_info - base hardware information for the context
+ * struct ctx_base_info - Base hardware information for the context
*
* @idx: Base resource index
- * @split_id: split info for the base resource
+ * @split_id: Split info for the base resource
*
*/
struct ctx_base_info {
@@ -85,11 +85,13 @@ struct ctx_base_info {
*
* @dentry: Debugfs entry
* @csid_debug: csid debug information
+ * @enable_recovery enable recovery
*
*/
struct cam_ife_hw_mgr_debug {
- struct dentry *dentry;
- uint64_t csid_debug;
+ struct dentry *dentry;
+ uint64_t csid_debug;
+ uint32_t enable_recovery;
};
/**
@@ -171,6 +173,7 @@ struct cam_ife_hw_mgr_ctx {
* @ife_csid_dev_caps csid device capability stored per core
* @ife_dev_caps ife device capability per core
* @work q work queue for IFE hw manager
+ * @debug_cfg debug configuration
*/
struct cam_ife_hw_mgr {
struct cam_isp_hw_mgr mgr_common;
@@ -179,6 +182,7 @@ struct cam_ife_hw_mgr {
struct cam_soc_reg_map *cdm_reg_map[CAM_IFE_HW_NUM_MAX];
struct mutex ctx_mutex;
+ atomic_t active_ctx_cnt;
struct list_head free_ctx_list;
struct list_head used_ctx_list;
struct cam_ife_hw_mgr_ctx ctx_pool[CAM_CTX_MAX];
@@ -186,8 +190,8 @@ struct cam_ife_hw_mgr {
struct cam_ife_csid_hw_caps ife_csid_dev_caps[
CAM_IFE_CSID_HW_NUM_MAX];
struct cam_vfe_hw_get_hw_cap ife_dev_caps[CAM_IFE_HW_NUM_MAX];
- struct cam_req_mgr_core_workq *workq;
- struct cam_ife_hw_mgr_debug debug_cfg;
+ struct cam_req_mgr_core_workq *workq;
+ struct cam_ife_hw_mgr_debug debug_cfg;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 8514ab3..3606af9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -13,6 +13,7 @@
#include <uapi/media/cam_defs.h>
#include <uapi/media/cam_isp.h>
#include "cam_mem_mgr.h"
+#include "cam_isp_hw.h"
#include "cam_vfe_hw_intf.h"
#include "cam_isp_packet_parser.h"
#include "cam_debug_util.h"
@@ -26,7 +27,7 @@ int cam_isp_add_change_base(
int rc = -EINVAL;
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_isp_resource_node *res;
- struct cam_isp_hw_get_cdm_args get_base;
+ struct cam_isp_hw_get_cmd_update get_base;
struct cam_hw_update_entry *hw_entry;
uint32_t num_ent, i;
@@ -53,24 +54,25 @@ int cam_isp_add_change_base(
continue;
get_base.res = res;
- get_base.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ get_base.cmd_type = CAM_ISP_HW_CMD_GET_CHANGE_BASE;
+ get_base.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4;
- get_base.size = kmd_buf_info->size -
+ get_base.cmd.size = kmd_buf_info->size -
kmd_buf_info->used_bytes;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
- CAM_VFE_HW_CMD_GET_CHANGE_BASE, &get_base,
- sizeof(struct cam_isp_hw_get_cdm_args));
+ CAM_ISP_HW_CMD_GET_CHANGE_BASE, &get_base,
+ sizeof(struct cam_isp_hw_get_cmd_update));
if (rc)
return rc;
hw_entry[num_ent].handle = kmd_buf_info->handle;
- hw_entry[num_ent].len = get_base.used_bytes;
+ hw_entry[num_ent].len = get_base.cmd.used_bytes;
hw_entry[num_ent].offset = kmd_buf_info->offset;
- kmd_buf_info->used_bytes += get_base.used_bytes;
- kmd_buf_info->offset += get_base.used_bytes;
+ kmd_buf_info->used_bytes += get_base.cmd.used_bytes;
+ kmd_buf_info->offset += get_base.cmd.used_bytes;
num_ent++;
prepare->num_hw_update_entries = num_ent;
@@ -95,6 +97,8 @@ static int cam_isp_update_dual_config(
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_isp_resource_node *res;
struct cam_isp_hw_dual_isp_update_args dual_isp_update_args;
+ uint32_t outport_id;
+ uint32_t ports_plane_idx;
size_t len = 0;
uint32_t *cpu_addr;
uint32_t i, j;
@@ -111,6 +115,14 @@ static int cam_isp_update_dual_config(
dual_config = (struct cam_isp_dual_config *)cpu_addr;
for (i = 0; i < dual_config->num_ports; i++) {
+
+ if (i >= CAM_ISP_IFE_OUT_RES_MAX) {
+ CAM_ERR(CAM_UTIL,
+ "failed update for i:%d > size_isp_out:%d",
+ i, size_isp_out);
+ return -EINVAL;
+ }
+
hw_mgr_res = &res_list_isp_out[i];
for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
if (!hw_mgr_res->hw_res[j])
@@ -120,12 +132,26 @@ static int cam_isp_update_dual_config(
continue;
res = hw_mgr_res->hw_res[j];
+
+ if (res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+ res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+ continue;
+
+ outport_id = res->res_id & 0xFF;
+
+ ports_plane_idx = (j * (dual_config->num_ports *
+ CAM_PACKET_MAX_PLANES)) +
+ (outport_id * CAM_PACKET_MAX_PLANES);
+
+ if (dual_config->stripes[ports_plane_idx].port_id == 0)
+ continue;
+
dual_isp_update_args.split_id = j;
dual_isp_update_args.res = res;
dual_isp_update_args.dual_cfg = dual_config;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
- CAM_VFE_HW_CMD_STRIPE_UPDATE,
+ CAM_ISP_HW_CMD_STRIPE_UPDATE,
&dual_isp_update_args,
sizeof(struct cam_isp_hw_dual_isp_update_args));
if (rc)
@@ -136,20 +162,83 @@ static int cam_isp_update_dual_config(
return rc;
}
+int cam_isp_add_cmd_buf_update(
+ struct cam_ife_hw_mgr_res *hw_mgr_res,
+ uint32_t cmd_type,
+ uint32_t hw_cmd_type,
+ uint32_t base_idx,
+ uint32_t *cmd_buf_addr,
+ uint32_t kmd_buf_remain_size,
+ void *cmd_update_data,
+ uint32_t *bytes_used)
+{
+ int rc = 0;
+ struct cam_isp_resource_node *res;
+ struct cam_isp_hw_get_cmd_update cmd_update;
+ uint32_t i;
+ uint32_t total_used_bytes = 0;
+
+ if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+ CAM_ERR(CAM_ISP, "io res id:%d not valid",
+ hw_mgr_res->res_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ if (hw_mgr_res->hw_res[i]->hw_intf->hw_idx != base_idx)
+ continue;
+
+ res = hw_mgr_res->hw_res[i];
+ cmd_update.res = res;
+ cmd_update.cmd_type = hw_cmd_type;
+ cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
+ cmd_update.cmd.size = kmd_buf_remain_size;
+ cmd_update.data = cmd_update_data;
+
+ CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+ cmd_update.cmd.cmd_buf_addr,
+ cmd_update.cmd.size);
+ rc = res->hw_intf->hw_ops.process_cmd(
+ res->hw_intf->hw_priv,
+ cmd_update.cmd_type, &cmd_update,
+ sizeof(struct cam_isp_hw_get_cmd_update));
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+ res->res_id);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ total_used_bytes += cmd_update.cmd.used_bytes;
+ }
+ *bytes_used = total_used_bytes;
+ CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes);
+ return rc;
+}
+
int cam_isp_add_command_buffers(
struct cam_hw_prepare_update_args *prepare,
- enum cam_isp_hw_split_id split_id,
- uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct ctx_base_info *base_info,
+ cam_packet_generic_blob_handler blob_handler_cb,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out)
{
int rc = 0;
- uint32_t cmd_meta_data, num_ent, i;
- struct cam_cmd_buf_desc *cmd_desc = NULL;
- struct cam_hw_update_entry *hw_entry;
+ uint32_t cmd_meta_data, num_ent, i;
+ uint32_t base_idx;
+ enum cam_isp_hw_split_id split_id;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct cam_hw_update_entry *hw_entry;
hw_entry = prepare->hw_update_entries;
- num_ent = prepare->num_hw_update_entries;
+ split_id = base_info->split_id;
+ base_idx = base_info->idx;
+
/*
* set the cmd_desc to point the first command descriptor in the
* packet
@@ -162,6 +251,7 @@ int cam_isp_add_command_buffers(
split_id, prepare->packet->num_cmd_buf);
for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+ num_ent = prepare->num_hw_update_entries;
if (!cmd_desc[i].length)
continue;
@@ -232,238 +322,75 @@ int cam_isp_add_command_buffers(
if (rc)
return rc;
break;
- case CAM_ISP_PACKET_META_GENERIC_BLOB:
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT:
+ if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d",
+ rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
+ break;
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT:
+ if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d",
+ rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
+ break;
+ case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: {
+ struct cam_isp_generic_blob_info blob_info;
+
+ prepare->num_hw_update_entries = num_ent;
+ blob_info.prepare = prepare;
+ blob_info.base_info = base_info;
+ blob_info.kmd_buf_info = kmd_buf_info;
+
+ rc = cam_packet_util_process_generic_cmd_buffer(
+ &cmd_desc[i],
+ blob_handler_cb,
+ &blob_info);
+ if (rc) {
+ CAM_ERR(CAM_ISP,
+ "Failed in processing blobs %d", rc);
+ return rc;
+ }
+ num_ent = prepare->num_hw_update_entries;
+ }
break;
default:
CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
cmd_meta_data);
return -EINVAL;
}
- }
-
- prepare->num_hw_update_entries = num_ent;
-
- return rc;
-}
-
-static int cam_isp_handle_hfr_config(
- struct cam_isp_generic_blob_info *blob_info,
- struct cam_isp_resource_hfr_config *hfr_config, uint32_t blob_size)
-{
- uint32_t cal_blob_size =
- sizeof(struct cam_isp_resource_hfr_config) +
- (sizeof(struct cam_isp_port_hfr_config) *
- (hfr_config->num_io_configs - 1));
-
- if (cal_blob_size != blob_size) {
- CAM_ERR(CAM_ISP, "Invalid blob size %d %d",
- cal_blob_size, blob_size);
- return -EINVAL;
- }
-
- CAM_DBG(CAM_ISP, "HFR num_io_config = %d", hfr_config->num_io_configs);
-
- if (blob_info->hfr_config) {
- CAM_WARN(CAM_ISP,
- "Ignoring previous hfr_config, prev=%d, curr=%d",
- blob_info->hfr_config->num_io_configs,
- hfr_config->num_io_configs);
- kfree(blob_info->hfr_config);
- }
-
- blob_info->hfr_config = kzalloc(blob_size, GFP_ATOMIC);
- if (!blob_info->hfr_config)
- return -ENOMEM;
-
- memcpy(blob_info->hfr_config, hfr_config, blob_size);
-
- return 0;
-}
-
-static int cam_isp_packet_generic_blob_handler(void *user_data,
- uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
-{
- int rc = 0;
-
- if (!blob_data || (blob_size == 0)) {
- CAM_ERR(CAM_ISP, "Invalid blob info %pK %d", blob_data,
- blob_size);
- return -EINVAL;
- }
-
- switch (blob_type) {
- case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG:
- rc = cam_isp_handle_hfr_config(user_data,
- (struct cam_isp_resource_hfr_config *)blob_data,
- blob_size);
- if (rc)
- CAM_ERR(CAM_ISP, "Failed in handling hfr config %d",
- rc);
-
- break;
- default:
- CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
- break;
- }
-
- return rc;
-}
-
-int cam_isp_process_generic_cmd_buffer(
- struct cam_hw_prepare_update_args *prepare,
- struct cam_isp_generic_blob_info *blob_info)
-{
- int i, rc = 0;
- struct cam_cmd_buf_desc *cmd_desc = NULL;
-
- /*
- * set the cmd_desc to point the first command descriptor in the
- * packet
- */
- cmd_desc = (struct cam_cmd_buf_desc *)
- ((uint8_t *)&prepare->packet->payload +
- prepare->packet->cmd_buf_offset);
-
- for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
- if (!cmd_desc[i].length)
- continue;
-
- if (cmd_desc[i].meta_data != CAM_ISP_PACKET_META_GENERIC_BLOB)
- continue;
-
- rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
- if (rc)
- return rc;
-
- rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
- cam_isp_packet_generic_blob_handler, blob_info);
- if (rc)
- CAM_ERR(CAM_ISP, "Failed in processing blobs %d", rc);
-
- break;
- }
-
- return rc;
-}
-
-int cam_isp_add_hfr_config_hw_update(
- struct cam_isp_resource_hfr_config *hfr_config,
- struct cam_hw_prepare_update_args *prepare,
- uint32_t base_idx,
- struct cam_kmd_buf_info *kmd_buf_info,
- struct cam_ife_hw_mgr_res *res_list_isp_out,
- uint32_t size_isp_out)
-{
- int rc = 0;
- struct cam_isp_resource_node *res;
- struct cam_ife_hw_mgr_res *hw_mgr_res;
- struct cam_isp_hw_get_hfr_update update_hfr;
- struct cam_isp_port_hfr_config *io_hfr_config;
- uint32_t kmd_buf_remain_size;
- uint32_t i, j;
- uint32_t res_id_out;
- uint32_t hfr_cfg_used_bytes, num_ent;
-
- hfr_cfg_used_bytes = 0;
-
- /* Max one hw entries required for hfr config update */
- if (prepare->num_hw_update_entries + 1 >=
- prepare->max_hw_update_entries) {
- CAM_ERR(CAM_ISP, "Insufficient HW entries :%d %d",
- prepare->num_hw_update_entries,
- prepare->max_hw_update_entries);
- return -EINVAL;
- }
-
- CAM_DBG(CAM_ISP, "num_io_configs= %d", hfr_config->num_io_configs);
-
- for (i = 0; i < hfr_config->num_io_configs; i++) {
- io_hfr_config = &hfr_config->io_hfr_config[i];
- res_id_out = io_hfr_config->resource_type & 0xFF;
-
- CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i, res_id_out);
-
- if (res_id_out >= size_isp_out) {
- CAM_ERR(CAM_ISP, "invalid out restype:%x",
- io_hfr_config->resource_type);
- return -EINVAL;
- }
-
- hw_mgr_res = &res_list_isp_out[res_id_out];
- if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
- CAM_ERR(CAM_ISP, "io res id:%d not valid",
- io_hfr_config->resource_type);
- return -EINVAL;
- }
-
- for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
- if (!hw_mgr_res->hw_res[j])
- continue;
-
- if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
- continue;
-
- res = hw_mgr_res->hw_res[j];
- if (res->res_id !=
- io_hfr_config->resource_type) {
- CAM_ERR(CAM_ISP,
- "wm err res id:%d io res id:%d",
- res->res_id,
- io_hfr_config->resource_type);
- return -EINVAL;
- }
-
- if ((kmd_buf_info->used_bytes + hfr_cfg_used_bytes) <
- kmd_buf_info->size) {
- kmd_buf_remain_size = kmd_buf_info->size -
- (kmd_buf_info->used_bytes +
- hfr_cfg_used_bytes);
- } else {
- CAM_ERR(CAM_ISP,
- "no free kmd memory for base %d",
- base_idx);
- rc = -ENOMEM;
- return rc;
- }
-
- update_hfr.cdm.res = res;
- update_hfr.cdm.cmd_buf_addr = kmd_buf_info->cpu_addr +
- kmd_buf_info->used_bytes/4 +
- hfr_cfg_used_bytes/4;
- update_hfr.cdm.size = kmd_buf_remain_size;
- update_hfr.io_hfr_cfg = io_hfr_config;
-
- CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
- update_hfr.cdm.cmd_buf_addr,
- update_hfr.cdm.size);
- rc = res->hw_intf->hw_ops.process_cmd(
- res->hw_intf->hw_priv,
- CAM_VFE_HW_CMD_GET_HFR_UPDATE, &update_hfr,
- sizeof(struct cam_isp_hw_get_hfr_update));
-
- if (rc) {
- CAM_ERR(CAM_ISP, "get buf cmd error:%d",
- res->res_id);
- rc = -ENOMEM;
- return rc;
- }
- hfr_cfg_used_bytes += update_hfr.cdm.used_bytes;
- }
- }
-
- CAM_DBG(CAM_ISP, "hfr_cfg_used_bytes %d", hfr_cfg_used_bytes);
- if (hfr_cfg_used_bytes) {
- /* Update the HW entries */
- num_ent = prepare->num_hw_update_entries;
- prepare->hw_update_entries[num_ent].handle =
- kmd_buf_info->handle;
- prepare->hw_update_entries[num_ent].len = hfr_cfg_used_bytes;
- prepare->hw_update_entries[num_ent].offset =
- kmd_buf_info->offset;
- num_ent++;
-
- kmd_buf_info->used_bytes += hfr_cfg_used_bytes;
- kmd_buf_info->offset += hfr_cfg_used_bytes;
prepare->num_hw_update_entries = num_ent;
}
@@ -485,7 +412,8 @@ int cam_isp_add_io_buffers(
struct cam_buf_io_cfg *io_cfg;
struct cam_isp_resource_node *res;
struct cam_ife_hw_mgr_res *hw_mgr_res;
- struct cam_isp_hw_get_buf_update update_buf;
+ struct cam_isp_hw_get_cmd_update update_buf;
+ struct cam_isp_hw_get_wm_update wm_update;
uint32_t kmd_buf_remain_size;
uint32_t i, j, num_out_buf, num_in_buf;
uint32_t res_id_out, res_id_in, plane_id;
@@ -606,7 +534,7 @@ int cam_isp_add_io_buffers(
hdl = io_cfg[i].mem_handle[plane_id];
if (res->process_cmd(res,
- CAM_VFE_HW_CMD_GET_SECURE_MODE,
+ CAM_ISP_HW_CMD_GET_SECURE_MODE,
&mode,
sizeof(bool)))
return -EINVAL;
@@ -670,22 +598,24 @@ int cam_isp_add_io_buffers(
rc = -ENOMEM;
return rc;
}
- update_buf.cdm.res = res;
- update_buf.cdm.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ update_buf.res = res;
+ update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
+ update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
io_cfg_used_bytes/4;
- update_buf.cdm.size = kmd_buf_remain_size;
- update_buf.image_buf = io_addr;
- update_buf.num_buf = plane_id;
- update_buf.io_cfg = &io_cfg[i];
+ wm_update.image_buf = io_addr;
+ wm_update.num_buf = plane_id;
+ wm_update.io_cfg = &io_cfg[i];
+ update_buf.cmd.size = kmd_buf_remain_size;
+ update_buf.wm_update = &wm_update;
CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
- update_buf.cdm.cmd_buf_addr,
- update_buf.cdm.size);
+ update_buf.cmd.cmd_buf_addr,
+ update_buf.cmd.size);
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
- CAM_VFE_HW_CMD_GET_BUF_UPDATE, &update_buf,
- sizeof(struct cam_isp_hw_get_buf_update));
+ CAM_ISP_HW_CMD_GET_BUF_UPDATE, &update_buf,
+ sizeof(struct cam_isp_hw_get_cmd_update));
if (rc) {
CAM_ERR(CAM_ISP, "get buf cmd error:%d",
@@ -693,7 +623,7 @@ int cam_isp_add_io_buffers(
rc = -ENOMEM;
return rc;
}
- io_cfg_used_bytes += update_buf.cdm.used_bytes;
+ io_cfg_used_bytes += update_buf.cmd.used_bytes;
}
}
@@ -733,7 +663,7 @@ int cam_isp_add_reg_update(
struct cam_isp_resource_node *res;
struct cam_ife_hw_mgr_res *hw_mgr_res;
struct cam_hw_update_entry *hw_entry;
- struct cam_isp_hw_get_cdm_args get_regup;
+ struct cam_isp_hw_get_cmd_update get_regup;
uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;
hw_entry = prepare->hw_update_entries;
@@ -773,22 +703,23 @@ int cam_isp_add_reg_update(
return rc;
}
- get_regup.cmd_buf_addr = kmd_buf_info->cpu_addr +
+ get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
kmd_buf_info->used_bytes/4 +
reg_update_size/4;
- get_regup.size = kmd_buf_remain_size;
+ get_regup.cmd.size = kmd_buf_remain_size;
+ get_regup.cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE;
get_regup.res = res;
rc = res->hw_intf->hw_ops.process_cmd(
res->hw_intf->hw_priv,
- CAM_VFE_HW_CMD_GET_REG_UPDATE, &get_regup,
- sizeof(struct cam_isp_hw_get_cdm_args));
+ CAM_ISP_HW_CMD_GET_REG_UPDATE, &get_regup,
+ sizeof(struct cam_isp_hw_get_cmd_update));
if (rc)
return rc;
CAM_DBG(CAM_ISP, "Reg update added for res %d hw_id %d",
res->res_id, res->hw_intf->hw_idx);
- reg_update_size += get_regup.used_bytes;
+ reg_update_size += get_regup.cmd.used_bytes;
}
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index 4a7eff8..8863275 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -261,6 +261,15 @@ void cam_tasklet_deinit(void **tasklet_info)
*tasklet_info = NULL;
}
+static void cam_tasklet_flush(void *tasklet_info)
+{
+ unsigned long data;
+ struct cam_tasklet_info *tasklet = tasklet_info;
+
+ data = (unsigned long)tasklet;
+ cam_tasklet_action(data);
+}
+
int cam_tasklet_start(void *tasklet_info)
{
struct cam_tasklet_info *tasklet = tasklet_info;
@@ -290,6 +299,7 @@ void cam_tasklet_stop(void *tasklet_info)
{
struct cam_tasklet_info *tasklet = tasklet_info;
+ cam_tasklet_flush(tasklet);
atomic_set(&tasklet->tasklet_active, 0);
tasklet_disable(&tasklet->tasklet);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index cce0071..e3f2ce2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -20,26 +20,32 @@
#include "cam_hw_intf.h"
#include "cam_packet_util.h"
-/**
- * struct cam_isp_generic_blob_info Generic blob information
+/*
+ * struct cam_isp_generic_blob_info
*
- * @hfr_config Initial configuration required to enable HFR
- *
+ * @prepare: Payload for prepare command
+ * @ctx_base_info: Base hardware information for the context
+ * @kmd_buf_info: Kmd buffer to store the custom cmd data
*/
struct cam_isp_generic_blob_info {
- struct cam_isp_resource_hfr_config *hfr_config;
+ struct cam_hw_prepare_update_args *prepare;
+ struct ctx_base_info *base_info;
+ struct cam_kmd_buf_info *kmd_buf_info;
};
-/**
+/*
+ * cam_isp_add_change_base()
+ *
* @brief Add change base in the hw entries list
* processe the isp source list get the change base from
* ISP HW instance
*
- * @prepare: Contain the packet and HW update variables
+ * @prepare: Contain the packet and HW update variables
* @res_list_isp_src: Resource list for IFE/VFE source
* @base_idx: Base or dev index of the IFE/VFE HW instance for
* which change change base need to be added
* @kmd_buf_info: Kmd buffer to store the change base command
+ *
* @return: 0 for success
* -EINVAL for Fail
*/
@@ -49,26 +55,63 @@ int cam_isp_add_change_base(
uint32_t base_idx,
struct cam_kmd_buf_info *kmd_buf_info);
-/**
+/*
+ * cam_isp_add_cmd_buf_update()
+ *
+ * @brief Add command buffer in the HW entries list for given
+ * Blob Data.
+ *
+ * @hw_mgr_res: HW resource to get the update from
+ * @cmd_type: Cmd type to get update for
+ * @hw_cmd_type: HW Cmd type corresponding to cmd_type
+ * @base_idx: Base hardware index
+ * @cmd_buf_addr: Cpu buf addr of kmd scratch buffer
+ * @kmd_buf_remain_size: Remaining size left for cmd buffer update
+ * @cmd_update_data: Data needed by HW to process the cmd and provide
+ * cmd buffer
+ * @bytes_used: Address of the field to be populated with
+ * total bytes used as output to caller
+ *
+ * @return: Negative for Failure
+ * otherwise returns bytes used
+ */
+int cam_isp_add_cmd_buf_update(
+ struct cam_ife_hw_mgr_res *hw_mgr_res,
+ uint32_t cmd_type,
+ uint32_t hw_cmd_type,
+ uint32_t base_idx,
+ uint32_t *cmd_buf_addr,
+ uint32_t kmd_buf_remain_size,
+ void *cmd_update_data,
+ uint32_t *bytes_used);
+
+/*
+ * cam_isp_add_command_buffers()
+ *
* @brief Add command buffer in the HW entries list for given
* left or right VFE/IFE instance.
*
- * @prepare: Contain the packet and HW update variables
- * @split_id: Left or right command buffers to be extracted
- * @base_idx: Base or dev index of the IFE/VFE HW instance
+ * @prepare: Contain the packet and HW update variables
+ * @kmd_buf_info: KMD buffer to store the custom cmd data
+ * @base_info: base hardware information
+ * @blob_handler_cb: Call_back_function for Meta handling
* @res_list_isp_out: IFE /VFE out resource list
* @size_isp_out: Size of the res_list_isp_out array
+ *
* @return: 0 for success
- * -EINVAL for Fail
+ * Negative for Failure
*/
int cam_isp_add_command_buffers(
struct cam_hw_prepare_update_args *prepare,
- enum cam_isp_hw_split_id split_id,
- uint32_t base_idx,
+ struct cam_kmd_buf_info *kmd_buf_info,
+ struct ctx_base_info *base_info,
+ cam_packet_generic_blob_handler blob_handler_cb,
struct cam_ife_hw_mgr_res *res_list_isp_out,
uint32_t size_isp_out);
-/**
+/*
+ * cam_isp_add_io_buffers()
+ *
* @brief Add io buffer configurations in the HW entries list
* processe the io configurations based on the base
* index and update the HW entries list
@@ -96,8 +139,9 @@ int cam_isp_add_io_buffers(
uint32_t size_isp_out,
bool fill_fence);
-
-/**
+/*
+ * cam_isp_add_reg_update()
+ *
* @brief Add reg update in the hw entries list
* processe the isp source list get the reg update from
* ISP HW instance
@@ -115,40 +159,4 @@ int cam_isp_add_reg_update(
uint32_t base_idx,
struct cam_kmd_buf_info *kmd_buf_info);
-/**
- * @brief Add HFR configurations in the HW entries list
- * processe the hfr configurations based on the base
- * index and update the HW entries list
- *
- * @hfr_config: HFR resource configuration info
- * @prepare: Contain the packet and HW update variables
- * @base_idx: Base or dev index of the IFE/VFE HW instance
- * @kmd_buf_info: Kmd buffer to store the change base command
- * @res_list_isp_out: IFE /VFE out resource list
- * @size_isp_out: Size of the res_list_isp_out array
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_add_hfr_config_hw_update(
- struct cam_isp_resource_hfr_config *hfr_config,
- struct cam_hw_prepare_update_args *prepare,
- uint32_t base_idx,
- struct cam_kmd_buf_info *kmd_buf_info,
- struct cam_ife_hw_mgr_res *res_list_isp_out,
- uint32_t size_isp_out);
-
-/**
- * @brief Processing Generic command buffer.
- *
- * @prepare: Contain the packet and HW update variables
- * @blob_info: Information from generic blob command buffer
- *
- * @return: 0 for success
- * -EINVAL for Fail
- */
-int cam_isp_process_generic_cmd_buffer(
- struct cam_hw_prepare_update_args *prepare,
- struct cam_isp_generic_blob_info *blob_info);
-
#endif /*_CAM_ISP_HW_PARSER_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index c6d5601..44dc5c4 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -239,7 +239,8 @@ int cam_irq_controller_subscribe_irq(void *irq_controller,
int i;
int rc = 0;
uint32_t irq_mask;
- unsigned long flags;
+ unsigned long flags = 0;
+ bool need_lock;
if (!controller || !handler_priv || !evt_bit_mask_arr) {
CAM_ERR(CAM_ISP,
@@ -301,7 +302,9 @@ int cam_irq_controller_subscribe_irq(void *irq_controller,
if (controller->hdl_idx > 0x3FFFFFFF)
controller->hdl_idx = 1;
- write_lock_irqsave(&controller->rw_lock, flags);
+ need_lock = !in_irq();
+ if (need_lock)
+ write_lock_irqsave(&controller->rw_lock, flags);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_register_arr[i].top_half_enable_mask[priority]
|= evt_bit_mask_arr[i];
@@ -313,7 +316,8 @@ int cam_irq_controller_subscribe_irq(void *irq_controller,
cam_io_w_mb(irq_mask, controller->mem_base +
controller->irq_register_arr[i].mask_reg_offset);
}
- write_unlock_irqrestore(&controller->rw_lock, flags);
+ if (need_lock)
+ write_unlock_irqrestore(&controller->rw_lock, flags);
list_add_tail(&evt_handler->list_node,
&controller->evt_handler_list_head);
@@ -334,11 +338,12 @@ int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
struct cam_irq_controller *controller = irq_controller;
struct cam_irq_evt_handler *evt_handler = NULL;
struct cam_irq_evt_handler *evt_handler_temp;
- unsigned long flags;
+ unsigned long flags = 0;
unsigned int i;
uint32_t irq_mask;
uint32_t found = 0;
int rc = -EINVAL;
+ bool need_lock;
if (!controller)
return rc;
@@ -356,7 +361,9 @@ int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
if (!found)
return rc;
- write_lock_irqsave(&controller->rw_lock, flags);
+ need_lock = !in_irq();
+ if (need_lock)
+ write_lock_irqsave(&controller->rw_lock, flags);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_register_arr[i].
top_half_enable_mask[evt_handler->priority] |=
@@ -370,7 +377,8 @@ int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
cam_io_w_mb(irq_mask, controller->mem_base +
controller->irq_register_arr[i].mask_reg_offset);
}
- write_unlock_irqrestore(&controller->rw_lock, flags);
+ if (need_lock)
+ write_unlock_irqrestore(&controller->rw_lock, flags);
return rc;
}
@@ -380,11 +388,12 @@ int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
struct cam_irq_controller *controller = irq_controller;
struct cam_irq_evt_handler *evt_handler = NULL;
struct cam_irq_evt_handler *evt_handler_temp;
- unsigned long flags;
+ unsigned long flags = 0;
unsigned int i;
uint32_t irq_mask;
uint32_t found = 0;
int rc = -EINVAL;
+ bool need_lock;
if (!controller)
return rc;
@@ -402,7 +411,9 @@ int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
if (!found)
return rc;
- write_lock_irqsave(&controller->rw_lock, flags);
+ need_lock = !in_irq();
+ if (need_lock)
+ write_lock_irqsave(&controller->rw_lock, flags);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_register_arr[i].
top_half_enable_mask[evt_handler->priority] &=
@@ -429,7 +440,8 @@ int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
controller->mem_base +
controller->global_clear_offset);
}
- write_unlock_irqrestore(&controller->rw_lock, flags);
+ if (need_lock)
+ write_unlock_irqrestore(&controller->rw_lock, flags);
return rc;
}
@@ -443,8 +455,9 @@ int cam_irq_controller_unsubscribe_irq(void *irq_controller,
uint32_t i;
uint32_t found = 0;
uint32_t irq_mask;
- unsigned long flags;
+ unsigned long flags = 0;
int rc = -EINVAL;
+ bool need_lock;
list_for_each_entry_safe(evt_handler, evt_handler_temp,
&controller->evt_handler_list_head, list_node) {
@@ -458,8 +471,11 @@ int cam_irq_controller_unsubscribe_irq(void *irq_controller,
}
}
+ need_lock = !in_irq();
+
if (found) {
- write_lock_irqsave(&controller->rw_lock, flags);
+ if (need_lock)
+ write_lock_irqsave(&controller->rw_lock, flags);
for (i = 0; i < controller->num_registers; i++) {
controller->irq_register_arr[i].
top_half_enable_mask[evt_handler->priority] &=
@@ -485,7 +501,8 @@ int cam_irq_controller_unsubscribe_irq(void *irq_controller,
controller->mem_base +
controller->global_clear_offset);
}
- write_unlock_irqrestore(&controller->rw_lock, flags);
+ if (need_lock)
+ write_unlock_irqrestore(&controller->rw_lock, flags);
kfree(evt_handler->evt_bit_mask_arr);
kfree(evt_handler);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index cd92035..9a368cf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -32,6 +32,15 @@
#define CAM_IFE_CSID_TIMEOUT_SLEEP_US 1000
#define CAM_IFE_CSID_TIMEOUT_ALL_US 1000000
+/*
+ * Constant Factors needed to change QTimer ticks to nanoseconds
+ * QTimer Freq = 19.2 MHz
+ * Time(us) = ticks/19.2
+ * Time(ns) = ticks/19.2 * 1000
+ */
+#define CAM_IFE_CSID_QTIMER_MUL_FACTOR 10000
+#define CAM_IFE_CSID_QTIMER_DIV_FACTOR 192
+
static int cam_ife_csid_is_ipp_format_supported(
uint32_t in_format)
{
@@ -1972,6 +1981,11 @@ static int cam_ife_csid_get_time_stamp(
time_stamp->time_stamp_val |= time_32;
}
+ time_stamp->time_stamp_val = mul_u64_u32_div(
+ time_stamp->time_stamp_val,
+ CAM_IFE_CSID_QTIMER_MUL_FACTOR,
+ CAM_IFE_CSID_QTIMER_DIV_FACTOR);
+
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index deef41f..07217f5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -392,6 +392,7 @@ struct cam_ife_csid_cid_data {
* for RDI, set mode to none
* @master_idx: For Slave reservation, Give master IFE instance Index.
* Slave will synchronize with master Start and stop operations
+ * @clk_rate: Clock rate
*
*/
struct cam_ife_csid_path_cfg {
@@ -409,6 +410,7 @@ struct cam_ife_csid_path_cfg {
uint32_t height;
enum cam_isp_hw_sync_mode sync_mode;
uint32_t master_idx;
+ uint64_t clk_rate;
};
/**
@@ -432,6 +434,7 @@ struct cam_ife_csid_path_cfg {
* @csid_rdin_reset_complete: rdi n completion
* @csid_debug: csid debug information to enable the SOT, EOT,
* SOF, EOF, measure etc in the csid hw
+ * @clk_rate: Clock rate
*
*/
struct cam_ife_csid_hw {
@@ -452,6 +455,7 @@ struct cam_ife_csid_hw {
struct completion csid_ipp_complete;
struct completion csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
uint64_t csid_debug;
+ uint64_t clk_rate;
};
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index c036bca..e11ff63 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -143,7 +143,7 @@ int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
}
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, true);
+ CAM_SVS_VOTE, true);
if (rc) {
CAM_ERR(CAM_ISP, "enable platform failed");
goto stop_cpas;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 3a0c6a7..257a5ac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -15,6 +15,7 @@
#include <linux/completion.h>
#include "cam_hw.h"
+#include <uapi/media/cam_isp.h>
#include "cam_soc_util.h"
#include "cam_irq_controller.h"
#include <uapi/media/cam_isp.h>
@@ -82,6 +83,18 @@ enum cam_isp_resource_type {
CAM_ISP_RESOURCE_MAX,
};
+enum cam_isp_hw_cmd_type {
+ CAM_ISP_HW_CMD_GET_CHANGE_BASE,
+ CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+ CAM_ISP_HW_CMD_GET_REG_UPDATE,
+ CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+ CAM_ISP_HW_CMD_GET_SECURE_MODE,
+ CAM_ISP_HW_CMD_STRIPE_UPDATE,
+ CAM_ISP_HW_CMD_CLOCK_UPDATE,
+ CAM_ISP_HW_CMD_BW_UPDATE,
+ CAM_ISP_HW_CMD_MAX,
+};
+
/*
* struct cam_isp_resource_node:
*
@@ -99,6 +112,9 @@ enum cam_isp_resource_type {
* @tasklet_info: Tasklet structure that will be used to
* schedule IRQ events related to this resource
* @irq_handle: handle returned on subscribing for IRQ event
+ * @rdi_only_ctx: whether the resource belongs to an RDI-only context
+ * @init: function pointer to init the HW resource
+ * @deinit: function pointer to deinit the HW resource
* @start: function pointer to start the HW resource
* @stop: function pointer to stop the HW resource
* @process_cmd: function pointer for processing commands
@@ -116,7 +132,12 @@ struct cam_isp_resource_node {
void *cdm_ops;
void *tasklet_info;
int irq_handle;
+ int rdi_only_ctx;
+ int (*init)(struct cam_isp_resource_node *rsrc_node,
+ void *init_args, uint32_t arg_size);
+ int (*deinit)(struct cam_isp_resource_node *rsrc_node,
+ void *deinit_args, uint32_t arg_size);
int (*start)(struct cam_isp_resource_node *rsrc_node);
int (*stop)(struct cam_isp_resource_node *rsrc_node);
int (*process_cmd)(struct cam_isp_resource_node *rsrc_node,
@@ -126,54 +147,58 @@ struct cam_isp_resource_node {
};
/*
- * struct cam_isp_hw_get_cdm_args:
+ * struct cam_isp_hw_cmd_buf_update:
*
- * @Brief: Contain the command buffer information
- * to store the CDM commands.
+ * @Brief: Contain the new created command buffer information
*
- * @res: Resource node
* @cmd_buf_addr: Command buffer to store the change base command
* @size: Size of the buffer in bytes
* @used_bytes: Consumed bytes in the command buffer
*
*/
-struct cam_isp_hw_get_cdm_args {
- struct cam_isp_resource_node *res;
+struct cam_isp_hw_cmd_buf_update {
uint32_t *cmd_buf_addr;
uint32_t size;
uint32_t used_bytes;
};
/*
- * struct cam_isp_hw_get_buf_update:
+ * struct cam_isp_hw_get_wm_update:
*
- * @Brief: Get cdm commands for buffer updates.
+ * @Brief: Get cmd buffer for WM updates.
*
- * @ cdm: Command buffer information
* @ image_buf: image buffer address array
* @ num_buf: Number of buffers in the image_buf array
* @ io_cfg: IO buffer config information sent from UMD
*
*/
-struct cam_isp_hw_get_buf_update {
- struct cam_isp_hw_get_cdm_args cdm;
+struct cam_isp_hw_get_wm_update {
uint64_t *image_buf;
uint32_t num_buf;
struct cam_buf_io_cfg *io_cfg;
};
/*
- * struct cam_isp_hw_get_hfr_update:
+ * struct cam_isp_hw_get_cmd_update:
*
- * @Brief: Get cdm commands for HFR updates.
+ * @Brief: Get cmd buffer update for different CMD types
*
- * @ cdm: Command buffer information
- * @ io_hfr_cfg: IO buffer config information sent from UMD
+ * @res: Resource node
+ * @cmd_type: Command type for which to get update
+ * @cmd: Command buffer information
*
*/
-struct cam_isp_hw_get_hfr_update {
- struct cam_isp_hw_get_cdm_args cdm;
- struct cam_isp_port_hfr_config *io_hfr_cfg;
+struct cam_isp_hw_get_cmd_update {
+ struct cam_isp_resource_node *res;
+ enum cam_isp_hw_cmd_type cmd_type;
+ struct cam_isp_hw_cmd_buf_update cmd;
+ union {
+ void *data;
+ struct cam_isp_hw_get_wm_update *wm_update;
+ struct cam_isp_port_hfr_config *hfr_update;
+ struct cam_isp_clock_config *clock_update;
+ struct cam_isp_bw_config *bw_update;
+ };
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 96263de..b771ec6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -44,16 +44,6 @@ enum cam_isp_hw_vfe_core {
CAM_ISP_HW_VFE_CORE_MAX,
};
-enum cam_vfe_hw_cmd_type {
- CAM_VFE_HW_CMD_GET_CHANGE_BASE,
- CAM_VFE_HW_CMD_GET_BUF_UPDATE,
- CAM_VFE_HW_CMD_GET_REG_UPDATE,
- CAM_VFE_HW_CMD_GET_HFR_UPDATE,
- CAM_VFE_HW_CMD_GET_SECURE_MODE,
- CAM_VFE_HW_CMD_STRIPE_UPDATE,
- CAM_VFE_HW_CMD_MAX,
-};
-
enum cam_vfe_hw_irq_status {
CAM_VFE_IRQ_STATUS_ERR_COMP = -3,
CAM_VFE_IRQ_STATUS_COMP_OWRT = -2,
@@ -80,6 +70,12 @@ enum cam_vfe_bus_irq_regs {
CAM_IFE_BUS_IRQ_REGISTERS_MAX,
};
+enum cam_vfe_reset_type {
+ CAM_VFE_HW_RESET_HW_AND_REG,
+ CAM_VFE_HW_RESET_HW,
+ CAM_VFE_HW_RESET_MAX,
+};
+
/*
* struct cam_vfe_hw_get_hw_cap:
*
@@ -165,6 +161,31 @@ struct cam_vfe_acquire_args {
};
/*
+ * struct cam_vfe_clock_update_args:
+ *
+ * @node_res: Resource for which the clock rate update is requested
+ * @clk_rate: Clock rate requested
+ */
+struct cam_vfe_clock_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t clk_rate;
+};
+
+/*
+ * struct cam_vfe_bw_update_args:
+ *
+ * @node_res: Resource for which the bandwidth vote is requested
+ * @camnoc_bw_bytes: Bandwidth vote request for CAMNOC
+ * @external_bw_bytes: Bandwidth vote request from CAMNOC
+ * out to the rest of the path-to-DDR
+ */
+struct cam_vfe_bw_update_args {
+ struct cam_isp_resource_node *node_res;
+ uint64_t camnoc_bw_bytes;
+ uint64_t external_bw_bytes;
+};
+
+/*
* struct cam_vfe_top_irq_evt_payload:
*
* @Brief: This structure is used to save payload for IRQ
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 8e83cb0..187aeaf 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -25,7 +25,6 @@
#include "cam_debug_util.h"
static const char drv_name[] = "vfe";
-
static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
0x0000006C,
0x00000070,
@@ -34,6 +33,11 @@ static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
0x0003FD1F,
+ 0x00000000,
+};
+
+static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+ 0x00000000,
0x0FFF7EBC,
};
@@ -83,6 +87,7 @@ int cam_vfe_put_evt_payload(void *core_info,
}
spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+ (*evt_payload)->error_type = 0;
list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
@@ -125,9 +130,6 @@ int cam_vfe_reset_irq_top_half(uint32_t evt_id,
CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
if (th_payload->evt_status_arr[0] & (1<<31)) {
- CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
- complete(handler_priv->reset_complete);
-
/*
* Clear All IRQs to avoid spurious IRQs immediately
* after Reset Done.
@@ -135,6 +137,9 @@ int cam_vfe_reset_irq_top_half(uint32_t evt_id,
cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
cam_io_w(0x1, handler_priv->mem_base + 0x58);
+ CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+ complete(handler_priv->reset_complete);
+
rc = 0;
}
@@ -143,12 +148,69 @@ int cam_vfe_reset_irq_top_half(uint32_t evt_id,
return rc;
}
+static int cam_vfe_irq_err_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ int32_t rc;
+ int i;
+ struct cam_vfe_irq_handler_priv *handler_priv;
+ struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_vfe_hw_core_info *core_info;
+
+ CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+ th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+ handler_priv = th_payload->handler_priv;
+ core_info = handler_priv->core_info;
+ /*
+ * need to handle overflow condition here, otherwise irq storm
+ * will block everything
+ */
+
+ if (th_payload->evt_status_arr[1]) {
+ CAM_ERR(CAM_ISP, "IRQ status_1: %x, Masking all interrupts",
+ th_payload->evt_status_arr[1]);
+ cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
+ core_info->irq_err_handle);
+ }
+
+ rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+ if (rc) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "No tasklet_cmd is free in queue\n");
+ return rc;
+ }
+
+ cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+ evt_payload->core_index = handler_priv->core_index;
+ evt_payload->core_info = handler_priv->core_info;
+ evt_payload->evt_id = evt_id;
+
+ for (i = 0; i < th_payload->num_registers; i++)
+ evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+ for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+ evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+ irq_reg_offset[i]);
+ }
+
+ CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+
+ th_payload->evt_payload_priv = evt_payload;
+
+ return rc;
+}
+
int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
{
struct cam_hw_info *vfe_hw = hw_priv;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_vfe_hw_core_info *core_info = NULL;
+ struct cam_isp_resource_node *isp_res = NULL;
int rc = 0;
+ uint32_t reset_core_args =
+ CAM_VFE_HW_RESET_HW_AND_REG;
CAM_DBG(CAM_ISP, "Enter");
if (!hw_priv) {
@@ -177,23 +239,37 @@ int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
goto decrement_open_cnt;
}
+ isp_res = (struct cam_isp_resource_node *)init_hw_args;
+ if (isp_res && isp_res->init) {
+ rc = isp_res->init(isp_res, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "init Failed rc=%d", rc);
+ goto disable_soc;
+ }
+ }
+
CAM_DBG(CAM_ISP, "Enable soc done");
/* Do HW Reset */
- rc = cam_vfe_reset(hw_priv, NULL, 0);
+ rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
if (rc) {
CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
- goto disable_soc;
+ goto deinint_vfe_res;
}
rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
NULL, 0);
if (rc) {
CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
- goto disable_soc;
+ goto deinint_vfe_res;
}
- return 0;
+ vfe_hw->hw_state = CAM_HW_STATE_POWER_UP;
+ return rc;
+
+deinint_vfe_res:
+ if (isp_res && isp_res->deinit)
+ isp_res->deinit(isp_res, NULL, 0);
disable_soc:
cam_vfe_disable_soc_resources(soc_info);
decrement_open_cnt:
@@ -207,6 +283,8 @@ int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
{
struct cam_hw_info *vfe_hw = hw_priv;
struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_vfe_hw_core_info *core_info = NULL;
+ struct cam_isp_resource_node *isp_res = NULL;
int rc = 0;
CAM_DBG(CAM_ISP, "Enter");
@@ -230,6 +308,19 @@ int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
mutex_unlock(&vfe_hw->hw_mutex);
soc_info = &vfe_hw->soc_info;
+ core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+ rc = core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+ NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc);
+
+ isp_res = (struct cam_isp_resource_node *)deinit_hw_args;
+ if (isp_res && isp_res->deinit) {
+ rc = isp_res->deinit(isp_res, NULL, 0);
+ if (rc)
+ CAM_ERR(CAM_ISP, "deinit failed");
+ }
/* Turn OFF Regulators, Clocks and other SOC resources */
CAM_DBG(CAM_ISP, "Disable SOC resource");
@@ -278,7 +369,8 @@ int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
reinit_completion(&vfe_hw->hw_complete);
CAM_DBG(CAM_ISP, "calling RESET");
- core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
+ core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
+ reset_core_args, arg_size);
CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
/* Wait for Completion or Timeout of 500ms */
rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
@@ -305,20 +397,37 @@ void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp)
time_stamp->mono_time.tv_usec = ts.tv_nsec/1000;
}
-
-int cam_vfe_irq_top_half(uint32_t evt_id,
+static int cam_vfe_irq_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
int32_t rc;
int i;
struct cam_vfe_irq_handler_priv *handler_priv;
struct cam_vfe_top_irq_evt_payload *evt_payload;
+ struct cam_vfe_hw_core_info *core_info;
handler_priv = th_payload->handler_priv;
CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+ /*
+ * need to handle non-recoverable condition here, otherwise irq storm
+ * will block everything.
+ */
+ if (th_payload->evt_status_arr[0] & 0x3FC00) {
+ CAM_ERR(CAM_ISP,
+ "Encountered Error Irq_status0=0x%x Status1=0x%x",
+ th_payload->evt_status_arr[0],
+ th_payload->evt_status_arr[1]);
+ CAM_ERR(CAM_ISP,
+ "Stopping further IRQ processing from this HW index=%d",
+ handler_priv->core_index);
+ cam_io_w(0, handler_priv->mem_base + 0x60);
+ cam_io_w(0, handler_priv->mem_base + 0x5C);
+ return 0;
+ }
+
rc = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
if (rc) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
@@ -326,6 +435,7 @@ int cam_vfe_irq_top_half(uint32_t evt_id,
return rc;
}
+ core_info = handler_priv->core_info;
cam_isp_hw_get_timestamp(&evt_payload->ts);
evt_payload->core_index = handler_priv->core_index;
@@ -341,22 +451,6 @@ int cam_vfe_irq_top_half(uint32_t evt_id,
}
CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
- /*
- * need to handle overflow condition here, otherwise irq storm
- * will block everything.
- */
- if (evt_payload->irq_reg_val[1]) {
- CAM_ERR(CAM_ISP,
- "Encountered Error Irq_status1=0x%x. Stopping further IRQ processing from this HW",
- evt_payload->irq_reg_val[1]);
- CAM_ERR(CAM_ISP, "Violation status = %x",
- evt_payload->irq_reg_val[2]);
- cam_io_w(0, handler_priv->mem_base + 0x60);
- cam_io_w(0, handler_priv->mem_base + 0x5C);
-
- evt_payload->error_type = CAM_ISP_HW_ERROR_OVERFLOW;
- }
-
th_payload->evt_payload_priv = evt_payload;
CAM_DBG(CAM_ISP, "Exit");
@@ -437,7 +531,7 @@ int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
struct cam_vfe_hw_core_info *core_info = NULL;
struct cam_hw_info *vfe_hw = hw_priv;
struct cam_isp_resource_node *isp_res;
- int rc = -ENODEV;
+ int rc = 0;
if (!hw_priv || !start_args ||
(arg_size != sizeof(struct cam_isp_resource_node))) {
@@ -447,35 +541,72 @@ int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
isp_res = (struct cam_isp_resource_node *)start_args;
+ core_info->tasklet_info = isp_res->tasklet_info;
mutex_lock(&vfe_hw->hw_mutex);
if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
- if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller,
- CAM_IRQ_PRIORITY_1,
- camif_irq_reg_mask, &core_info->irq_payload,
- cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
- else
- isp_res->irq_handle = cam_irq_controller_subscribe_irq(
- core_info->vfe_irq_controller,
- CAM_IRQ_PRIORITY_1,
- rdi_irq_reg_mask, &core_info->irq_payload,
- cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
- isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
+ if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+ isp_res->irq_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_1,
+ camif_irq_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_top_half,
+ cam_ife_mgr_do_tasklet,
+ isp_res->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (isp_res->irq_handle < 1)
+ rc = -ENOMEM;
+ } else if (isp_res->rdi_only_ctx) {
+ isp_res->irq_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_1,
+ rdi_irq_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_top_half,
+ cam_ife_mgr_do_tasklet,
+ isp_res->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (isp_res->irq_handle < 1)
+ rc = -ENOMEM;
+ }
- if (isp_res->irq_handle > 0)
+ if (rc == 0) {
rc = core_info->vfe_top->hw_ops.start(
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
- else
+ if (rc)
+ CAM_ERR(CAM_ISP, "Start failed. type:%d",
+ isp_res->res_type);
+ } else {
CAM_ERR(CAM_ISP,
"Error! subscribe irq controller failed");
+ }
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
} else {
CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+ rc = -EFAULT;
+ }
+
+ if (!core_info->irq_err_handle) {
+ core_info->irq_err_handle =
+ cam_irq_controller_subscribe_irq(
+ core_info->vfe_irq_controller,
+ CAM_IRQ_PRIORITY_0,
+ camif_irq_err_reg_mask,
+ &core_info->irq_payload,
+ cam_vfe_irq_err_top_half,
+ cam_ife_mgr_do_tasklet,
+ core_info->tasklet_info,
+ cam_tasklet_enqueue_cmd);
+ if (core_info->irq_err_handle < 1) {
+ CAM_ERR(CAM_ISP, "Error handle subscribe failure");
+ rc = -ENOMEM;
+ core_info->irq_err_handle = 0;
+ }
}
mutex_unlock(&vfe_hw->hw_mutex);
@@ -506,12 +637,20 @@ int cam_vfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size)
rc = core_info->vfe_top->hw_ops.stop(
core_info->vfe_top->top_priv, isp_res,
sizeof(struct cam_isp_resource_node));
+
} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
} else {
CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
}
+ if (core_info->irq_err_handle) {
+ cam_irq_controller_unsubscribe_irq(
+ core_info->vfe_irq_controller,
+ core_info->irq_err_handle);
+ core_info->irq_err_handle = 0;
+ }
+
mutex_unlock(&vfe_hw->hw_mutex);
return rc;
@@ -546,16 +685,17 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
hw_info = core_info->vfe_hw_info;
switch (cmd_type) {
- case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
- case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+ case CAM_ISP_HW_CMD_BW_UPDATE:
rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size);
-
break;
- case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
- case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
- case CAM_VFE_HW_CMD_STRIPE_UPDATE:
+ case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+ case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+ case CAM_ISP_HW_CMD_STRIPE_UPDATE:
rc = core_info->vfe_bus->hw_ops.process_cmd(
core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
arg_size);
@@ -671,4 +811,3 @@ int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
index ee29e1cf..0674a6ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -50,12 +50,13 @@ struct cam_vfe_hw_core_info {
void *vfe_irq_controller;
struct cam_vfe_top *vfe_top;
struct cam_vfe_bus *vfe_bus;
-
+ void *tasklet_info;
struct cam_vfe_top_irq_evt_payload evt_payload[CAM_VFE_EVT_MAX];
struct list_head free_payload_list;
struct cam_vfe_irq_handler_priv irq_payload;
uint32_t cpas_handle;
int irq_handle;
+ int irq_err_handle;
spinlock_t spin_lock;
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index b5ca432..0f93664 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -15,6 +15,30 @@
#include "cam_vfe_soc.h"
#include "cam_debug_util.h"
+static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata,
+ struct cam_cpas_irq_data *irq_data)
+{
+ bool error_handled = false;
+
+ if (!irq_data)
+ return error_handled;
+
+ switch (irq_data->irq_type) {
+ case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR:
+ case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR:
+ CAM_ERR_RATE_LIMIT(CAM_ISP,
+ "IFE UBWC Encode error type=%d status=%x",
+ irq_data->irq_type,
+ irq_data->u.enc_err.encerr_status.value);
+ error_handled = true;
+ break;
+ default:
+ break;
+ }
+
+ return error_handled;
+}
+
static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
{
int rc = 0;
@@ -76,6 +100,12 @@ int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
goto free_soc_private;
}
+ rc = cam_soc_util_get_option_clk_by_name(soc_info,
+ CAM_VFE_DSP_CLK_NAME, &soc_private->dsp_clk,
+ &soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
+ if (rc)
+ CAM_WARN(CAM_ISP, "option clk get failed");
+
rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
irq_data);
if (rc < 0) {
@@ -89,6 +119,8 @@ int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
CAM_HW_IDENTIFIER_LENGTH);
cpas_register_param.cell_index = soc_info->index;
cpas_register_param.dev = soc_info->dev;
+ cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb;
+ cpas_register_param.userdata = soc_info;
rc = cam_cpas_register_client(&cpas_register_param);
if (rc) {
CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
@@ -132,6 +164,11 @@ int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
CAM_ERR(CAM_ISP,
"Error! Release platform resources failed rc=%d", rc);
+ rc = cam_soc_util_clk_put(&soc_private->dsp_clk);
+ if (rc < 0)
+ CAM_ERR(CAM_ISP,
+ "Error Put dsp clk failed rc=%d", rc);
+
kfree(soc_private);
return rc;
@@ -179,6 +216,54 @@ int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
return rc;
}
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ rc = -EINVAL;
+ return rc;
+ }
+ soc_private = soc_info->soc_private;
+
+ if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+ rc = cam_soc_util_clk_enable(soc_private->dsp_clk,
+ CAM_VFE_DSP_CLK_NAME, soc_private->dsp_clk_rate);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error enable dsp clk failed rc=%d", rc);
+ }
+
+ return rc;
+}
+
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name)
+{
+ int rc = 0;
+ struct cam_vfe_soc_private *soc_private;
+
+ if (!soc_info) {
+ CAM_ERR(CAM_ISP, "Error Invalid params");
+ rc = -EINVAL;
+ return rc;
+ }
+ soc_private = soc_info->soc_private;
+
+ if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+ rc = cam_soc_util_clk_disable(soc_private->dsp_clk,
+ CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP,
+ "Error disable dsp clk failed rc=%d", rc);
+ }
+
+ return rc;
+}
+
int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
{
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
index 094c977..7a4dbea 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -16,6 +16,8 @@
#include "cam_soc_util.h"
#include "cam_isp_hw.h"
+#define CAM_VFE_DSP_CLK_NAME "ife_dsp_clk"
+
/*
* struct cam_vfe_soc_private:
*
@@ -26,7 +28,10 @@
* with CPAS.
*/
struct cam_vfe_soc_private {
- uint32_t cpas_handle;
+ uint32_t cpas_handle;
+ struct clk *dsp_clk;
+ int32_t dsp_clk_index;
+ int32_t dsp_clk_rate;
};
/*
@@ -80,4 +85,32 @@ int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
*/
int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+/*
+ * cam_vfe_soc_enable_clk()
+ *
+ * @brief: Enable clock with given name
+ *
+ * @soc_info: Device soc information
+ * @clk_name: Name of clock to enable
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name);
+
+/*
+ * cam_vfe_soc_disable_clk()
+ *
+ * @brief: Disable clock with given name
+ *
+ * @soc_info: Device soc information
+ * @clk_name: Name of clock to disable
+ *
+ * @Return: 0: Success
+ * Non-zero: Failure
+ */
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+ const char *clk_name);
+
#endif /* _CAM_VFE_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index 5773bbe..a4ba2e1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -67,12 +67,18 @@ static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
.extern_reg_update_mask = 1,
.pixel_pattern_shift = 0,
.pixel_pattern_mask = 0x7,
+ .dsp_mode_shift = 23,
+ .dsp_mode_mask = 0x1,
+ .dsp_en_shift = 3,
+ .dsp_en_mask = 0x1,
.reg_update_cmd_data = 0x1,
.epoch_line_cfg = 0x00140014,
.sof_irq_mask = 0x00000001,
.epoch0_irq_mask = 0x00000004,
.reg_update_irq_mask = 0x00000010,
.eof_irq_mask = 0x00000002,
+ .error_irq_mask0 = 0x0003FC00,
+ .error_irq_mask1 = 0x0FFF7E80,
};
struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
@@ -193,6 +199,7 @@ static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = {
.meta_offset = 0x0000253C,
.meta_stride = 0x00002540,
.mode_cfg = 0x00002544,
+ .bw_limit = 0x000025A0,
};
static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
@@ -203,6 +210,7 @@ static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
.meta_offset = 0x0000263C,
.meta_stride = 0x00002640,
.mode_cfg = 0x00002644,
+ .bw_limit = 0x000026A0,
};
static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index f7c62a1..e94bb62 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -57,6 +57,12 @@ static const char drv_name[] = "vfe_bus";
buf_array[index++] = val; \
} while (0)
+static uint32_t bus_error_irq_mask[3] = {
+ 0x7800,
+ 0x0000,
+ 0x00C0,
+};
+
enum cam_vfe_bus_packer_format {
PACKER_FMT_PLAIN_128 = 0x0,
PACKER_FMT_PLAIN_8 = 0x1,
@@ -197,6 +203,7 @@ struct cam_vfe_bus_ver2_priv {
struct list_head used_comp_grp;
uint32_t irq_handle;
+ uint32_t error_irq_handle;
};
static int cam_vfe_bus_process_cmd(
@@ -209,7 +216,7 @@ static int cam_vfe_bus_get_evt_payload(
{
if (list_empty(&common_data->free_payload_list)) {
*evt_payload = NULL;
- CAM_ERR(CAM_ISP, "No free payload");
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
return -ENODEV;
}
@@ -256,7 +263,7 @@ static int cam_vfe_bus_put_evt_payload(void *core_info,
CAM_ERR(CAM_ISP, "No payload to put");
return -EINVAL;
}
-
+ (*evt_payload)->error_type = 0;
ife_irq_regs = (*evt_payload)->irq_reg_val;
status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
@@ -463,6 +470,7 @@ static int cam_vfe_bus_get_num_wm(
case CAM_FORMAT_UBWC_NV12_4R:
case CAM_FORMAT_UBWC_TP10:
case CAM_FORMAT_UBWC_P010:
+ case CAM_FORMAT_PLAIN16_10:
return 2;
default:
break;
@@ -738,10 +746,12 @@ static int cam_vfe_bus_get_wm_idx(
}
static enum cam_vfe_bus_packer_format
- cam_vfe_bus_get_packer_fmt(uint32_t out_fmt)
+ cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
{
switch (out_fmt) {
case CAM_FORMAT_NV21:
+ if (wm_index == 4 || wm_index == 6)
+ return PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN;
case CAM_FORMAT_NV12:
case CAM_FORMAT_UBWC_NV12:
case CAM_FORMAT_UBWC_NV12_4R:
@@ -817,7 +827,8 @@ static int cam_vfe_bus_acquire_wm(
rsrc_data->irq_enabled = subscribe_irq;
rsrc_data->ctx = ctx;
rsrc_data->format = out_port_info->format;
- rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format);
+ rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
+ wm_idx);
rsrc_data->width = out_port_info->width;
rsrc_data->height = out_port_info->height;
@@ -951,6 +962,19 @@ static int cam_vfe_bus_acquire_wm(
return -EINVAL;
}
break;
+ case CAM_FORMAT_PLAIN16_10:
+ switch (plane) {
+ case PLANE_C:
+ rsrc_data->height /= 2;
+ break;
+ case PLANE_Y:
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+ return -EINVAL;
+ }
+ rsrc_data->width *= 2;
+ break;
default:
CAM_ERR(CAM_ISP, "Invalid format %d",
rsrc_data->format);
@@ -968,16 +992,30 @@ static int cam_vfe_bus_acquire_wm(
rsrc_data->width = rsrc_data->width * 2;
rsrc_data->stride = rsrc_data->width;
rsrc_data->en_cfg = 0x1;
+
+ /* LSB aligned */
+ rsrc_data->pack_fmt |= 0x10;
} else {
/* Write master 5-6 DS ports, 10 PDAF */
+ uint32_t align_width;
rsrc_data->width = rsrc_data->width * 4;
rsrc_data->height = rsrc_data->height / 2;
rsrc_data->en_cfg = 0x1;
+ CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width);
+ align_width = ALIGNUP(rsrc_data->width, 16);
+ if (align_width != rsrc_data->width) {
+ CAM_WARN(CAM_ISP,
+ "Override width %u with expected %u",
+ rsrc_data->width, align_width);
+ rsrc_data->width = align_width;
+ }
}
*client_done_mask = (1 << wm_idx);
*wm_res = wm_res_local;
+ CAM_DBG(CAM_ISP, "WM %d: processed width %d, processed height %d",
+ rsrc_data->index, rsrc_data->width, rsrc_data->height);
return 0;
}
@@ -1027,8 +1065,6 @@ static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
rsrc_data->common_data;
uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
- cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
- cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
cam_io_w_mb(rsrc_data->width,
@@ -1110,6 +1146,8 @@ static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
common_data->mem_base + common_data->common_reg->sw_reset);
wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ rsrc_data->init_cfg_done = false;
+ rsrc_data->hfr_cfg_done = false;
return rc;
}
@@ -2117,6 +2155,7 @@ static int cam_vfe_bus_stop_vfe_out(
if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+ CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
return rc;
}
@@ -2260,38 +2299,57 @@ static int cam_vfe_bus_ver2_handle_irq(uint32_t evt_id,
bus_priv->common_data.bus_irq_controller);
}
-static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
+static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id,
+ struct cam_irq_th_payload *th_payload)
+{
+ int i = 0;
+ struct cam_vfe_bus_ver2_priv *bus_priv = th_payload->handler_priv;
+
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
+ for (i = 0; i < th_payload->num_registers; i++) {
+ CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ_Status%d: 0x%x", i,
+ th_payload->evt_status_arr[i]);
+ }
+ cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
+
+ /* Returning error stops from enqueuing bottom half */
+ return -EFAULT;
+}
+
+static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
uint32_t arg_size)
{
struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_isp_hw_get_buf_update *update_buf;
+ struct cam_isp_hw_get_cmd_update *update_buf;
struct cam_buf_io_cfg *io_cfg;
struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
uint32_t *reg_val_pair;
uint32_t i, j, size = 0;
- uint32_t frame_inc = 0;
+ uint32_t frame_inc = 0, ubwc_bw_limit = 0, camera_hw_version, val;
+ int rc = 0;
bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
- update_buf = (struct cam_isp_hw_get_buf_update *) cmd_args;
+ update_buf = (struct cam_isp_hw_get_cmd_update *) cmd_args;
vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
- update_buf->cdm.res->res_priv;
+ update_buf->res->res_priv;
if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
CAM_ERR(CAM_ISP, "Failed! Invalid data");
return -EINVAL;
}
- if (update_buf->num_buf != vfe_out_data->num_wm) {
+ if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
CAM_ERR(CAM_ISP,
"Failed! Invalid number buffers:%d required:%d",
- update_buf->num_buf, vfe_out_data->num_wm);
+ update_buf->wm_update->num_buf, vfe_out_data->num_wm);
return -EINVAL;
}
reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
- io_cfg = update_buf->io_cfg;
+ io_cfg = update_buf->wm_update->io_cfg;
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
@@ -2308,17 +2366,27 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->hw_regs->buffer_width_cfg,
wm_data->width);
CAM_DBG(CAM_ISP, "WM %d image width 0x%x",
- wm_data->index, wm_data->width);
+ wm_data->index, reg_val_pair[j-1]);
/* For initial configuration program all bus registers */
- if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
+ val = io_cfg->planes[i].plane_stride;
+ CAM_DBG(CAM_ISP, "before stride %d", val);
+ val = ALIGNUP(val, 16);
+ if (val != io_cfg->planes[i].plane_stride &&
+ val != wm_data->stride)
+ CAM_WARN(CAM_ISP,
+ "Warning stride %u expected %u",
+ io_cfg->planes[i].plane_stride,
+ val);
+
+ if ((wm_data->stride != val ||
!wm_data->init_cfg_done) && (wm_data->index >= 3)) {
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->stride,
io_cfg->planes[i].plane_stride);
- wm_data->stride = io_cfg->planes[i].plane_stride;
+ wm_data->stride = val;
CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
- wm_data->index, wm_data->stride);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
@@ -2328,8 +2396,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
io_cfg->framedrop_pattern);
wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x",
- wm_data->index,
- wm_data->framedrop_pattern);
+ wm_data->index, reg_val_pair[j-1]);
}
@@ -2340,8 +2407,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
io_cfg->framedrop_period);
wm_data->framedrop_period = io_cfg->framedrop_period;
CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x",
- wm_data->index,
- wm_data->framedrop_period);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->irq_subsample_period != io_cfg->subsample_period
@@ -2352,8 +2418,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->irq_subsample_period =
io_cfg->subsample_period;
CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x",
- wm_data->index,
- wm_data->irq_subsample_period);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
@@ -2364,8 +2429,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->irq_subsample_pattern =
io_cfg->subsample_pattern;
CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x",
- wm_data->index,
- wm_data->irq_subsample_pattern);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->en_ubwc) {
@@ -2383,7 +2447,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->packer_cfg =
io_cfg->planes[i].packer_config;
CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
- wm_data->index, wm_data->packer_cfg);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->is_dual) {
@@ -2399,21 +2463,21 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->tile_cfg =
io_cfg->planes[i].tile_config;
CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
- wm_data->index, wm_data->tile_cfg);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->is_dual) {
if ((wm_data->h_init != wm_data->offset) ||
!wm_data->init_cfg_done) {
- /*
- * For dual ife h init value need to take from
- * offset.Striping config update offset value
- */
+ /*
+ * For dual ife h init value need to
+ * take from offset. Striping config
+ * update offset value.
+ */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair,
j,
wm_data->hw_regs->ubwc_regs->
- h_init,
- wm_data->offset);
+ h_init, wm_data->offset);
wm_data->h_init = wm_data->offset;
}
} else if (wm_data->h_init !=
@@ -2424,7 +2488,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
io_cfg->planes[i].h_init);
wm_data->h_init = io_cfg->planes[i].h_init;
CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
- wm_data->index, wm_data->h_init);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->v_init != io_cfg->planes[i].v_init ||
@@ -2434,7 +2498,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
io_cfg->planes[i].v_init);
wm_data->v_init = io_cfg->planes[i].v_init;
CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
- wm_data->index, wm_data->v_init);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->ubwc_meta_stride !=
@@ -2447,8 +2511,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->ubwc_meta_stride =
io_cfg->planes[i].meta_stride;
CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
- wm_data->index,
- wm_data->ubwc_meta_stride);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->ubwc_mode_cfg !=
@@ -2460,7 +2523,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->ubwc_mode_cfg =
io_cfg->planes[i].mode_config;
CAM_DBG(CAM_ISP, "WM %d ubwc mode cfg 0x%x",
- wm_data->index, wm_data->ubwc_mode_cfg);
+ wm_data->index, reg_val_pair[j-1]);
}
if (wm_data->ubwc_meta_offset !=
@@ -2473,30 +2536,54 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
wm_data->ubwc_meta_offset =
io_cfg->planes[i].meta_offset;
CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
- wm_data->index,
- wm_data->ubwc_meta_offset);
+ wm_data->index, reg_val_pair[j-1]);
}
/* UBWC meta address */
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->ubwc_regs->meta_addr,
- update_buf->image_buf[i]);
+ update_buf->wm_update->image_buf[i]);
CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx",
- wm_data->index, update_buf->image_buf[i]);
+ wm_data->index,
+ update_buf->wm_update->image_buf[i]);
+
+ /* Enable UBWC bandwidth limit if required */
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+ if (camera_hw_version == CAM_CPAS_TITAN_170_V110
+ && !rc) {
+ switch (wm_data->format) {
+ case CAM_FORMAT_UBWC_TP10:
+ ubwc_bw_limit = 0x8 | BIT(0);
+ break;
+ case CAM_FORMAT_UBWC_NV12_4R:
+ ubwc_bw_limit = 0xB | BIT(0);
+ break;
+ default:
+ ubwc_bw_limit = 0;
+ break;
+ }
+ }
+
+ if (ubwc_bw_limit) {
+ CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ wm_data->hw_regs->ubwc_regs->bw_limit,
+ ubwc_bw_limit);
+ CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+ wm_data->index, ubwc_bw_limit);
+ }
}
/* WM Image address */
if (wm_data->en_ubwc)
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->image_addr,
- (update_buf->image_buf[i] +
+ (update_buf->wm_update->image_buf[i] +
io_cfg->planes[i].meta_size));
else
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
- wm_data->hw_regs->image_addr,
- update_buf->image_buf[i] +
- wm_data->offset);
-
+ wm_data->hw_regs->image_addr,
+ update_buf->wm_update->image_buf[i] +
+ wm_data->offset);
CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
wm_data->index, reg_val_pair[j-1]);
@@ -2505,7 +2592,7 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
wm_data->hw_regs->frame_inc, frame_inc);
CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
- wm_data->index, frame_inc);
+ wm_data->index, reg_val_pair[j-1]);
/* enable the WM */
@@ -2521,18 +2608,18 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
/* cdm util returns dwords, need to convert to bytes */
- if ((size * 4) > update_buf->cdm.size) {
+ if ((size * 4) > update_buf->cmd.size) {
CAM_ERR(CAM_ISP,
"Failed! Buf size:%d insufficient, expected size:%d",
- update_buf->cdm.size, size);
+ update_buf->cmd.size, size);
return -ENOMEM;
}
vfe_out_data->cdm_util_ops->cdm_write_regrandom(
- update_buf->cdm.cmd_buf_addr, j/2, reg_val_pair);
+ update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
/* cdm util returns dwords, need to convert to bytes */
- update_buf->cdm.used_bytes = size * 4;
+ update_buf->cmd.used_bytes = size * 4;
return 0;
}
@@ -2541,7 +2628,7 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
uint32_t arg_size)
{
struct cam_vfe_bus_ver2_priv *bus_priv;
- struct cam_isp_hw_get_hfr_update *update_hfr;
+ struct cam_isp_hw_get_cmd_update *update_hfr;
struct cam_vfe_bus_ver2_vfe_out_data *vfe_out_data = NULL;
struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
struct cam_isp_port_hfr_config *hfr_cfg = NULL;
@@ -2549,10 +2636,10 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
uint32_t i, j, size = 0;
bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
- update_hfr = (struct cam_isp_hw_get_hfr_update *) cmd_args;
+ update_hfr = (struct cam_isp_hw_get_cmd_update *) cmd_args;
vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
- update_hfr->cdm.res->res_priv;
+ update_hfr->res->res_priv;
if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
CAM_ERR(CAM_ISP, "Failed! Invalid data");
@@ -2560,7 +2647,7 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
}
reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
- hfr_cfg = update_hfr->io_hfr_cfg;
+ hfr_cfg = update_hfr->hfr_update;
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
@@ -2623,18 +2710,18 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
/* cdm util returns dwords, need to convert to bytes */
- if ((size * 4) > update_hfr->cdm.size) {
+ if ((size * 4) > update_hfr->cmd.size) {
CAM_ERR(CAM_ISP,
"Failed! Buf size:%d insufficient, expected size:%d",
- update_hfr->cdm.size, size);
+ update_hfr->cmd.size, size);
return -ENOMEM;
}
vfe_out_data->cdm_util_ops->cdm_write_regrandom(
- update_hfr->cdm.cmd_buf_addr, j/2, reg_val_pair);
+ update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
/* cdm util returns dwords, need to convert to bytes */
- update_hfr->cdm.used_bytes = size * 4;
+ update_hfr->cmd.used_bytes = size * 4;
return 0;
}
@@ -2723,6 +2810,21 @@ static int cam_vfe_bus_init_hw(void *hw_priv,
return -EFAULT;
}
+ bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+ bus_priv->common_data.bus_irq_controller,
+ CAM_IRQ_PRIORITY_0,
+ bus_error_irq_mask,
+ bus_priv,
+ cam_vfe_bus_error_irq_top_half,
+ NULL,
+ NULL,
+ NULL);
+
+ if (bus_priv->error_irq_handle <= 0) {
+ CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ");
+ return -EFAULT;
+ }
+
/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->addr_sync_frame_hdr);
@@ -2747,17 +2849,28 @@ static int cam_vfe_bus_deinit_hw(void *hw_priv,
struct cam_vfe_bus_ver2_priv *bus_priv = hw_priv;
int rc;
- if (!bus_priv || (bus_priv->irq_handle <= 0)) {
+ if (!bus_priv || (bus_priv->irq_handle <= 0) ||
+ (bus_priv->error_irq_handle <= 0)) {
CAM_ERR(CAM_ISP, "Error: Invalid args");
return -EINVAL;
}
rc = cam_irq_controller_unsubscribe_irq(
+ bus_priv->common_data.bus_irq_controller,
+ bus_priv->error_irq_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Failed to unsubscribe error irq rc=%d", rc);
+
+ bus_priv->error_irq_handle = 0;
+
+ rc = cam_irq_controller_unsubscribe_irq(
bus_priv->common_data.vfe_irq_controller,
bus_priv->irq_handle);
if (rc)
CAM_ERR(CAM_ISP, "Failed to unsubscribe irq rc=%d", rc);
+ bus_priv->irq_handle = 0;
+
return rc;
}
@@ -2779,16 +2892,16 @@ static int cam_vfe_bus_process_cmd(
}
switch (cmd_type) {
- case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
- rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
+ case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+ rc = cam_vfe_bus_update_wm(priv, cmd_args, arg_size);
break;
- case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
+ case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
break;
- case CAM_VFE_HW_CMD_GET_SECURE_MODE:
+ case CAM_ISP_HW_CMD_GET_SECURE_MODE:
rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
break;
- case CAM_VFE_HW_CMD_STRIPE_UPDATE:
+ case CAM_ISP_HW_CMD_STRIPE_UPDATE:
rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, arg_size);
break;
default:
@@ -3008,4 +3121,3 @@ int cam_vfe_bus_ver2_deinit(
return rc;
}
-
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index ed7d5fe..5a12f74 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -98,6 +98,7 @@ struct cam_vfe_bus_ver2_reg_offset_ubwc_client {
uint32_t meta_offset;
uint32_t meta_stride;
uint32_t mode_cfg;
+ uint32_t bw_limit;
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index 0a94746..9a2c12c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -1,10 +1,12 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index e81a9f2..9848454 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -16,6 +16,7 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_isp_hw.h"
#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
#include "cam_vfe_camif_ver2.h"
@@ -28,8 +29,10 @@ struct cam_vfe_mux_camif_data {
struct cam_vfe_camif_ver2_reg *camif_reg;
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
struct cam_vfe_camif_reg_data *reg_data;
+ struct cam_hw_soc_info *soc_info;
enum cam_isp_hw_sync_mode sync_mode;
+ uint32_t dsp_mode;
uint32_t pix_pattern;
uint32_t first_pixel;
uint32_t first_line;
@@ -66,11 +69,11 @@ static int cam_vfe_camif_get_reg_update(
{
uint32_t size = 0;
uint32_t reg_val_pair[2];
- struct cam_isp_hw_get_cdm_args *cdm_args = cmd_args;
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
struct cam_vfe_mux_camif_data *rsrc_data = NULL;
- if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
CAM_ERR(CAM_ISP, "Invalid cmd size");
return -EINVAL;
}
@@ -89,9 +92,9 @@ static int cam_vfe_camif_get_reg_update(
size = cdm_util_ops->cdm_required_size_reg_random(1);
/* since cdm returns dwords, we need to convert it into bytes */
- if ((size * 4) > cdm_args->size) {
+ if ((size * 4) > cdm_args->cmd.size) {
CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
- cdm_args->size, size);
+ cdm_args->cmd.size, size);
return -EINVAL;
}
@@ -101,10 +104,10 @@ static int cam_vfe_camif_get_reg_update(
CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd %x offset %x",
reg_val_pair[1], reg_val_pair[0]);
- cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
+ cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
1, reg_val_pair);
- cdm_args->used_bytes = size * 4;
+ cdm_args->cmd.used_bytes = size * 4;
return 0;
}
@@ -113,8 +116,8 @@ int cam_vfe_camif_ver2_acquire_resource(
struct cam_isp_resource_node *camif_res,
void *acquire_param)
{
- struct cam_vfe_mux_camif_data *camif_data;
- struct cam_vfe_acquire_args *acquire_data;
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_vfe_acquire_args *acquire_data;
int rc = 0;
@@ -128,6 +131,7 @@ int cam_vfe_camif_ver2_acquire_resource(
camif_data->sync_mode = acquire_data->vfe_in.sync_mode;
camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern;
+ camif_data->dsp_mode = acquire_data->vfe_in.in_port->dsp_mode;
camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start;
camif_data->last_pixel = acquire_data->vfe_in.in_port->left_stop;
camif_data->first_line = acquire_data->vfe_in.in_port->line_start;
@@ -136,6 +140,61 @@ int cam_vfe_camif_ver2_acquire_resource(
return rc;
}
+static int cam_vfe_camif_resource_init(
+ struct cam_isp_resource_node *camif_res,
+ void *init_args, uint32_t arg_size)
+{
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_hw_soc_info *soc_info;
+ int rc = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+ return -EINVAL;
+ }
+
+ camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+ soc_info = camif_data->soc_info;
+
+ if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP, "failed to enable dsp clk");
+ }
+
+ return rc;
+}
+
+static int cam_vfe_camif_resource_deinit(
+ struct cam_isp_resource_node *camif_res,
+ void *init_args, uint32_t arg_size)
+{
+ struct cam_vfe_mux_camif_data *camif_data;
+ struct cam_hw_soc_info *soc_info;
+ int rc = 0;
+
+ if (!camif_res) {
+ CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+ return -EINVAL;
+ }
+
+ camif_data = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+ soc_info = camif_data->soc_info;
+
+ if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+ if (rc)
+ CAM_ERR(CAM_ISP, "failed to disable dsp clk");
+ }
+
+ return rc;
+
+}
+
static int cam_vfe_camif_resource_start(
struct cam_isp_resource_node *camif_res)
{
@@ -161,6 +220,15 @@ static int cam_vfe_camif_resource_start(
if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+ if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ /* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
+ val |= (((rsrc_data->dsp_mode - 1) &
+ rsrc_data->reg_data->dsp_mode_mask) <<
+ rsrc_data->reg_data->dsp_mode_shift);
+ val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
+ }
+
cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx,
@@ -194,6 +262,7 @@ static int cam_vfe_camif_resource_stop(
struct cam_vfe_mux_camif_data *camif_priv;
struct cam_vfe_camif_ver2_reg *camif_reg;
int rc = 0;
+ uint32_t val = 0;
if (!camif_res) {
CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -207,6 +276,15 @@ static int cam_vfe_camif_resource_stop(
camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
camif_reg = camif_priv->camif_reg;
+ if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+ (camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+ val = cam_io_r_mb(camif_priv->mem_base +
+ camif_priv->common_reg->core_cfg);
+ val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
+ cam_io_w_mb(val, camif_priv->mem_base +
+ camif_priv->common_reg->core_cfg);
+ }
+
if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
@@ -224,7 +302,7 @@ static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node,
}
switch (cmd_type) {
- case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
rc = cam_vfe_camif_get_reg_update(rsrc_node, cmd_args,
arg_size);
break;
@@ -251,6 +329,7 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
struct cam_vfe_mux_camif_data *camif_priv;
struct cam_vfe_top_irq_evt_payload *payload;
uint32_t irq_status0;
+ uint32_t irq_status1;
if (!handler_priv || !evt_payload_priv)
return ret;
@@ -259,6 +338,7 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
camif_priv = camif_node->res_priv;
payload = evt_payload_priv;
irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+ irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
@@ -289,6 +369,15 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
break;
+ case CAM_ISP_HW_EVENT_ERROR:
+ if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+ CAM_DBG(CAM_ISP, "Received ERROR\n");
+ ret = CAM_ISP_HW_ERROR_OVERFLOW;
+ cam_vfe_put_evt_payload(payload->core_info, &payload);
+ } else {
+ ret = CAM_ISP_HW_ERROR_NONE;
+ }
+ break;
default:
break;
}
@@ -315,14 +404,17 @@ int cam_vfe_camif_ver2_init(
camif_node->res_priv = camif_priv;
- camif_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
- camif_priv->camif_reg = camif_info->camif_reg;
- camif_priv->common_reg = camif_info->common_reg;
- camif_priv->reg_data = camif_info->reg_data;
- camif_priv->hw_intf = hw_intf;
+ camif_priv->mem_base = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+ camif_priv->camif_reg = camif_info->camif_reg;
+ camif_priv->common_reg = camif_info->common_reg;
+ camif_priv->reg_data = camif_info->reg_data;
+ camif_priv->hw_intf = hw_intf;
+ camif_priv->soc_info = soc_info;
- camif_node->start = cam_vfe_camif_resource_start;
- camif_node->stop = cam_vfe_camif_resource_stop;
+ camif_node->init = cam_vfe_camif_resource_init;
+ camif_node->deinit = cam_vfe_camif_resource_deinit;
+ camif_node->start = cam_vfe_camif_resource_start;
+ camif_node->stop = cam_vfe_camif_resource_stop;
camif_node->process_cmd = cam_vfe_camif_process_cmd;
camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 847b7d5..4a73bd7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -50,12 +50,19 @@ struct cam_vfe_camif_reg_data {
uint32_t pixel_pattern_shift;
uint32_t pixel_pattern_mask;
+ uint32_t dsp_mode_shift;
+ uint32_t dsp_mode_mask;
+ uint32_t dsp_en_shift;
+ uint32_t dsp_en_mask;
+
uint32_t reg_update_cmd_data;
uint32_t epoch_line_cfg;
uint32_t sof_irq_mask;
uint32_t epoch0_irq_mask;
uint32_t reg_update_irq_mask;
uint32_t eof_irq_mask;
+ uint32_t error_irq_mask0;
+ uint32_t error_irq_mask1;
};
struct cam_vfe_camif_ver2_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 797873c..28e99f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -35,11 +35,11 @@ static int cam_vfe_rdi_get_reg_update(
{
uint32_t size = 0;
uint32_t reg_val_pair[2];
- struct cam_isp_hw_get_cdm_args *cdm_args = cmd_args;
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
struct cam_vfe_mux_rdi_data *rsrc_data = NULL;
- if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
CAM_ERR(CAM_ISP, "Error - Invalid cmd size");
return -EINVAL;
}
@@ -57,10 +57,10 @@ static int cam_vfe_rdi_get_reg_update(
size = cdm_util_ops->cdm_required_size_reg_random(1);
/* since cdm returns dwords, we need to convert it into bytes */
- if ((size * 4) > cdm_args->size) {
+ if ((size * 4) > cdm_args->cmd.size) {
CAM_ERR(CAM_ISP,
"Error - buf size:%d is not sufficient, expected: %d",
- cdm_args->size, size * 4);
+ cdm_args->cmd.size, size * 4);
return -EINVAL;
}
@@ -70,9 +70,9 @@ static int cam_vfe_rdi_get_reg_update(
CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x",
rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]);
- cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
+ cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
1, reg_val_pair);
- cdm_args->used_bytes = size * 4;
+ cdm_args->cmd.used_bytes = size * 4;
return 0;
}
@@ -158,7 +158,7 @@ static int cam_vfe_rdi_process_cmd(struct cam_isp_resource_node *rsrc_node,
}
switch (cmd_type) {
- case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
rc = cam_vfe_rdi_get_reg_update(rsrc_node, cmd_args,
arg_size);
break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index f87953d..1b8cdf3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -17,6 +17,11 @@
#include "cam_vfe_top.h"
#include "cam_vfe_top_ver2.h"
#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL 0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL 0x00003F87
struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
@@ -26,8 +31,11 @@ struct cam_vfe_top_ver2_common_data {
struct cam_vfe_top_ver2_priv {
struct cam_vfe_top_ver2_common_data common_data;
- struct cam_vfe_camif *camif;
struct cam_isp_resource_node mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long hw_clk_rate;
+ struct cam_axi_vote hw_axi_vote;
+ struct cam_axi_vote req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
+ unsigned long req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
};
static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
@@ -35,10 +43,10 @@ static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
{
uint32_t size = 0;
uint32_t mem_base = 0;
- struct cam_isp_hw_get_cdm_args *cdm_args = cmd_args;
+ struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
struct cam_cdm_utils_ops *cdm_util_ops = NULL;
- if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
+ if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
return -EINVAL;
}
@@ -59,9 +67,9 @@ static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
size = cdm_util_ops->cdm_required_size_changebase();
/* since cdm returns dwords, we need to convert it into bytes */
- if ((size * 4) > cdm_args->size) {
+ if ((size * 4) > cdm_args->cmd.size) {
CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
- cdm_args->size, size);
+ cdm_args->cmd.size, size);
return -EINVAL;
}
@@ -70,21 +78,190 @@ static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
top_priv->common_data.soc_info->index, mem_base);
- cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
- cdm_args->used_bytes = (size * 4);
+ cdm_util_ops->cdm_write_changebase(
+ cdm_args->cmd.cmd_buf_addr, mem_base);
+ cdm_args->cmd.used_bytes = (size * 4);
return 0;
}
+static int cam_vfe_top_set_hw_clk_rate(
+ struct cam_vfe_top_ver2_priv *top_priv)
+{
+ struct cam_hw_soc_info *soc_info = NULL;
+ int i, rc = 0;
+ unsigned long max_clk_rate = 0;
+
+ soc_info = top_priv->common_data.soc_info;
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->req_clk_rate[i] > max_clk_rate)
+ max_clk_rate = top_priv->req_clk_rate[i];
+ }
+ if (max_clk_rate == top_priv->hw_clk_rate)
+ return 0;
+
+	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%lu",
+ soc_info->clk_name[soc_info->src_clk_idx],
+ soc_info->src_clk_idx, max_clk_rate);
+
+ rc = cam_soc_util_set_clk_rate(
+ soc_info->clk[soc_info->src_clk_idx],
+ soc_info->clk_name[soc_info->src_clk_idx],
+ max_clk_rate);
+
+ if (!rc)
+ top_priv->hw_clk_rate = max_clk_rate;
+ else
+ CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_top_set_axi_bw_vote(
+ struct cam_vfe_top_ver2_priv *top_priv)
+{
+ struct cam_axi_vote sum = {0, 0};
+ int i, rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ top_priv->common_data.soc_info;
+ struct cam_vfe_soc_private *soc_private =
+ soc_info->soc_private;
+
+ if (!soc_private) {
+ CAM_ERR(CAM_ISP, "Error soc_private NULL");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ sum.uncompressed_bw +=
+ top_priv->req_axi_vote[i].uncompressed_bw;
+ sum.compressed_bw +=
+ top_priv->req_axi_vote[i].compressed_bw;
+ }
+
+	CAM_DBG(CAM_ISP, "BW Vote: u=%llu c=%llu",
+ sum.uncompressed_bw,
+ sum.compressed_bw);
+
+ if ((top_priv->hw_axi_vote.uncompressed_bw ==
+ sum.uncompressed_bw) &&
+ (top_priv->hw_axi_vote.compressed_bw ==
+ sum.compressed_bw))
+ return 0;
+
+ rc = cam_cpas_update_axi_vote(
+ soc_private->cpas_handle,
+ &sum);
+ if (!rc) {
+ top_priv->hw_axi_vote.uncompressed_bw = sum.uncompressed_bw;
+ top_priv->hw_axi_vote.compressed_bw = sum.compressed_bw;
+ } else
+ CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+
+ return rc;
+}
+
+static int cam_vfe_top_clock_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_clock_update_args *clk_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int i, rc = 0;
+
+ clk_update =
+ (struct cam_vfe_clock_update_args *)cmd_args;
+ res = clk_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv) {
+ CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+ return -EINVAL;
+ }
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_clk_rate[i] = clk_update->clk_rate;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP, "VFE:%d Not ready to set clocks yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+
+ return rc;
+}
+
+static int cam_vfe_top_bw_update(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_bw_update_args *bw_update = NULL;
+ struct cam_isp_resource_node *res = NULL;
+ struct cam_hw_info *hw_info = NULL;
+ int rc = 0;
+ int i;
+
+ bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+ res = bw_update->node_res;
+
+ if (!res || !res->hw_intf->hw_priv)
+ return -EINVAL;
+
+ hw_info = res->hw_intf->hw_priv;
+
+ if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+ res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+ CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+ res->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+ if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+ top_priv->req_axi_vote[i].uncompressed_bw =
+ bw_update->camnoc_bw_bytes;
+ top_priv->req_axi_vote[i].compressed_bw =
+ bw_update->external_bw_bytes;
+ break;
+ }
+ }
+
+ if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_DBG(CAM_ISP, "VFE:%d Not ready to set BW yet :%d",
+ res->hw_intf->hw_idx,
+ hw_info->hw_state);
+ } else
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+
+ return rc;
+}
+
static int cam_vfe_top_mux_get_reg_update(
struct cam_vfe_top_ver2_priv *top_priv,
void *cmd_args, uint32_t arg_size)
{
- struct cam_isp_hw_get_cdm_args *cdm_args = cmd_args;
+ struct cam_isp_hw_get_cmd_update *cmd_update = cmd_args;
- if (cdm_args->res->process_cmd)
- return cdm_args->res->process_cmd(cdm_args->res,
- CAM_VFE_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
+ if (cmd_update->res->process_cmd)
+ return cmd_update->res->process_cmd(cmd_update->res,
+ CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
return -EINVAL;
}
@@ -107,12 +284,24 @@ int cam_vfe_top_reset(void *device_priv,
struct cam_vfe_top_ver2_priv *top_priv = device_priv;
struct cam_hw_soc_info *soc_info = NULL;
struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+ uint32_t *reset_reg_args = reset_core_args;
+ uint32_t reset_reg_val;
- if (!top_priv) {
+ if (!top_priv || !reset_reg_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
+ switch (*reset_reg_args) {
+ case CAM_VFE_HW_RESET_HW_AND_REG:
+ reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+ break;
+ default:
+ reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+ break;
+ }
+
+ CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
soc_info = top_priv->common_data.soc_info;
reg_common = top_priv->common_data.common_reg;
@@ -121,7 +310,7 @@ int cam_vfe_top_reset(void *device_priv,
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
/* Reset HW */
- cam_io_w_mb(0x00003F9F,
+ cam_io_w_mb(reset_reg_val,
CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
reg_common->global_reset_cmd);
@@ -214,9 +403,21 @@ int cam_vfe_top_start(void *device_priv,
return -EINVAL;
}
- top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+ top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
mux_res = (struct cam_isp_resource_node *)start_args;
+ rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "set_hw_clk_rate failed, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+ if (rc) {
+		CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
+ return rc;
+ }
+
if (mux_res->start) {
rc = mux_res->start(mux_res);
} else {
@@ -232,7 +433,7 @@ int cam_vfe_top_stop(void *device_priv,
{
struct cam_vfe_top_ver2_priv *top_priv;
struct cam_isp_resource_node *mux_res;
- int rc = 0;
+ int i, rc = 0;
if (!device_priv || !stop_args) {
CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -251,8 +452,17 @@
		rc = -EINVAL;
	}
-	return rc;
+	if (!rc) {
+		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+			}
+		}
+	}
+	return rc;
}
int cam_vfe_top_read(void *device_priv,
@@ -280,13 +489,21 @@ int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
switch (cmd_type) {
- case CAM_VFE_HW_CMD_GET_CHANGE_BASE:
+ case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size);
break;
- case CAM_VFE_HW_CMD_GET_REG_UPDATE:
+ case CAM_ISP_HW_CMD_GET_REG_UPDATE:
rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
arg_size);
break;
+ case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+ rc = cam_vfe_top_clock_update(top_priv, cmd_args,
+ arg_size);
+ break;
+ case CAM_ISP_HW_CMD_BW_UPDATE:
+ rc = cam_vfe_top_bw_update(top_priv, cmd_args,
+ arg_size);
+ break;
default:
rc = -EINVAL;
CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
@@ -322,12 +539,19 @@ int cam_vfe_top_ver2_init(
goto free_vfe_top;
}
vfe_top->top_priv = top_priv;
+ top_priv->hw_clk_rate = 0;
+ top_priv->hw_axi_vote.compressed_bw = 0;
+ top_priv->hw_axi_vote.uncompressed_bw = 0;
for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
top_priv->mux_rsrc[i].hw_intf = hw_intf;
top_priv->mux_rsrc[i].res_state =
CAM_ISP_RESOURCE_STATE_AVAILABLE;
+ top_priv->req_clk_rate[i] = 0;
+ top_priv->req_axi_vote[i].compressed_bw = 0;
+ top_priv->req_axi_vote[i].uncompressed_bw = 0;
+
if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
top_priv->mux_rsrc[i].res_id =
CAM_ISP_HW_VFE_IN_CAMIF;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index dbb211f..81e3b48 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -29,21 +29,6 @@ struct cam_vfe_top {
struct cam_hw_ops hw_ops;
};
-struct cam_vfe_camif {
- void *camif_priv;
- int (*start_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*stop_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*acquire_resource)(void *priv,
- struct cam_isp_resource_node *camif_res,
- void *acquire_param);
- int (*release_resource)(void *priv,
- struct cam_isp_resource_node *camif_res);
- int (*process_cmd)(void *priv, uint32_t cmd_type, void *cmd_args,
- uint32_t arg_size);
-};
-
int cam_vfe_top_init(uint32_t top_version,
struct cam_hw_soc_info *soc_info,
struct cam_hw_intf *hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index a299179..6fcd7f6 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -21,6 +21,8 @@
#include "cam_context_utils.h"
#include "cam_debug_util.h"
+static const char jpeg_dev_name[] = "jpeg";
+
static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
{
@@ -109,8 +111,8 @@ int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
for (i = 0; i < CAM_CTX_REQ_MAX; i++)
ctx->req_base[i].req_priv = ctx;
- rc = cam_context_init(ctx_base, NULL, hw_intf, ctx->req_base,
- CAM_CTX_REQ_MAX);
+ rc = cam_context_init(ctx_base, jpeg_dev_name, NULL, hw_intf,
+ ctx->req_base, CAM_CTX_REQ_MAX);
if (rc) {
CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
goto err;
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 35c2717..df95100 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -312,8 +312,6 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
struct cam_jpeg_set_irq_cb irq_cb;
struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
struct cam_hw_done_event_data buf_data;
- uint32_t size = 0;
- uint32_t mem_cam_base = 0;
if (!hw_mgr || !task_data) {
CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
@@ -427,35 +425,11 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
cdm_cmd->cookie = 0;
cdm_cmd->cmd_arrary_count = 0;
- /* if for backward compat */
- if (config_args->hw_update_entries[CAM_JPEG_CHBASE].handle) {
- rc = cam_jpeg_insert_cdm_change_base(config_args,
- ctx_data, hw_mgr);
- if (rc) {
- CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
- goto end_callcb;
- }
- } else {
- mem_cam_base = hw_mgr->cdm_reg_map[dev_type][0]->
- mem_cam_base;
- size = hw_mgr->cdm_info[dev_type][0].cdm_ops->
- cdm_required_size_changebase();
- hw_mgr->cdm_info[dev_type][0].cdm_ops->
- cdm_write_changebase(ctx_data->cmd_chbase_buf_addr,
- hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base);
- ctx_data->cdm_cmd_chbase->cmd_arrary_count = 1;
- ctx_data->cdm_cmd_chbase->type =
- CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
- ctx_data->cdm_cmd_chbase->flag = false;
- ctx_data->cdm_cmd_chbase->userdata = NULL;
- ctx_data->cdm_cmd_chbase->cookie = 0;
- ctx_data->cdm_cmd_chbase->cmd[0].bl_addr.kernel_iova =
- ctx_data->cmd_chbase_buf_addr;
- ctx_data->cdm_cmd_chbase->cmd[0].offset = 0;
- ctx_data->cdm_cmd_chbase->cmd[0].len = size;
- cam_cdm_submit_bls(hw_mgr->cdm_info[dev_type][0].
- cdm_handle,
- ctx_data->cdm_cmd_chbase);
+ rc = cam_jpeg_insert_cdm_change_base(config_args,
+ ctx_data, hw_mgr);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
+ goto end_callcb;
}
CAM_DBG(CAM_JPEG, "num hw up %d", config_args->num_hw_update_entries);
@@ -659,13 +633,10 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
return -EINVAL;
}
- /* if for backward compat */
- if (packet->kmd_cmd_buf_index != -1) {
- rc = cam_packet_util_validate_packet(packet);
- if (rc) {
- CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
- return rc;
- }
+ rc = cam_packet_util_validate_packet(packet);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
+ return rc;
}
if ((packet->num_cmd_buf > 5) || !packet->num_patches ||
@@ -715,16 +686,12 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
}
+
j = prepare_args->num_hw_update_entries;
- /* if-else for backward compat */
- if (packet->kmd_cmd_buf_index != -1) {
- rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
- if (rc) {
- CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
- return rc;
- }
- } else {
- memset(&kmd_buf, 0x0, sizeof(kmd_buf));
+ rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
+ if (rc) {
+ CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
+ return rc;
}
/* fill kmd buf info into 1st hw update entry */
prepare_args->hw_update_entries[j].len =
@@ -859,6 +826,11 @@ static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
mutex_unlock(&ctx_data->ctx_mutex);
+ if (ctx_data->jpeg_dev_acquire_info.dev_type >=
+ CAM_JPEG_RES_TYPE_MAX) {
+ rc = -EINVAL;
+ goto acq_cdm_hdl_failed;
+ }
dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/Makefile b/drivers/media/platform/msm/camera/cam_lrme/Makefile
new file mode 100644
index 0000000..fba4529
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_dev.o cam_lrme_context.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
new file mode 100644
index 0000000..0aa5ade
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_lrme_context.h"
+
+static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
+ struct cam_acquire_dev_cmd *cmd)
+{
+ int rc = 0;
+ uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map;
+ struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire");
+ return rc;
+ }
+
+ ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
+ ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_start_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACTIVATED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
+ struct cam_config_dev_cmd *cmd)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to config");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
+ struct cam_start_stop_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_stop_dev_to_hw(ctx);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop dev");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_ACQUIRED;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
+ struct cam_release_dev_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to stop");
+ return rc;
+ }
+
+ rc = cam_context_release_dev_to_hw(ctx, cmd);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to release");
+ return rc;
+ }
+
+ ctx->state = CAM_CTX_AVAILABLE;
+
+ return rc;
+}
+
+static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
+ uint32_t evt_id, void *evt_data)
+{
+ int rc;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+ cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+ /* Uninit */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Available */
+ {
+ .ioctl_ops = {
+ .acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Acquired */
+ {
+ .ioctl_ops = {
+ .release_dev = __cam_lrme_ctx_release_dev_in_acquired,
+ .start_dev = __cam_lrme_ctx_start_dev_in_acquired,
+ },
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+ /* Ready */
+ {
+ .ioctl_ops = {},
+ .crm_ops = {},
+ .irq_ops = NULL,
+ },
+	/* Activated */
+ {
+ .ioctl_ops = {
+ .config_dev = __cam_lrme_ctx_config_dev_in_activated,
+ .release_dev = __cam_lrme_ctx_release_dev_in_activated,
+ .stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+ },
+ .crm_ops = {},
+ .irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
+ },
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx,
+ struct cam_hw_mgr_intf *hw_intf,
+ uint64_t index)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!base_ctx || !lrme_ctx) {
+ CAM_ERR(CAM_LRME, "Invalid input");
+ return -EINVAL;
+ }
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+
+ rc = cam_context_init(base_ctx, "lrme", NULL, hw_intf,
+ lrme_ctx->req_base, CAM_CTX_REQ_MAX);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to init context");
+ return rc;
+ }
+ lrme_ctx->base = base_ctx;
+ lrme_ctx->index = index;
+ base_ctx->ctx_priv = lrme_ctx;
+ base_ctx->state_machine = cam_lrme_ctx_state_machine;
+
+ return rc;
+}
+
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter");
+
+ if (!lrme_ctx) {
+ CAM_ERR(CAM_LRME, "No ctx to deinit");
+ return -EINVAL;
+ }
+
+ rc = cam_context_deinit(lrme_ctx->base);
+
+ memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
new file mode 100644
index 0000000..882f7ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_CONTEXT_H_
+#define _CAM_LRME_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_sync_api.h"
+
+#define CAM_LRME_CTX_INDEX_SHIFT 32
+
+/**
+ * struct cam_lrme_context
+ *
+ * @base : Base context pointer for this LRME context
+ * @req_base : List of base request for this LRME context
+ */
+struct cam_lrme_context {
+ struct cam_context *base;
+ struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ uint64_t index;
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+ struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+ uint64_t index);
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx);
+
+#endif /* _CAM_LRME_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
new file mode 100644
index 0000000..5be16ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_lrme_hw_mgr_intf.h"
+
+#define CAM_LRME_DEV_NAME "cam-lrme"
+
+/**
+ * struct cam_lrme_dev
+ *
+ * @sd : Subdev information
+ * @ctx : List of base contexts
+ * @lrme_ctx : List of LRME contexts
+ * @lock : Mutex for LRME subdev
+ * @open_cnt : Open count of LRME subdev
+ */
+struct cam_lrme_dev {
+ struct cam_subdev sd;
+ struct cam_context ctx[CAM_CTX_MAX];
+ struct cam_lrme_context lrme_ctx[CAM_CTX_MAX];
+ struct mutex lock;
+ uint32_t open_cnt;
+};
+
+static struct cam_lrme_dev *g_lrme_dev;
+
+static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id,
+ void *evt_data)
+{
+ uint64_t index;
+ struct cam_context *ctx;
+ int rc;
+
+ index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map);
+ CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id);
+ ctx = &g_lrme_dev->ctx[index];
+ rc = ctx->irq_cb_intf(ctx, evt_id, evt_data);
+ if (rc)
+ CAM_ERR(CAM_LRME, "irq callback failed");
+
+ return rc;
+}
+
+static int cam_lrme_dev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME,
+ "LRME Dev not initialized, dev=%pK", lrme_dev);
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ lrme_dev->open_cnt++;
+ mutex_unlock(&lrme_dev->lock);
+
+ return 0;
+}
+
+static int cam_lrme_dev_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+ struct cam_node *node = v4l2_get_subdevdata(sd);
+
+ if (!lrme_dev) {
+ CAM_ERR(CAM_LRME, "Invalid args");
+ return -ENODEV;
+ }
+
+ mutex_lock(&lrme_dev->lock);
+ lrme_dev->open_cnt--;
+ mutex_unlock(&lrme_dev->lock);
+
+ if (!node) {
+ CAM_ERR(CAM_LRME, "Node is NULL");
+ return -EINVAL;
+ }
+
+ if (lrme_dev->open_cnt == 0)
+ cam_node_shutdown(node);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
+ .open = cam_lrme_dev_open,
+ .close = cam_lrme_dev_close,
+};
+
+static int cam_lrme_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ int i;
+ struct cam_hw_mgr_intf hw_mgr_intf;
+ struct cam_node *node;
+
+ g_lrme_dev = kzalloc(sizeof(struct cam_lrme_dev), GFP_KERNEL);
+ if (!g_lrme_dev) {
+ CAM_ERR(CAM_LRME, "No memory");
+ return -ENOMEM;
+ }
+ g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops;
+
+ mutex_init(&g_lrme_dev->lock);
+
+ rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME,
+ CAM_LRME_DEVICE_TYPE);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed");
+ goto free_mem;
+ }
+ node = (struct cam_node *)g_lrme_dev->sd.token;
+
+ rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Can not initialized LRME HW manager");
+ goto unregister;
+ }
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i],
+ &g_lrme_dev->ctx[i],
+ &node->hw_mgr_intf, i);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME context init failed");
+ goto deinit_ctx;
+ }
+ }
+
+ rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX,
+ CAM_LRME_DEV_NAME);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME node init failed");
+ goto deinit_ctx;
+ }
+
+ CAM_DBG(CAM_LRME, "%s probe complete", g_lrme_dev->sd.name);
+
+ return 0;
+
+deinit_ctx:
+ for (--i; i >= 0; i--) {
+ if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]))
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+unregister:
+ if (cam_subdev_remove(&g_lrme_dev->sd))
+ CAM_ERR(CAM_LRME, "Failed in subdev remove");
+free_mem:
+ kfree(g_lrme_dev);
+
+ return rc;
+}
+
+static int cam_lrme_dev_remove(struct platform_device *pdev)
+{
+ int i;
+ int rc = 0;
+
+ for (i = 0; i < CAM_CTX_MAX; i++) {
+ rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]);
+ if (rc)
+ CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+ }
+
+ rc = cam_lrme_hw_mgr_deinit();
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc);
+
+ rc = cam_subdev_remove(&g_lrme_dev->sd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister failed");
+
+ mutex_destroy(&g_lrme_dev->lock);
+ kfree(g_lrme_dev);
+ g_lrme_dev = NULL;
+
+ return rc;
+}
+
+static const struct of_device_id cam_lrme_dt_match[] = {
+ {
+ .compatible = "qcom,cam-lrme"
+ },
+ {}
+};
+
+static struct platform_driver cam_lrme_driver = {
+ .probe = cam_lrme_dev_probe,
+ .remove = cam_lrme_dev_remove,
+ .driver = {
+ .name = "cam_lrme",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_dt_match,
+ },
+};
+
+static int __init cam_lrme_dev_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_driver);
+}
+
+static void __exit cam_lrme_dev_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_driver);
+}
+
+module_init(cam_lrme_dev_init_module);
+module_exit(cam_lrme_dev_exit_module);
+MODULE_DESCRIPTION("MSM LRME driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
new file mode 100644
index 0000000..e4c8e0d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
new file mode 100644
index 0000000..448086d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -0,0 +1,1034 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_mgr_intf.h"
+#include "cam_lrme_hw_mgr.h"
+
+static struct cam_lrme_hw_mgr g_lrme_hw_mgr;
+
+static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr,
+ struct cam_lrme_acquire_args *lrme_acquire_args)
+{
+ int i, index = 0;
+ uint32_t min_ctx = UINT_MAX;
+ struct cam_lrme_device *hw_device = NULL;
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ if (!hw_mgr->device_count) {
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ CAM_ERR(CAM_LRME, "No device is registered");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) {
+ hw_device = &hw_mgr->hw_device[i];
+ if (!hw_device->num_context) {
+ index = i;
+ break;
+ }
+ if (hw_device->num_context < min_ctx) {
+ min_ctx = hw_device->num_context;
+ index = i;
+ }
+ }
+
+ hw_device = &hw_mgr->hw_device[index];
+ hw_device->num_context++;
+
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ CAM_DBG(CAM_LRME, "reserve device index %d", index);
+
+ return index;
+}
+
+static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr,
+ uint32_t device_index, struct cam_lrme_device **hw_device)
+{
+ if (!hw_mgr) {
+ CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr);
+ return -EINVAL;
+ }
+
+ if (device_index >= CAM_LRME_HW_MAX) {
+ CAM_ERR(CAM_LRME, "Wrong device index %d", device_index);
+ return -EINVAL;
+ }
+
+ *hw_device = &hw_mgr->hw_device[device_index];
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet)
+{
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ int i, rc;
+
+ if (!packet) {
+ CAM_ERR(CAM_LRME, "Invalid args");
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+ packet->header.request_id, packet->header.op_code,
+ packet->header.size, packet->header.flags);
+ CAM_DBG(CAM_LRME,
+ "Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+ packet->cmd_buf_offset, packet->num_cmd_buf,
+ packet->io_configs_offset, packet->num_io_configs);
+ CAM_DBG(CAM_LRME,
+ "Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+ packet->patch_offset, packet->num_patches,
+ packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+ if (cam_packet_util_validate_packet(packet)) {
+ CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d",
+ packet->kmd_cmd_buf_index,
+ packet->num_cmd_buf, packet->cmd_buf_offset,
+ packet->io_configs_offset, packet->header.size);
+ return -EINVAL;
+ }
+
+ if (!packet->num_io_configs) {
+ CAM_ERR(CAM_LRME, "no io configs");
+ return -EINVAL;
+ }
+
+ cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload +
+ packet->cmd_buf_offset);
+
+ for (i = 0; i < packet->num_cmd_buf; i++) {
+ if (!cmd_desc[i].length)
+ continue;
+
+ CAM_DBG(CAM_LRME,
+ "CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+ i,
+ cmd_desc[i].mem_handle, cmd_desc[i].offset,
+ cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+ cmd_desc[i].meta_data);
+
+ rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_lrme_hw_io_buffer *input_buf,
+ struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+ int rc = -EINVAL;
+ uint32_t num_in_buf, num_out_buf, i, j, plane;
+ struct cam_buf_io_cfg *io_cfg;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+ size_t size;
+
+ num_in_buf = 0;
+ num_out_buf = 0;
+ io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
+ &prepare->packet->payload +
+ prepare->packet->io_configs_offset);
+
+ for (i = 0; i < prepare->packet->num_io_configs; i++) {
+ CAM_DBG(CAM_LRME,
+ "IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+ i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+ io_cfg[i].resource_type,
+ io_cfg[i].fence, io_cfg[i].format);
+
+ if ((num_in_buf > io_buf_size) ||
+ (num_out_buf > io_buf_size)) {
+ CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
+ num_in_buf, num_out_buf, io_buf_size);
+ return -EINVAL;
+ }
+
+ memset(io_addr, 0, sizeof(io_addr));
+ for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+ if (!io_cfg[i].mem_handle[plane])
+ break;
+
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+ iommu_hdl, &io_addr[plane], &size);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
+ plane, rc);
+ return -ENOMEM;
+ }
+
+ io_addr[plane] += io_cfg[i].offsets[plane];
+
+ if (io_addr[plane] >> 32) {
+ CAM_ERR(CAM_LRME, "Invalid io addr for %d %d",
+ plane, rc);
+ return -ENOMEM;
+ }
+
+ CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
+ io_cfg[i].direction, plane, io_addr[plane]);
+ }
+
+ switch (io_cfg[i].direction) {
+ case CAM_BUF_INPUT: {
+ prepare->in_map_entries[num_in_buf].resource_handle =
+ io_cfg[i].resource_type;
+ prepare->in_map_entries[num_in_buf].sync_id =
+ io_cfg[i].fence;
+
+ input_buf[num_in_buf].valid = true;
+ for (j = 0; j < plane; j++)
+ input_buf[num_in_buf].io_addr[j] = io_addr[j];
+ input_buf[num_in_buf].num_plane = plane;
+ input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+ num_in_buf++;
+ break;
+ }
+ case CAM_BUF_OUTPUT: {
+ prepare->out_map_entries[num_out_buf].resource_handle =
+ io_cfg[i].resource_type;
+ prepare->out_map_entries[num_out_buf].sync_id =
+ io_cfg[i].fence;
+
+ output_buf[num_out_buf].valid = true;
+ for (j = 0; j < plane; j++)
+ output_buf[num_out_buf].io_addr[j] = io_addr[j];
+ output_buf[num_out_buf].num_plane = plane;
+ output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+ num_out_buf++;
+ break;
+ }
+ default:
+ CAM_ERR(CAM_LRME, "Unsupported io direction %d",
+ io_cfg[i].direction);
+ return -EINVAL;
+ }
+ }
+ prepare->num_in_map_entries = num_in_buf;
+ prepare->num_out_map_entries = num_out_buf;
+
+ return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_hw_update_entries(
+ struct cam_lrme_hw_mgr *hw_mgr,
+ struct cam_hw_prepare_update_args *prepare,
+ struct cam_lrme_hw_cmd_config_args *config_args,
+ struct cam_kmd_buf_info *kmd_buf_info)
+{
+ int i, rc = 0;
+ struct cam_lrme_device *hw_device = NULL;
+ uint32_t *kmd_buf_addr;
+ uint32_t num_entry;
+ uint32_t kmd_buf_max_size;
+ uint32_t kmd_buf_used_bytes = 0;
+ struct cam_hw_update_entry *hw_entry;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+ hw_device = config_args->hw_device;
+ if (!hw_device) {
+ CAM_ERR(CAM_LRME, "Invalid hw_device");
+ return -EINVAL;
+ }
+
+ kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+ kmd_buf_info->used_bytes);
+ kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+ config_args->cmd_buf_addr = kmd_buf_addr;
+ config_args->size = kmd_buf_max_size;
+ config_args->config_buf_size = 0;
+
+ if (hw_device->hw_intf.hw_ops.process_cmd) {
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+ config_args,
+ sizeof(struct cam_lrme_hw_cmd_config_args));
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed in CMD_PREPARE_HW_UPDATE %d", rc);
+ return rc;
+ }
+ } else {
+ CAM_ERR(CAM_LRME, "Can't find handle function");
+ return -EINVAL;
+ }
+
+ kmd_buf_used_bytes += config_args->config_buf_size;
+
+ if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+ CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)",
+ kmd_buf_used_bytes, kmd_buf_max_size);
+ return -ENOMEM;
+ }
+
+ hw_entry = prepare->hw_update_entries;
+ num_entry = 0;
+
+ if (config_args->config_buf_size) {
+ if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_LRME, "Insufficient HW entries :%d %d",
+ num_entry, prepare->max_hw_update_entries);
+ return -EINVAL;
+ }
+
+ hw_entry[num_entry].handle = kmd_buf_info->handle;
+ hw_entry[num_entry].len = config_args->config_buf_size;
+ hw_entry[num_entry].offset = kmd_buf_info->offset;
+
+ kmd_buf_info->used_bytes += config_args->config_buf_size;
+ kmd_buf_info->offset += config_args->config_buf_size;
+ num_entry++;
+ }
+
+ cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+ &prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+ for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+ if (!cmd_desc[i].length)
+ continue;
+
+ if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+ CAM_ERR(CAM_LRME, "Exceed max num of entry");
+ return -EINVAL;
+ }
+
+ hw_entry[num_entry].handle = cmd_desc[i].mem_handle;
+ hw_entry[num_entry].len = cmd_desc[i].length;
+ hw_entry[num_entry].offset = cmd_desc[i].offset;
+ num_entry++;
+ }
+ prepare->num_hw_update_entries = num_entry;
+
+ CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+ prepare->num_hw_update_entries, prepare->num_in_map_entries,
+ prepare->num_out_map_entries);
+
+ return rc;
+}
+
+static void cam_lrme_mgr_util_put_frame_req(
+ struct list_head *src_list,
+ struct list_head *list,
+ spinlock_t *lock)
+{
+ spin_lock(lock);
+ list_add_tail(list, src_list);
+ spin_unlock(lock);
+}
+
+static int cam_lrme_mgr_util_get_frame_req(
+ struct list_head *src_list,
+ struct cam_lrme_frame_request **frame_req,
+ spinlock_t *lock)
+{
+ int rc = 0;
+ struct cam_lrme_frame_request *req_ptr = NULL;
+
+ spin_lock(lock);
+ if (!list_empty(src_list)) {
+ req_ptr = list_first_entry(src_list,
+ struct cam_lrme_frame_request, frame_list);
+ list_del_init(&req_ptr->frame_list);
+ } else {
+ rc = -ENOENT;
+ }
+ *frame_req = req_ptr;
+ spin_unlock(lock);
+
+ return rc;
+}
+
+
+static int cam_lrme_mgr_util_submit_req(void *priv, void *data)
+{
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_hw_mgr *hw_mgr;
+ struct cam_lrme_frame_request *frame_req = NULL;
+ struct cam_lrme_hw_submit_args submit_args;
+ struct cam_lrme_mgr_work_data *work_data;
+ int rc;
+ int req_prio = 0;
+
+ if (!priv) {
+ CAM_ERR(CAM_LRME, "worker doesn't have private data");
+ return -EINVAL;
+ }
+
+ hw_mgr = (struct cam_lrme_hw_mgr *)priv;
+ work_data = (struct cam_lrme_mgr_work_data *)data;
+ hw_device = work_data->hw_device;
+
+ rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+ frame_pending_list_high, &frame_req, &hw_device->high_req_lock);
+
+ if (!frame_req) {
+ rc = cam_lrme_mgr_util_get_frame_req(&hw_device->
+ frame_pending_list_normal, &frame_req,
+ &hw_device->normal_req_lock);
+ if (frame_req)
+ req_prio = 1;
+ }
+
+ if (!frame_req) {
+ CAM_DBG(CAM_LRME, "No pending request");
+ return 0;
+ }
+
+ if (hw_device->hw_intf.hw_ops.process_cmd) {
+ submit_args.hw_update_entries = frame_req->hw_update_entries;
+ submit_args.num_hw_update_entries =
+ frame_req->num_hw_update_entries;
+ submit_args.frame_req = frame_req;
+
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_SUBMIT,
+ &submit_args, sizeof(struct cam_lrme_hw_submit_args));
+
+ if (rc == -EBUSY)
+ CAM_DBG(CAM_LRME, "device busy");
+ else if (rc)
+ CAM_ERR(CAM_LRME, "submit request failed rc %d", rc);
+ if (rc) {
+ req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+ spin_lock(&hw_device->normal_req_lock);
+ list_add(&frame_req->frame_list,
+ (req_prio == 0 ?
+ &hw_device->frame_pending_list_high :
+ &hw_device->frame_pending_list_normal));
+ req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+ spin_unlock(&hw_device->normal_req_lock);
+ }
+ if (rc == -EBUSY)
+ rc = 0;
+ } else {
+ req_prio == 0 ? spin_lock(&hw_device->high_req_lock) :
+ spin_lock(&hw_device->normal_req_lock);
+ list_add(&frame_req->frame_list,
+ (req_prio == 0 ?
+ &hw_device->frame_pending_list_high :
+ &hw_device->frame_pending_list_normal));
+ req_prio == 0 ? spin_unlock(&hw_device->high_req_lock) :
+ spin_unlock(&hw_device->normal_req_lock);
+ rc = -EINVAL;
+ }
+
+ CAM_DBG(CAM_LRME, "End of submit, rc %d", rc);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_util_schedule_frame_req(
+ struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device)
+{
+ int rc = 0;
+ struct crm_workq_task *task;
+ struct cam_lrme_mgr_work_data *work_data;
+
+ task = cam_req_mgr_workq_get_task(hw_device->work);
+ if (!task) {
+ CAM_ERR(CAM_LRME, "Can not get task for worker");
+ return -ENOMEM;
+ }
+
+ work_data = (struct cam_lrme_mgr_work_data *)task->payload;
+ work_data->hw_device = hw_device;
+
+ task->process_cb = cam_lrme_mgr_util_submit_req;
+ CAM_DBG(CAM_LRME, "enqueue submit task");
+ rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr,
+ uint32_t device_index)
+{
+ int rc = 0;
+ struct cam_lrme_device *hw_device;
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+ return rc;
+ }
+
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ hw_device->num_context--;
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_cb(void *data,
+ struct cam_lrme_hw_cb_args *cb_args)
+{
+ struct cam_lrme_hw_mgr *hw_mgr = &g_lrme_hw_mgr;
+ int rc = 0;
+ bool frame_abort = true;
+ struct cam_lrme_frame_request *frame_req;
+ struct cam_lrme_device *hw_device;
+
+ if (!data || !cb_args) {
+ CAM_ERR(CAM_LRME, "Invalid input args");
+ return -EINVAL;
+ }
+
+ hw_device = (struct cam_lrme_device *)data;
+ frame_req = cb_args->frame_req;
+
+ if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) {
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->frame_list);
+ cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req->frame_list,
+ &hw_mgr->free_req_lock);
+ cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME;
+ frame_req = NULL;
+ }
+
+ if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) {
+ cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE;
+ CAM_DBG(CAM_LRME, "Reg update");
+ }
+
+ if (!frame_req)
+ return rc;
+
+ if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) {
+ cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE;
+ frame_abort = false;
+ } else if (cb_args->cb_type & CAM_LRME_CB_ERROR) {
+ cb_args->cb_type &= ~CAM_LRME_CB_ERROR;
+ frame_abort = true;
+ } else {
+ CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld",
+ cb_args->cb_type, frame_req->req_id);
+ return -EINVAL;
+ }
+
+ if (hw_mgr->event_cb) {
+ struct cam_hw_done_event_data buf_data;
+
+ buf_data.request_id = frame_req->req_id;
+ CAM_DBG(CAM_LRME, "frame req %llu, frame_abort %d",
+ frame_req->req_id, frame_abort);
+ rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map,
+ frame_abort, &buf_data);
+ } else {
+ CAM_ERR(CAM_LRME, "No cb function");
+ }
+ memset(frame_req, 0x0, sizeof(*frame_req));
+ INIT_LIST_HEAD(&frame_req->frame_list);
+ cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+ &frame_req->frame_list,
+ &hw_mgr->free_req_lock);
+
+ rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+ return rc;
+}
+
+static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_query_cap_cmd *args = hw_get_caps_args;
+
+ if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
+ CAM_ERR(CAM_LRME,
+ "sizeof(struct cam_query_cap_cmd) = %lu, args->size = %d",
+ sizeof(struct cam_query_cap_cmd), args->size);
+ return -EFAULT;
+ }
+
+ if (copy_to_user((void __user *)args->caps_handle, &(hw_mgr->lrme_caps),
+ sizeof(struct cam_lrme_query_cap_cmd))) {
+ CAM_ERR(CAM_LRME, "copy to user failed");
+ return -EFAULT;
+ }
+
+ return rc;
+}
+
+static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_acquire_args *args =
+ (struct cam_hw_acquire_args *)hw_acquire_args;
+ struct cam_lrme_acquire_args lrme_acquire_args;
+ uint64_t device_index;
+
+ if (!hw_mgr_priv || !args) {
+ CAM_ERR(CAM_LRME,
+ "Invalid input params hw_mgr_priv %pK, acquire_args %pK",
+ hw_mgr_priv, args);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&lrme_acquire_args,
+ (void __user *)args->acquire_info,
+ sizeof(struct cam_lrme_acquire_args))) {
+ CAM_ERR(CAM_LRME, "Failed to copy acquire args from user");
+ return -EFAULT;
+ }
+
+ device_index = cam_lrme_mgr_util_reserve_device(hw_mgr,
+ &lrme_acquire_args);
+ CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
+
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Get wrong device id %llu", device_index);
+ return -EINVAL;
+ }
+
+	/* device_index occupies the rightmost 4 bits of ctxt_to_hw_map */
+ args->ctxt_to_hw_map = (void *)device_index;
+
+ return 0;
+}
+
+static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+ int rc = 0;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_hw_release_args *args =
+ (struct cam_hw_release_args *)hw_release_args;
+ uint64_t device_index;
+
+ if (!hw_mgr_priv || !hw_release_args) {
+ CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK",
+ hw_mgr_priv, hw_release_args);
+ return -EINVAL;
+ }
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index);
+ return -EPERM;
+ }
+
+ rc = cam_lrme_mgr_util_release(hw_mgr, device_index);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc);
+
+ return rc;
+}
+
+/*
+ * Start the hw device mapped to this context.
+ *
+ * Decodes the device index from ctxt_to_hw_map, looks up the device and
+ * invokes its start op. A device without a start op is an error.
+ * Returns 0 on success or a negative error code.
+ */
+static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_start_args *args =
+		(struct cam_hw_start_args *)hw_start_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr || !args) {
+		/* fixed typo: was "Invald" */
+		CAM_ERR(CAM_LRME, "Invalid input params");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.start) {
+		rc = hw_device->hw_intf.hw_ops.start(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+	} else {
+		/* fixed typo: was "Invald" */
+		CAM_ERR(CAM_LRME, "Invalid start function");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Stop the hw device mapped to this context.
+ *
+ * Decodes the device index from ctxt_to_hw_map and invokes the device's
+ * stop op if present.
+ *
+ * NOTE(review): a device without a stop op silently returns success
+ * here, while cam_lrme_mgr_hw_start() treats a missing start op as
+ * -EINVAL — confirm the asymmetry is intended. The "goto end" directly
+ * above the "end:" label is redundant but harmless.
+ */
+static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_stop_args *args =
+		(struct cam_hw_stop_args *)stop_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !stop_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Stop device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.stop) {
+		rc = hw_device->hw_intf.hw_ops.stop(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * Prepare a frame request for submission.
+ *
+ * Validates the packet, patches buffer addresses, prepares the input and
+ * output IO buffers, builds the hw update entries and attaches a frame
+ * request (taken from the free list) to args->priv for the later
+ * hw_config stage. Returns 0 on success or a negative error code.
+ *
+ * Fix: the in/out port-count check previously jumped to the error label
+ * with rc still 0, so an empty port list returned success; rc is now set
+ * to -EINVAL. The patch-failure path now uses the common error label
+ * instead of a direct return (same behavior, consistent style).
+ */
+static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *args =
+		(struct cam_hw_prepare_update_args *)hw_prepare_update_args;
+	struct cam_lrme_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	struct cam_lrme_hw_cmd_config_args config_args;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_packet_validate(args->packet);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_LRME,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	rc = cam_packet_util_process_patches(args->packet,
+		hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc);
+		goto error;
+	}
+
+	memset(&config_args, 0, sizeof(config_args));
+	config_args.hw_device = hw_device;
+
+	rc = cam_lrme_mgr_util_prepare_io_buffer(
+		hw_mgr->device_iommu.non_secure, args,
+		config_args.input_buf, config_args.output_buf,
+		CAM_LRME_MAX_IO_BUFFER);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+	/* Both an input and an output port are mandatory */
+	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
+		CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
+			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args,
+		&config_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req, &hw_mgr->free_req_lock);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_LRME, "Can not get free frame request");
+		goto error;
+	}
+
+	frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map;
+	frame_req->req_id = args->packet->header.request_id;
+	frame_req->hw_device = hw_device;
+	frame_req->num_hw_update_entries = args->num_hw_update_entries;
+	for (i = 0; i < args->num_hw_update_entries; i++)
+		frame_req->hw_update_entries[i] = args->hw_update_entries[i];
+
+	args->priv = frame_req;
+
+	CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+/*
+ * Queue a prepared frame request for execution.
+ *
+ * Takes the frame request stashed in args->priv by hw_prepare_update,
+ * enqueues it on the device's high- or normal-priority pending list
+ * (priority decoded from ctxt_to_hw_map) and kicks the scheduler.
+ * Returns 0 on success or a negative error code.
+ */
+static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
+	void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *args =
+		(struct cam_hw_config_args *)hw_config_args;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device = NULL;
+	enum cam_lrme_hw_mgr_ctx_priority priority;
+
+	if (!hw_mgr_priv || !hw_config_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK",
+			hw_mgr_priv, hw_config_args);
+		return -EINVAL;
+	}
+
+	if (!args->num_hw_update_entries) {
+		CAM_ERR(CAM_LRME, "No hw update entries");
+		return -EINVAL;
+	}
+
+	/* priv was populated by cam_lrme_mgr_hw_prepare_update() */
+	frame_req = (struct cam_lrme_frame_request *)args->priv;
+	if (!frame_req) {
+		CAM_ERR(CAM_LRME, "No frame request");
+		return -EINVAL;
+	}
+
+	hw_device = frame_req->hw_device;
+	if (!hw_device)
+		return -EINVAL;
+
+	priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+	if (priority == CAM_LRME_PRIORITY_HIGH) {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_high,
+			&frame_req->frame_list, &hw_device->high_req_lock);
+	} else {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_normal,
+			&frame_req->frame_list, &hw_device->normal_req_lock);
+	}
+
+	CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id);
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+/*
+ * Register one LRME hw device with the global hw manager.
+ *
+ * Initializes the device's request lists and locks, creates its submit
+ * work queue, registers the IRQ callback with the hw layer and queries
+ * the device capabilities. Returns 0 on success or a negative error
+ * code; on failure the work queue is destroyed again.
+ *
+ * Fix: the "no get_hw_caps function" branch previously jumped to the
+ * cleanup label with rc still 0, so the failure returned success after
+ * destroying the work queue; rc is now set to -EINVAL. The unused
+ * snprintf return assignment was also dropped.
+ */
+int cam_lrme_mgr_register_device(
+	struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu)
+{
+	struct cam_lrme_device *hw_device;
+	char buf[128];
+	int i, rc;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx];
+
+	g_lrme_hw_mgr.device_iommu = *device_iommu;
+	g_lrme_hw_mgr.cdm_iommu = *cdm_iommu;
+
+	memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf));
+
+	spin_lock_init(&hw_device->high_req_lock);
+	spin_lock_init(&hw_device->normal_req_lock);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_high);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_normal);
+
+	snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d",
+		lrme_hw_intf->hw_idx);
+	CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
+	rc = cam_req_mgr_workq_create(buf,
+		CAM_LRME_WORKQ_NUM_TASK,
+		&hw_device->work, CRM_WORKQ_USAGE_NON_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Unable to create a worker, rc=%d", rc);
+		return rc;
+	}
+
+	for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++)
+		hw_device->work->task.pool[i].payload =
+			&hw_device->work_data[i];
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		struct cam_lrme_hw_cmd_set_cb cb_args;
+
+		cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb;
+		cb_args.data = hw_device;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_REGISTER_CB,
+			&cb_args, sizeof(cb_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Register cb failed");
+			goto destroy_workqueue;
+		}
+		CAM_DBG(CAM_LRME, "cb registered");
+	}
+
+	if (hw_device->hw_intf.hw_ops.get_hw_caps) {
+		rc = hw_device->hw_intf.hw_ops.get_hw_caps(
+			hw_device->hw_intf.hw_priv, &hw_device->hw_caps,
+			sizeof(hw_device->hw_caps));
+		if (rc)
+			CAM_ERR(CAM_LRME, "Get caps failed");
+	} else {
+		CAM_ERR(CAM_LRME, "No get_hw_caps function");
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] =
+		hw_device->hw_caps;
+	g_lrme_hw_mgr.device_count++;
+	g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu;
+	g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu;
+	g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count;
+
+	hw_device->valid = true;
+
+	CAM_DBG(CAM_LRME, "device registration done");
+	return 0;
+
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&hw_device->work);
+
+	return rc;
+}
+
+/*
+ * Deregister a LRME hw device: destroy its work queue, wipe its state
+ * and drop the global device count.
+ *
+ * Fix: device_index is now validated against the static device array
+ * bounds before indexing (previously any value was dereferenced).
+ */
+int cam_lrme_mgr_deregister_device(int device_index)
+{
+	struct cam_lrme_device *hw_device;
+
+	if (device_index < 0 || device_index >= CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EINVAL;
+	}
+
+	hw_device = &g_lrme_hw_mgr.hw_device[device_index];
+	cam_req_mgr_workq_destroy(&hw_device->work);
+	memset(hw_device, 0x0, sizeof(struct cam_lrme_device));
+	g_lrme_hw_mgr.device_count--;
+
+	return 0;
+}
+
+/* Tear down the global hw manager: destroy its mutex and zero all state. */
+int cam_lrme_hw_mgr_deinit(void)
+{
+	mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex);
+	memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr));
+
+	return 0;
+}
+
+/*
+ * Initialize the global LRME hw manager and populate the generic hw
+ * manager interface.
+ *
+ * Initializes locks, seeds the free list with the statically allocated
+ * frame requests and wires the hw_mgr_intf ops to the LRME handlers.
+ * Expected to run after the devices have been registered (the device
+ * count is validated here). Returns 0 on success, -EINVAL otherwise.
+ */
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb)
+{
+	int i, rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_mgr_intf)
+		return -EINVAL;
+
+	CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count);
+	if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Invalid count of devices");
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_lrme_hw_mgr.free_req_lock);
+	INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list);
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) {
+		frame_req = &g_lrme_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+
+		list_add_tail(&frame_req->frame_list,
+			&g_lrme_hw_mgr.frame_free_list);
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps;
+	hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+
+	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+
+	CAM_DBG(CAM_LRME, "Hw mgr init done");
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
new file mode 100644
index 0000000..f7ce4d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_H_
+#define _CAM_LRME_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_lrme.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_context.h"
+
+#define CAM_LRME_HW_MAX 1
+#define CAM_LRME_WORKQ_NUM_TASK 10
+
+/*
+ * Decode helpers for the packed ctxt_to_hw_map cookie:
+ * bits [3:0] device index, bits [7:4] priority, remaining high bits the
+ * context index. The macro argument is parenthesized so an expression
+ * argument is cast/masked as a whole (CERT PRE01-C).
+ */
+#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
+	((uint64_t)(ctxt_to_hw_map) & 0xF)
+
+#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
+	(((uint64_t)(ctxt_to_hw_map) & 0xF0) >> 4)
+
+#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
+	((uint64_t)(ctxt_to_hw_map) >> CAM_LRME_CTX_INDEX_SHIFT)
+
+/**
+ * enum cam_lrme_hw_mgr_ctx_priority
+ *
+ * @CAM_LRME_PRIORITY_HIGH   : High priority client
+ * @CAM_LRME_PRIORITY_NORMAL : Normal priority client
+ */
+enum cam_lrme_hw_mgr_ctx_priority {
+	CAM_LRME_PRIORITY_HIGH,
+	CAM_LRME_PRIORITY_NORMAL,
+};
+
+/**
+ * struct cam_lrme_mgr_work_data : HW Mgr work data
+ *
+ * @hw_device : Pointer to the hw device
+ */
+struct cam_lrme_mgr_work_data {
+	struct cam_lrme_device *hw_device;
+};
+
+/**
+ * struct cam_lrme_device : LRME HW device
+ *
+ * @hw_caps                  : HW device's capabilities
+ * @hw_intf                  : HW device's interface information
+ * @num_context              : Number of contexts using this device
+ * @valid                    : Whether this device is valid
+ * @work                     : HW device's work queue
+ * @work_data                : Per-task payloads for @work
+ * @frame_pending_list_high  : High priority request queue
+ * @frame_pending_list_normal: Normal priority request queue
+ * @high_req_lock            : Spinlock of high priority queue
+ * @normal_req_lock          : Spinlock of normal priority queue
+ */
+struct cam_lrme_device {
+	struct cam_lrme_dev_cap hw_caps;
+	struct cam_hw_intf hw_intf;
+	uint32_t num_context;
+	bool valid;
+	struct cam_req_mgr_core_workq *work;
+	struct cam_lrme_mgr_work_data work_data[CAM_LRME_WORKQ_NUM_TASK];
+	struct list_head frame_pending_list_high;
+	struct list_head frame_pending_list_normal;
+	spinlock_t high_req_lock;
+	spinlock_t normal_req_lock;
+};
+
+/**
+ * struct cam_lrme_hw_mgr : LRME HW manager
+ *
+ * @device_count    : Number of HW devices
+ * @frame_free_list : List of free frame request
+ * @hw_mgr_mutex    : Mutex to protect HW manager data
+ * @free_req_lock   : Spinlock to protect frame_free_list
+ * @hw_device       : Static array of HW devices
+ * @device_iommu    : Device iommu
+ * @cdm_iommu       : cdm iommu
+ * @frame_req       : Statically allocated pool of frame requests
+ * @lrme_caps       : LRME capabilities
+ * @event_cb        : IRQ callback function
+ */
+struct cam_lrme_hw_mgr {
+	uint32_t device_count;
+	struct list_head frame_free_list;
+	struct mutex hw_mgr_mutex;
+	spinlock_t free_req_lock;
+	struct cam_lrme_device hw_device[CAM_LRME_HW_MAX];
+	struct cam_iommu_handle device_iommu;
+	struct cam_iommu_handle cdm_iommu;
+	struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
+	struct cam_lrme_query_cap_cmd lrme_caps;
+	cam_hw_event_cb_func event_cb;
+};
+
+int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
+ struct cam_iommu_handle *device_iommu,
+ struct cam_iommu_handle *cdm_iommu);
+int cam_lrme_mgr_deregister_device(int device_index);
+
+#endif /* _CAM_LRME_HW_MGR_H_ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
index c06b806..8bb609c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
@@ -10,14 +10,16 @@
* GNU General Public License for more details.
*/
+#ifndef _CAM_LRME_HW_MGR_INTF_H_
+#define _CAM_LRME_HW_MGR_INTF_H_
-/dts-v1/;
+#include <linux/of.h>
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+ cam_hw_event_cb_func cam_lrme_dev_buf_done_cb);
+int cam_lrme_hw_mgr_deinit(void);
+
+#endif /* _CAM_LRME_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
new file mode 100644
index 0000000..c65d862
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_dev.o cam_lrme_hw_core.o cam_lrme_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
new file mode 100644
index 0000000..0318739
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -0,0 +1,1022 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_smmu_api.h"
+
+/*
+ * Append one (register offset, value) pair to a CDM reg-random command
+ * buffer, advancing *index past the two entries written. The caller is
+ * responsible for ensuring the buffer has room.
+ */
+static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[(*index)++] = reg_offset;
+	buffer[(*index)++] = reg_value;
+}
+
+/*
+ * Emit the bus-read (fetch engine) client programming for one input
+ * buffer as reg/value pairs: buffer size, image address, stride, client
+ * enable and unpack config. @index selects the bus read client.
+ */
+static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	uint32_t reg_val;
+
+	/* 1. config buffer size (width in low 16 bits, height in high) */
+	reg_val = io_buf->io_cfg->planes[0].width;
+	reg_val |= (io_buf->io_cfg->planes[0].height << 16);
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size,
+		reg_val);
+
+	CAM_DBG(CAM_LRME,
+		"width %d", io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME,
+		"height %d", io_buf->io_cfg->planes[0].height);
+
+	/* 2. config image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* 3. config stride */
+	reg_val = io_buf->io_cfg->planes[0].plane_stride;
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_stride,
+		reg_val);
+
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+
+	/* 4. enable client */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
+
+	/* 5. unpack_cfg */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0, 0x0);
+}
+
+/*
+ * Emit the bus-write (write engine) client programming for one output
+ * buffer as reg/value pairs: client mode, image address, dimensions,
+ * packer config and stride. @index selects the bus write client.
+ *
+ * NOTE(review): the stride register is written with meta_stride while
+ * the debug log below prints plane_stride — confirm which field is
+ * intended for wr_stride.
+ */
+static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	/* config client mode */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].cfg,
+		0x1);
+
+	/* image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* buffer width and height */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg,
+		io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width);
+
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg,
+		io_buf->io_cfg->planes[0].height);
+	CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height);
+
+	/* packer cfg (client 0 and client 1 use different packing modes) */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg,
+		(index == 0) ? 0x1 : 0x5);
+
+	/* client stride */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].wr_stride,
+		io_buf->io_cfg->planes[0].meta_stride);
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+}
+
+
+/*
+ * Translate a config request into a CDM command buffer.
+ *
+ * Collects reg/value pairs for all valid input (TAR/REF) and output
+ * (DS2/RES) buffers, disables the unused bus clients, then serializes a
+ * changebase command plus a reg-random command into cmd_buf_addr,
+ * checking the remaining buffer space before each write. The number of
+ * bytes produced is returned through config_args->config_buf_size.
+ *
+ * NOTE(review): both bus-client-disable loops are bounded by
+ * CAM_LRME_BUS_RD_MAX_CLIENTS, including the one that walks the *write*
+ * clients — confirm the write loop should not use the WR client count.
+ * Also note the two identical "buf size not sufficient" checks return
+ * different codes (-EINVAL vs -ENOMEM).
+ */
+static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_hw_cmd_config_args *config_args)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_cdm_info *hw_cdm_info;
+	uint32_t *cmd_buf_addr = config_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM];
+	struct cam_lrme_hw_io_buffer *io_buf;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t num_cmd = 0;
+	uint32_t size;
+	uint32_t mem_base, available_size = config_args->size;
+	uint32_t output_res_mask = 0, input_res_mask = 0;
+
+
+	if (!cmd_buf_addr) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_cdm_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info;
+
+	/* program every valid input buffer; entries stop at first invalid */
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+		CAM_DBG(CAM_LRME,
+			"resource_type %d", io_buf->io_cfg->resource_type);
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_TAR:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR;
+			break;
+		case CAM_LRME_IO_TYPE_REF:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF;
+			break;
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	/* disable the read clients that were not programmed above */
+	for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+		if (!((input_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_rd_reg.bus_client_reg[i].core_cfg,
+				0x0);
+
+	/* program every valid output buffer */
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_LRME, "resource_type %d",
+			io_buf->io_cfg->resource_type);
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_DS2:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2;
+			break;
+		case CAM_LRME_IO_TYPE_RES:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES;
+			break;
+
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	/* disable the write clients that were not programmed above */
+	for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+		if (!((output_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0);
+
+	if (output_res_mask) {
+		/* write composite mask */
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+			hw_info->bus_wr_reg.common_reg.composite_mask_0,
+			output_res_mask);
+	}
+
+	/* serialize: changebase first, sized in 32-bit words (hence * 4) */
+	size = hw_cdm_info->cdm_ops->cdm_required_size_changebase();
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX);
+
+	hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	/* then the reg-random block (num_cmd counts offset+value entries) */
+	size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random(
+		num_cmd / 2);
+
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size);
+		return -ENOMEM;
+	}
+
+	hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmd / 2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	/* report how many bytes of the command buffer were consumed */
+	config_args->config_buf_size =
+		config_args->size - available_size;
+
+	return 0;
+}
+
+/* Kick the fetch engine by writing the bus-read GO command register. */
+static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info *hw_info;
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+	soc_info = &lrme_hw->soc_info;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.cmd);
+
+	return 0;
+}
+
+/*
+ * Reset the LRME core and wait for the reset-done IRQ.
+ *
+ * HW reset writes the top reset command directly; SW reset first resets
+ * both bus engines, then issues the top-level soft reset. Both paths
+ * block on reset_complete (signalled by the IRQ handler) with a
+ * CAM_LRME_HW_RESET_TIMEOUT ms timeout.
+ *
+ * Fix: an unrecognized reset_type previously fell through the switch
+ * and returned 0 (success); it now returns -EINVAL.
+ */
+static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw,
+	uint32_t reset_type)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	long time_left;
+
+	lrme_core = lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+
+	switch (reset_type) {
+	case CAM_LRME_HW_RESET_TYPE_HW_RESET:
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"HW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	case CAM_LRME_HW_RESET_TYPE_SW_RESET:
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.sw_reset);
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.sw_reset);
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"SW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_LRME, "Unsupported reset type %d", reset_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the hw version registers (CLC, bus read, bus write, top, titan)
+ * and unpack each into gen/rev/step fields of hw_caps. All version
+ * registers share the same layout: gen [31:28], rev [27:16], step [15:0].
+ */
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_dev_cap *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t reg_value;
+
+	if (!hw_info) {
+		CAM_ERR(CAM_LRME, "Invalid hw info data");
+		return -EINVAL;
+	}
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->clc_reg.clc_hw_version);
+	hw_caps->clc_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->clc_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->clc_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.hw_version);
+	hw_caps->bus_rd_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_rd_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_rd_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.hw_version);
+	hw_caps->bus_wr_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_wr_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_wr_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_hw_version);
+	hw_caps->top_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_titan_version);
+	hw_caps->top_titan_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_titan_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_titan_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	return 0;
+}
+
+/*
+ * Submit a frame request's hw update entries to the CDM as a BL
+ * (buffer list) command. Returns 0 on success, -EINVAL if there are no
+ * entries or the CDM submit fails.
+ *
+ * Fix: the copy loop used "i <= num_hw_update_entries", reading one
+ * entry past hw_update_entries and writing one slot past the BL array
+ * (and submitting cmd_arrary_count+1 entries). Changed to "<".
+ */
+static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
+	struct cam_lrme_frame_request *frame_req)
+{
+	struct cam_lrme_cdm_info *hw_cdm_info =
+		lrme_core->hw_cdm_info;
+	struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd;
+	struct cam_hw_update_entry *cmd;
+	int i, rc = 0;
+
+	if (frame_req->num_hw_update_entries > 0) {
+		cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < frame_req->num_hw_update_entries; i++) {
+			cmd = (frame_req->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to submit cdm commands");
+			return -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No hw update entry");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Error-IRQ recovery path.
+ *
+ * Resets the core, drops any in-flight submitted/processing requests and
+ * reports both of them back to the hw manager through the registered
+ * callback with CAM_LRME_CB_ERROR. On successful reset the core returns
+ * to IDLE; otherwise it stays in RECOVERY.
+ */
+static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+	struct cam_lrme_hw_cb_args cb_args;
+	int rc;
+
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	cb_args.cb_type = CAM_LRME_CB_ERROR;
+
+	if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) {
+		CAM_ERR(CAM_LRME, "Get error irq in wrong state %d",
+			lrme_core->state);
+	}
+
+	CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to reset");
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+	if (!rc)
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	/* notify the hw manager about both outstanding requests */
+	cb_args.frame_req = req_proc;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	cb_args.frame_req = req_submit;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	return rc;
+}
+
+/*
+ * Handle the reg-update IRQ: the submitted request has been latched by
+ * the hw, so promote it from req_submit to req_proc and move the core
+ * from REQ_PENDING to PROCESSING. A reg-update in any other state
+ * triggers the error recovery path.
+ */
+static int cam_lrme_hw_util_process_reg_update(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) {
+		lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING;
+	} else {
+		CAM_ERR(CAM_LRME, "Reg update in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Failed to reset");
+		return -EINVAL;
+	}
+
+	lrme_core->req_proc = lrme_core->req_submit;
+	lrme_core->req_submit = NULL;
+
+	return 0;
+}
+
+/*
+ * cam_lrme_hw_util_process_idle() - handle a top-level idle irq.
+ *
+ * Idle means the current frame finished: report buf-done for req_proc.
+ * If a second request was already configured (REQ_PROC_PEND), kick it off
+ * now and go back to REQ_PENDING; otherwise return to IDLE.  Idle in any
+ * other state triggers error recovery.
+ *
+ * Caller must hold lrme_hw->hw_mutex (called from the irq work handler).
+ * Returns 0 on success, or the recovery result on a state mismatch.
+ */
+static int cam_lrme_hw_util_process_idle(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_BUF_DONE;
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_REQ_PROC_PEND:
+		/* A second request is queued in HW; start it immediately */
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Idle in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		return rc;
+	}
+	/* Completed request is handed to the HW manager via the cb args */
+	cb_args->frame_req = lrme_core->req_proc;
+	lrme_core->req_proc = NULL;
+
+	return 0;
+}
+
+/*
+ * cam_lrme_set_irq() - globally enable or disable LRME interrupts.
+ *
+ * Writes the top, bus-WR (both mask registers) and bus-RD irq mask
+ * registers: 0xFFFF unmasks every interrupt bit, 0x0 masks them all.
+ * All writes go through reg_map[0] (the LRME register block).
+ */
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
+	enum cam_lrme_irq_set set)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_info *hw_info = lrme_core->hw_info;
+
+	switch (set) {
+	case CAM_LRME_IRQ_ENABLE:
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+
+	case CAM_LRME_IRQ_DISABLE:
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+	}
+}
+
+
+/*
+ * cam_lrme_hw_process_irq() - workqueue bottom half for LRME interrupts.
+ * @priv : struct cam_hw_info of the device (workq task private data)
+ * @data : struct cam_lrme_hw_work_data holding latched irq status words
+ *
+ * Dispatches on the status bits captured by cam_lrme_hw_irq():
+ *   - top bit 3          : error  -> recovery via process_err (cb done there)
+ *   - we_irq_status[0] bit 1 : reg update -> advance the request pipeline
+ *   - top bit 4          : idle   -> frame done, possibly kick next request
+ * then delivers the accumulated cb_args to the HW manager callback.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int cam_lrme_hw_process_irq(void *priv, void *data)
+{
+	struct cam_lrme_hw_work_data *work_data;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	int rc = 0;
+	uint32_t top_irq_status, fe_irq_status;
+	uint32_t *we_irq_status;
+	struct cam_lrme_hw_cb_args cb_args;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args));
+	lrme_hw = (struct cam_hw_info *)priv;
+	work_data = (struct cam_lrme_hw_work_data *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	top_irq_status = work_data->top_irq_status;
+	fe_irq_status = work_data->fe_irq_status;
+	we_irq_status = work_data->we_irq_status;
+
+	CAM_DBG(CAM_LRME,
+		"top status %x, fe status %x, we status0 %x, we status1 %x",
+		top_irq_status, fe_irq_status, we_irq_status[0],
+		we_irq_status[1]);
+	CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state);
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	/* Error takes priority; process_err notifies the HW manager itself */
+	if (top_irq_status & (1 << 3)) {
+		CAM_DBG(CAM_LRME, "Error");
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Process error failed");
+		goto end;
+	}
+
+	/* Reg update and idle can arrive in the same work item; handle both,
+	 * accumulating cb_type bits before the single callback below.
+	 */
+	if (we_irq_status[0] & (1 << 1)) {
+		CAM_DBG(CAM_LRME, "reg update");
+		rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process reg_update failed");
+			goto end;
+		}
+	}
+
+	if (top_irq_status & (1 << 4)) {
+		CAM_DBG(CAM_LRME, "IDLE");
+
+		rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process idle failed");
+			goto end;
+		}
+	}
+
+	if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) {
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->
+			hw_mgr_cb.data, &cb_args);
+	} else {
+		CAM_ERR(CAM_LRME, "No hw mgr cb");
+		rc = -EINVAL;
+	}
+
+end:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+/*
+ * cam_lrme_hw_start() - HW layer init/start callback.
+ * @hw_priv       : struct cam_hw_info of the device
+ * @hw_start_args : unused
+ * @arg_size      : unused
+ *
+ * First start powers the SOC resources, resets the hardware and streams
+ * on the CDM (if one was acquired), then moves the core to IDLE.
+ *
+ * NOTE(review): a repeated start returns success WITHOUT incrementing
+ * open_count, while cam_lrme_hw_stop() decrements it on every call --
+ * confirm start/stop pairing is balanced at the caller.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int cam_lrme_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME,
+			"Invalid input params, lrme_hw %pK",
+			lrme_hw);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count > 0) {
+		CAM_DBG(CAM_LRME, "This device is activated before");
+		goto unlock;
+	}
+
+	rc = cam_lrme_soc_enable_resources(lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to enable soc resources");
+		goto unlock;
+	}
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to reset hw");
+		goto disable_soc;
+	}
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to stream on cdm");
+			goto disable_soc;
+		}
+	}
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	lrme_hw->open_count++;
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+
+disable_soc:
+	if (cam_lrme_soc_disable_resources(lrme_hw))
+		CAM_ERR(CAM_LRME, "Error in disable soc resources");
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+/**
+ * cam_lrme_hw_stop() - HW layer deinit/stop callback.
+ * @hw_priv      : struct cam_hw_info of the device
+ * @hw_stop_args : unused
+ * @arg_size     : unused
+ *
+ * Decrements open_count; when the last user stops, drops the in-flight
+ * request pointers, streams off the CDM (if acquired), disables SOC
+ * resources and moves the core back to INIT.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid argument");
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Error Unbalanced stop");
+		return -EINVAL;
+	}
+	lrme_hw->open_count--;
+
+	/* Other users still active; keep the hardware powered */
+	if (lrme_hw->open_count)
+		goto unlock;
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				hw_cdm_info->cdm_handle, rc);
+			goto unlock;
+		}
+	}
+
+	rc = cam_lrme_soc_disable_resources(lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc);
+		goto unlock;
+	}
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) {
+		lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+	} else {
+		CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state);
+		/*
+		 * Fix: the original returned here with hw_mutex still held,
+		 * leaking the lock.  Fall through to the unlock path instead.
+		 */
+		rc = -EINVAL;
+	}
+
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+/**
+ * cam_lrme_hw_submit_req() - submit one frame request to the hardware.
+ * @hw_priv        : struct cam_hw_info of the device
+ * @hw_submit_args : struct cam_lrme_hw_submit_args with the frame request
+ * @arg_size       : must equal sizeof(struct cam_lrme_hw_submit_args)
+ *
+ * The core accepts up to two requests in flight: one processing and one
+ * pending.  Submitting from IDLE kicks the hardware immediately
+ * (REQ_PENDING); submitting while PROCESSING defers the kick until the
+ * current frame goes idle (REQ_PROC_PEND).
+ *
+ * Return: 0 on success, -EBUSY if the core cannot accept a request,
+ * negative error code otherwise.
+ */
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_submit_args *args =
+		(struct cam_lrme_hw_submit_args *)hw_submit_args;
+	int rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+
+	if (!hw_priv || !hw_submit_args) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
+		CAM_ERR(CAM_LRME,
+			"size of args %lu, arg_size %d",
+			sizeof(struct cam_lrme_hw_submit_args), arg_size);
+		return -EINVAL;
+	}
+
+	frame_req = args->frame_req;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		CAM_ERR(CAM_LRME, "HW is not open");
+		mutex_unlock(&lrme_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE &&
+		lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "device busy, can not submit, state %d",
+			lrme_core->state);
+		return -EBUSY;
+	}
+
+	if (lrme_core->req_submit != NULL) {
+		CAM_ERR(CAM_LRME, "req_submit is not NULL");
+		/*
+		 * Fix: the original returned -EBUSY here without releasing
+		 * hw_mutex, leaking the lock.  Use the unlocking error path.
+		 */
+		rc = -EBUSY;
+		goto error;
+	}
+
+	rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Submit req failed");
+		goto error;
+	}
+
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		/* One frame in flight; new one waits for the idle irq */
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
+		break;
+
+	case CAM_LRME_CORE_STATE_IDLE:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Wrong hw state");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	lrme_core->req_submit = frame_req;
+	mutex_unlock(&lrme_hw->hw_mutex);
+	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
+		frame_req->req_id);
+
+	return 0;
+
+error:
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+
+}
+
+/**
+ * cam_lrme_hw_reset() - HW layer reset callback.
+ * @hw_priv         : struct cam_hw_info of the device
+ * @reset_core_args : struct cam_lrme_hw_reset_args with the reset type
+ * @arg_size        : must equal sizeof(struct cam_lrme_hw_reset_args)
+ *
+ * Moves the core through RECOVERY while the reset runs, then back to IDLE.
+ * A reset request is rejected while a recovery is already in progress.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args;
+	int rc;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (!reset_core_args ||
+		sizeof(struct cam_lrme_hw_reset_args) != arg_size) {
+		CAM_ERR(CAM_LRME, "Invalid reset args");
+		return -EINVAL;
+	}
+
+	lrme_core = lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+	if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Reset not allowed in %d state",
+			lrme_core->state);
+		return -EINVAL;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type);
+	if (rc) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		/* Fix: log tag was CAM_FD (copy-paste from the FD driver) */
+		CAM_ERR(CAM_LRME, "Failed to reset");
+		/*
+		 * NOTE(review): state stays RECOVERY on failure, which also
+		 * blocks any future reset attempt -- confirm this is intended.
+		 */
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return 0;
+}
+
+/*
+ * cam_lrme_hw_get_caps() - HW layer get-caps callback.
+ *
+ * Copies the capabilities cached in the core info (filled at probe time)
+ * into the caller-supplied cam_lrme_dev_cap structure.
+ */
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_dev_cap *lrme_hw_caps =
+		(struct cam_lrme_dev_cap *)get_hw_cap_args;
+
+	if (!lrme_hw || !lrme_hw_caps) {
+		CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	*lrme_hw_caps = lrme_core->hw_caps;
+
+	return 0;
+}
+
+/**
+ * cam_lrme_hw_irq() - top half interrupt handler for the LRME device.
+ * @irq_num : interrupt line number (unused)
+ * @data    : struct cam_hw_info registered with request_irq
+ *
+ * Reads and clears all four irq status registers (top, FE, WE0, WE1),
+ * completes reset waits inline (top bit 0), and defers all remaining
+ * events to the workqueue via cam_lrme_hw_process_irq().
+ *
+ * Return: IRQ_HANDLED when processed, IRQ_NONE on invalid data.
+ * (Fix: the original returned -EINVAL/-ENOMEM, which are not valid
+ * irqreturn_t values.)
+ */
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	struct crm_workq_task *task;
+	struct cam_lrme_hw_work_data *work_data;
+	uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1;
+	int rc;
+
+	if (!data) {
+		CAM_ERR(CAM_LRME, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	lrme_hw = (struct cam_hw_info *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	soc_info = &lrme_hw->soc_info;
+	hw_info = lrme_core->hw_info;
+
+	/* Read-then-clear each status register, then mask to the bits we
+	 * actually handle (CAM_LRME_*_IRQ_MASK).
+	 */
+	top_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_status);
+	CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status);
+	cam_io_w_mb(top_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_clear);
+	top_irq_status &= CAM_LRME_TOP_IRQ_MASK;
+
+	fe_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_status);
+	CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status);
+	cam_io_w_mb(fe_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_clear);
+	fe_irq_status &= CAM_LRME_FE_IRQ_MASK;
+
+	we_irq_status0 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_0);
+	CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0);
+	cam_io_w_mb(we_irq_status0,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_0);
+	we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0;
+
+	we_irq_status1 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_1);
+	CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1);
+	cam_io_w_mb(we_irq_status1,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_1);
+	we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1;
+
+	/* Latch the clears with the irq cmd registers */
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_cmd);
+
+	/* Top bit 0 is reset-done; wake the waiter directly */
+	if (top_irq_status & 0x1) {
+		complete(&lrme_core->reset_complete);
+		top_irq_status &= (~0x1);
+	}
+
+	if (top_irq_status || fe_irq_status ||
+		we_irq_status0 || we_irq_status1) {
+		task = cam_req_mgr_workq_get_task(lrme_core->work);
+		if (!task) {
+			CAM_ERR(CAM_LRME, "no empty task available");
+			/* irq was ours and is cleared; still handled */
+			return IRQ_HANDLED;
+		}
+		work_data = (struct cam_lrme_hw_work_data *)task->payload;
+		work_data->top_irq_status = top_irq_status;
+		work_data->fe_irq_status = fe_irq_status;
+		work_data->we_irq_status[0] = we_irq_status0;
+		work_data->we_irq_status[1] = we_irq_status1;
+		task->process_cb = cam_lrme_hw_process_irq;
+		rc = cam_req_mgr_workq_enqueue_task(task, data,
+			CRM_TASK_PRIORITY_0);
+		if (rc)
+			CAM_ERR(CAM_LRME,
+				"Failed in enqueue work task, rc=%d", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * cam_lrme_hw_process_cmd() - HW layer process_cmd callback.
+ * @hw_priv  : struct cam_hw_info of the device
+ * @cmd_type : one of the CAM_LRME_HW_CMD_* commands
+ * @cmd_args : command-specific argument structure
+ * @arg_size : size of @cmd_args (checked only by the SUBMIT path)
+ *
+ * Dispatches prepare-hw-update, callback registration and submit
+ * commands.  Unknown commands are ignored and return 0.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+
+	/* Fix: both pointers were dereferenced below without validation */
+	if (!lrme_hw || !cmd_args) {
+		CAM_ERR(CAM_LRME, "Invalid input %pK %pK",
+			hw_priv, cmd_args);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: {
+		struct cam_lrme_hw_cmd_config_args *config_args;
+
+		config_args = (struct cam_lrme_hw_cmd_config_args *)cmd_args;
+		rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_REGISTER_CB: {
+		struct cam_lrme_hw_cmd_set_cb *cb_args;
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+
+		/* The unused local hw_device was removed */
+		cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args;
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb =
+			cb_args->cam_lrme_hw_mgr_cb;
+		lrme_core->hw_mgr_cb.data = cb_args->data;
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_SUBMIT: {
+		struct cam_lrme_hw_submit_args *submit_args;
+
+		submit_args = (struct cam_lrme_hw_submit_args *)cmd_args;
+		rc = cam_lrme_hw_submit_req(hw_priv,
+			submit_args, arg_size);
+		break;
+	}
+
+	default:
+		/* Unknown commands are silently ignored (rc stays 0) */
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
new file mode 100644
index 0000000..bf2f370
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -0,0 +1,457 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_CORE_H_
+#define _CAM_LRME_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_lrme.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_req_mgr_workq.h"
+
+#define CAM_LRME_HW_RESET_TIMEOUT 3000
+
+#define CAM_LRME_BUS_RD_MAX_CLIENTS 2
+#define CAM_LRME_BUS_WR_MAX_CLIENTS 2
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+#define CAM_LRME_TOP_IRQ_MASK 0x19
+#define CAM_LRME_WE_IRQ_MASK_0 0x2
+#define CAM_LRME_WE_IRQ_MASK_1 0x0
+#define CAM_LRME_FE_IRQ_MASK 0x0
+
+#define CAM_LRME_MAX_REG_PAIR_NUM 60
+
+/**
+ * enum cam_lrme_irq_set
+ *
+ * @CAM_LRME_IRQ_ENABLE  : Enable irqs (unmask top/FE/WE irq mask registers)
+ * @CAM_LRME_IRQ_DISABLE : Disable irqs (mask all top/FE/WE interrupts)
+ */
+enum cam_lrme_irq_set {
+	CAM_LRME_IRQ_ENABLE,
+	CAM_LRME_IRQ_DISABLE,
+};
+
+/**
+ * struct cam_lrme_cdm_info : information used to submit cdm command
+ *
+ * @cdm_handle : CDM handle acquired for this device
+ * @cdm_ops    : CDM ops used to build command buffers
+ * @cdm_cmd    : Preallocated CDM bl request, reused for each submit
+ */
+struct cam_lrme_cdm_info {
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+	struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_lrme_hw_work_data : Work data for HW work queue
+ *
+ * Holds the irq status words latched in the top-half handler
+ * (cam_lrme_hw_irq) for the bottom half (cam_lrme_hw_process_irq).
+ *
+ * @top_irq_status : Top registers irq status
+ * @fe_irq_status  : FE engine irq status
+ * @we_irq_status  : WE engine irq status (status_0 and status_1)
+ */
+struct cam_lrme_hw_work_data {
+	uint32_t top_irq_status;
+	uint32_t fe_irq_status;
+	uint32_t we_irq_status[2];
+};
+
+/**
+ * enum cam_lrme_core_state : LRME core states
+ *
+ * @CAM_LRME_CORE_STATE_UNINIT        : LRME is in uninit state
+ * @CAM_LRME_CORE_STATE_INIT          : LRME is in init state after probe
+ * @CAM_LRME_CORE_STATE_IDLE          : LRME is in idle state. Hardware is in
+ *                                      this state when no frame is processing
+ *                                      or waiting for this core.
+ * @CAM_LRME_CORE_STATE_REQ_PENDING   : LRME is in pending state. One frame is
+ *                                      waiting for processing
+ * @CAM_LRME_CORE_STATE_PROCESSING    : LRME is in processing state. HW manager
+ *                                      can submit one more frame to HW
+ * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Indicate two frames are inside HW.
+ * @CAM_LRME_CORE_STATE_RECOVERY      : Indicate core is in the process of reset
+ * @CAM_LRME_CORE_STATE_MAX           : upper limit of states
+ */
+enum cam_lrme_core_state {
+	CAM_LRME_CORE_STATE_UNINIT,
+	CAM_LRME_CORE_STATE_INIT,
+	CAM_LRME_CORE_STATE_IDLE,
+	CAM_LRME_CORE_STATE_REQ_PENDING,
+	CAM_LRME_CORE_STATE_PROCESSING,
+	CAM_LRME_CORE_STATE_REQ_PROC_PEND,
+	CAM_LRME_CORE_STATE_RECOVERY,
+	CAM_LRME_CORE_STATE_MAX,
+};
+
+/**
+ * struct cam_lrme_core : LRME HW core information
+ *
+ * @hw_info        : Pointer to base HW information structure
+ * @device_iommu   : Device iommu handle
+ * @cdm_iommu      : CDM iommu handle
+ * @hw_caps        : Hardware capabilities
+ * @state          : Hardware state (see enum cam_lrme_core_state)
+ * @reset_complete : Reset completion, signalled by the irq handler
+ * @work           : Hardware workqueue to handle irq events
+ * @work_data      : Work data used by hardware workqueue, one slot per
+ *                   workq task (CAM_LRME_HW_WORKQ_NUM_TASK)
+ * @hw_mgr_cb      : Hw manager callback
+ * @req_proc       : Pointer to the processing frame request
+ * @req_submit     : Pointer to the frame request waiting for processing
+ * @hw_cdm_info    : CDM information used by this device
+ * @hw_idx         : Hardware index
+ */
+struct cam_lrme_core {
+	struct cam_lrme_hw_info   *hw_info;
+	struct cam_iommu_handle    device_iommu;
+	struct cam_iommu_handle    cdm_iommu;
+	struct cam_lrme_dev_cap    hw_caps;
+	enum cam_lrme_core_state   state;
+	struct completion          reset_complete;
+	struct cam_req_mgr_core_workq *work;
+	struct cam_lrme_hw_work_data   work_data[CAM_LRME_HW_WORKQ_NUM_TASK];
+	struct cam_lrme_hw_cmd_set_cb  hw_mgr_cb;
+	struct cam_lrme_frame_request *req_proc;
+	struct cam_lrme_frame_request *req_submit;
+	struct cam_lrme_cdm_info      *hw_cdm_info;
+	uint32_t                       hw_idx;
+};
+
+/**
+ * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers
+ *
+ * All fields are register offsets in bytes from the LRME register base
+ * (soc_info reg_map[0].mem_base).
+ *
+ * @hw_version    : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset      : Offset of sw_reset register
+ * @cgc_override  : Offset of cgc_override register
+ * @irq_mask      : Offset of irq_mask register
+ * @irq_clear     : Offset of irq_clear register
+ * @irq_cmd       : Offset of irq_cmd register
+ * @irq_status    : Offset of irq_status register
+ * @cmd           : Offset of cmd register
+ * @irq_set       : Offset of irq_set register
+ * @misr_reset    : Offset of misr_reset register
+ * @security_cfg  : Offset of security_cfg register
+ * @pwr_iso_cfg   : Offset of pwr_iso_cfg register
+ * @pwr_iso_seed  : Offset of pwr_iso_seed register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @spare         : Offset of spare register
+ */
+struct cam_lrme_bus_rd_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_cmd;
+	uint32_t irq_status;
+	uint32_t cmd;
+	uint32_t irq_set;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t pwr_iso_cfg;
+	uint32_t pwr_iso_seed;
+	uint32_t test_bus_ctrl;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers
+ *
+ * All fields are register offsets in bytes from the LRME register base
+ * (soc_info reg_map[0].mem_base).
+ *
+ * @hw_version        : Offset of hw_version register
+ * @hw_capability     : Offset of hw_capability register
+ * @sw_reset          : Offset of sw_reset register
+ * @cgc_override      : Offset of cgc_override register
+ * @misr_reset        : Offset of misr_reset register
+ * @pwr_iso_cfg       : Offset of pwr_iso_cfg register
+ * @test_bus_ctrl     : Offset of test_bus_ctrl register
+ * @composite_mask_0  : Offset of composite_mask_0 register
+ * @irq_mask_0        : Offset of irq_mask_0 register
+ * @irq_mask_1        : Offset of irq_mask_1 register
+ * @irq_clear_0       : Offset of irq_clear_0 register
+ * @irq_clear_1       : Offset of irq_clear_1 register
+ * @irq_status_0      : Offset of irq_status_0 register
+ * @irq_status_1      : Offset of irq_status_1 register
+ * @irq_cmd           : Offset of irq_cmd register
+ * @irq_set_0         : Offset of irq_set_0 register
+ * @irq_set_1         : Offset of irq_set_1 register
+ * @addr_fifo_status  : Offset of addr_fifo_status register
+ * @frame_header_cfg0 : Offset of frame_header_cfg0 register
+ * @frame_header_cfg1 : Offset of frame_header_cfg1 register
+ * @spare             : Offset of spare register
+ */
+struct cam_lrme_bus_wr_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t misr_reset;
+	uint32_t pwr_iso_cfg;
+	uint32_t test_bus_ctrl;
+	uint32_t composite_mask_0;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_clear_0;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_0;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd;
+	uint32_t irq_set_0;
+	uint32_t irq_set_1;
+	uint32_t addr_fifo_status;
+	uint32_t frame_header_cfg0;
+	uint32_t frame_header_cfg1;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_rd_bus_client : Offset of FE registers
+ *
+ * All fields are per-client register offsets in bytes from the LRME
+ * register base (soc_info reg_map[0].mem_base).
+ *
+ * @core_cfg                : Offset of core_cfg register
+ * @ccif_meta_data          : Offset of ccif_meta_data register
+ * @addr_image              : Offset of addr_image register
+ * @rd_buffer_size          : Offset of rd_buffer_size register
+ * @rd_stride               : Offset of rd_stride register
+ * @unpack_cfg_0            : Offset of unpack_cfg_0 register
+ * @latency_buff_allocation : Offset of latency_buff_allocation register
+ * @burst_limit_cfg         : Offset of burst_limit_cfg register
+ * @misr_cfg_0              : Offset of misr_cfg_0 register
+ * @misr_cfg_1              : Offset of misr_cfg_1 register
+ * @misr_rd_val             : Offset of misr_rd_val register
+ * @debug_status_cfg        : Offset of debug_status_cfg register
+ * @debug_status_0          : Offset of debug_status_0 register
+ * @debug_status_1          : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_rd_bus_client {
+	uint32_t core_cfg;
+	uint32_t ccif_meta_data;
+	uint32_t addr_image;
+	uint32_t rd_buffer_size;
+	uint32_t rd_stride;
+	uint32_t unpack_cfg_0;
+	uint32_t latency_buff_allocation;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_wr_bus_client : Offset of WE registers
+ *
+ * All fields are per-client register offsets in bytes from the LRME
+ * register base (soc_info reg_map[0].mem_base).
+ *
+ * @status_0                  : Offset of status_0 register
+ * @status_1                  : Offset of status_1 register
+ * @cfg                       : Offset of cfg register
+ * @addr_frame_header         : Offset of addr_frame_header register
+ * @frame_header_cfg          : Offset of frame_header_cfg register
+ * @addr_image                : Offset of addr_image register
+ * @addr_image_offset         : Offset of addr_image_offset register
+ * @buffer_width_cfg          : Offset of buffer_width_cfg register
+ * @buffer_height_cfg         : Offset of buffer_height_cfg register
+ * @packer_cfg                : Offset of packer_cfg register
+ * @wr_stride                 : Offset of wr_stride register
+ * @irq_subsample_cfg_period  : Offset of irq_subsample_cfg_period register
+ * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register
+ * @burst_limit_cfg           : Offset of burst_limit_cfg register
+ * @misr_cfg                  : Offset of misr_cfg register
+ * @misr_rd_word_sel          : Offset of misr_rd_word_sel register
+ * @misr_val                  : Offset of misr_val register
+ * @debug_status_cfg          : Offset of debug_status_cfg register
+ * @debug_status_0            : Offset of debug_status_0 register
+ * @debug_status_1            : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_wr_bus_client {
+	uint32_t status_0;
+	uint32_t status_1;
+	uint32_t cfg;
+	uint32_t addr_frame_header;
+	uint32_t frame_header_cfg;
+	uint32_t addr_image;
+	uint32_t addr_image_offset;
+	uint32_t buffer_width_cfg;
+	uint32_t buffer_height_cfg;
+	uint32_t packer_cfg;
+	uint32_t wr_stride;
+	uint32_t irq_subsample_cfg_period;
+	uint32_t irq_subsample_cfg_pattern;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg;
+	uint32_t misr_rd_word_sel;
+	uint32_t misr_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_rd_hw_info : FE registers information
+ *
+ * @common_reg     : FE common register offsets
+ * @bus_client_reg : Per-client FE bus register offsets, one entry per
+ *                   read client (CAM_LRME_BUS_RD_MAX_CLIENTS)
+ */
+struct cam_lrme_bus_rd_hw_info {
+	struct cam_lrme_bus_rd_reg_common common_reg;
+	struct cam_lrme_bus_rd_bus_client
+		bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_bus_wr_hw_info : WE engine registers information
+ *
+ * @common_reg     : WE common register offsets
+ * @bus_client_reg : Per-client WE bus register offsets, one entry per
+ *                   write client (CAM_LRME_BUS_WR_MAX_CLIENTS)
+ */
+struct cam_lrme_bus_wr_hw_info {
+	struct cam_lrme_bus_wr_reg_common common_reg;
+	struct cam_lrme_bus_wr_bus_client
+		bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_clc_reg : Offset of clc registers
+ *
+ * All fields are register offsets in bytes from the LRME register base
+ * (soc_info reg_map[0].mem_base).
+ *
+ * @clc_hw_version                 : Offset of clc_hw_version register
+ * @clc_hw_status                  : Offset of clc_hw_status register
+ * @clc_hw_status_dbg              : Offset of clc_hw_status_dbg register
+ * @clc_module_cfg                 : Offset of clc_module_cfg register
+ * @clc_moduleformat               : Offset of clc_moduleformat register
+ * @clc_rangestep                  : Offset of clc_rangestep register
+ * @clc_offset                     : Offset of clc_offset register
+ * @clc_maxallowedsad              : Offset of clc_maxallowedsad register
+ * @clc_minallowedtarmad           : Offset of clc_minallowedtarmad register
+ * @clc_meaningfulsaddiff          : Offset of clc_meaningfulsaddiff register
+ * @clc_minsaddiffdenom            : Offset of clc_minsaddiffdenom register
+ * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register
+ * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register
+ * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register
+ * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register
+ * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register
+ * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register
+ * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register
+ * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register
+ * @clc_ds_crop_horizontal         : Offset of clc_ds_crop_horizontal register
+ * @clc_ds_crop_vertical           : Offset of clc_ds_crop_vertical register
+ * @clc_tar_pd_unpacker            : Offset of clc_tar_pd_unpacker register
+ * @clc_ref_pd_unpacker            : Offset of clc_ref_pd_unpacker register
+ * @clc_sw_override                : Offset of clc_sw_override register
+ * @clc_tar_height                 : Offset of clc_tar_height register
+ * @clc_ref_height                 : Offset of clc_ref_height register
+ * @clc_test_bus_ctrl              : Offset of clc_test_bus_ctrl register
+ * @clc_spare                      : Offset of clc_spare register
+ */
+struct cam_lrme_clc_reg {
+	uint32_t clc_hw_version;
+	uint32_t clc_hw_status;
+	uint32_t clc_hw_status_dbg;
+	uint32_t clc_module_cfg;
+	uint32_t clc_moduleformat;
+	uint32_t clc_rangestep;
+	uint32_t clc_offset;
+	uint32_t clc_maxallowedsad;
+	uint32_t clc_minallowedtarmad;
+	uint32_t clc_meaningfulsaddiff;
+	uint32_t clc_minsaddiffdenom;
+	uint32_t clc_robustnessmeasuredistmap_0;
+	uint32_t clc_robustnessmeasuredistmap_1;
+	uint32_t clc_robustnessmeasuredistmap_2;
+	uint32_t clc_robustnessmeasuredistmap_3;
+	uint32_t clc_robustnessmeasuredistmap_4;
+	uint32_t clc_robustnessmeasuredistmap_5;
+	uint32_t clc_robustnessmeasuredistmap_6;
+	uint32_t clc_robustnessmeasuredistmap_7;
+	uint32_t clc_ds_crop_horizontal;
+	uint32_t clc_ds_crop_vertical;
+	uint32_t clc_tar_pd_unpacker;
+	uint32_t clc_ref_pd_unpacker;
+	uint32_t clc_sw_override;
+	uint32_t clc_tar_height;
+	uint32_t clc_ref_height;
+	uint32_t clc_test_bus_ctrl;
+	uint32_t clc_spare;
+};
+
+/**
+ * struct cam_lrme_titan_reg : Offset of LRME top registers
+ *
+ * All fields are register offsets in bytes from the LRME register base
+ * (soc_info reg_map[0].mem_base).
+ *
+ * @top_hw_version       : Offset of top_hw_version register
+ * @top_titan_version    : Offset of top_titan_version register
+ * @top_rst_cmd          : Offset of top_rst_cmd register
+ * @top_core_clk_cfg     : Offset of top_core_clk_cfg register
+ * @top_irq_status       : Offset of top_irq_status register
+ * @top_irq_mask         : Offset of top_irq_mask register
+ * @top_irq_clear        : Offset of top_irq_clear register
+ * @top_irq_set          : Offset of top_irq_set register
+ * @top_irq_cmd          : Offset of top_irq_cmd register
+ * @top_violation_status : Offset of top_violation_status register
+ * @top_spare            : Offset of top_spare register
+ */
+struct cam_lrme_titan_reg {
+	uint32_t top_hw_version;
+	uint32_t top_titan_version;
+	uint32_t top_rst_cmd;
+	uint32_t top_core_clk_cfg;
+	uint32_t top_irq_status;
+	uint32_t top_irq_mask;
+	uint32_t top_irq_clear;
+	uint32_t top_irq_set;
+	uint32_t top_irq_cmd;
+	uint32_t top_violation_status;
+	uint32_t top_spare;
+};
+
+/**
+ * struct cam_lrme_hw_info : LRME registers information
+ *
+ * @clc_reg    : LRME CLC registers
+ * @bus_rd_reg : LRME FE registers
+ * @bus_wr_reg : LRME WE registers
+ * @titan_reg  : LRME top registers
+ */
+struct cam_lrme_hw_info {
+	struct cam_lrme_clc_reg clc_reg;
+	struct cam_lrme_bus_rd_hw_info bus_rd_reg;
+	struct cam_lrme_bus_wr_hw_info bus_wr_reg;
+	struct cam_lrme_titan_reg titan_reg;
+};
+
+int cam_lrme_hw_process_irq(void *priv, void *data);
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+ uint32_t arg_size);
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+ uint32_t arg_size);
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data);
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+ void *cmd_args, uint32_t arg_size);
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_dev_cap *hw_caps);
+int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size);
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size);
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, enum cam_lrme_irq_set set);
+
+#endif /* _CAM_LRME_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
new file mode 100644
index 0000000..2e63752
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_reg.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
+ struct cam_hw_info *lrme_hw)
+{
+ int rc, i;
+ struct cam_cdm_bl_request *cdm_cmd;
+ struct cam_cdm_acquire_data cdm_acquire;
+ struct cam_lrme_cdm_info *hw_cdm_info;
+
+ hw_cdm_info = kzalloc(sizeof(struct cam_lrme_cdm_info),
+ GFP_KERNEL);
+ if (!hw_cdm_info) {
+ CAM_ERR(CAM_LRME, "No memory for hw_cdm_info");
+ return -ENOMEM;
+ }
+
+ cdm_cmd = kzalloc((sizeof(struct cam_cdm_bl_request) +
+ ((CAM_LRME_MAX_HW_ENTRIES - 1) *
+ sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+ if (!cdm_cmd) {
+ CAM_ERR(CAM_LRME, "No memory for cdm_cmd");
+ kfree(hw_cdm_info);
+ return -ENOMEM;
+ }
+
+ memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+ strlcpy(cdm_acquire.identifier, "lrmecdm", sizeof("lrmecdm"));
+ cdm_acquire.cell_index = lrme_hw->soc_info.index;
+ cdm_acquire.handle = 0;
+ cdm_acquire.userdata = hw_cdm_info;
+ cdm_acquire.cam_cdm_callback = NULL;
+ cdm_acquire.id = CAM_CDM_VIRTUAL;
+ cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map;
+ for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++)
+ cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i];
+
+ rc = cam_cdm_acquire(&cdm_acquire);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Can't acquire cdm");
+ goto error;
+ }
+
+ hw_cdm_info->cdm_cmd = cdm_cmd;
+ hw_cdm_info->cdm_ops = cdm_acquire.ops;
+ hw_cdm_info->cdm_handle = cdm_acquire.handle;
+
+ lrme_core->hw_cdm_info = hw_cdm_info;
+ CAM_DBG(CAM_LRME, "cdm acquire done");
+
+ return 0;
+error:
+ kfree(cdm_cmd);
+ kfree(hw_cdm_info);
+ return rc;
+}
+
+static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
+{
+ struct cam_hw_info *lrme_hw;
+ struct cam_hw_intf lrme_hw_intf;
+ struct cam_lrme_core *lrme_core;
+ const struct of_device_id *match_dev = NULL;
+ struct cam_lrme_hw_info *hw_info;
+ int rc, i;
+
+ lrme_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME, "No memory to create lrme_hw");
+ return -ENOMEM;
+ }
+
+ lrme_core = kzalloc(sizeof(struct cam_lrme_core), GFP_KERNEL);
+ if (!lrme_core) {
+ CAM_ERR(CAM_LRME, "No memory to create lrme_core");
+ kfree(lrme_hw);
+ return -ENOMEM;
+ }
+
+ lrme_hw->core_info = lrme_core;
+ lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+ lrme_hw->soc_info.pdev = pdev;
+ lrme_hw->soc_info.dev = &pdev->dev;
+ lrme_hw->soc_info.dev_name = pdev->name;
+ lrme_hw->open_count = 0;
+ lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+
+ mutex_init(&lrme_hw->hw_mutex);
+ spin_lock_init(&lrme_hw->hw_lock);
+ init_completion(&lrme_hw->hw_complete);
+ init_completion(&lrme_core->reset_complete);
+
+ rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
+ CAM_LRME_HW_WORKQ_NUM_TASK,
+ &lrme_core->work, CRM_WORKQ_USAGE_IRQ);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
+ goto free_memory;
+ }
+
+ for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++)
+ lrme_core->work->task.pool[i].payload =
+ &lrme_core->work_data[i];
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev || !match_dev->data) {
+ CAM_ERR(CAM_LRME, "No Of_match data, %pK", match_dev);
+ rc = -EINVAL;
+ goto destroy_workqueue;
+ }
+ hw_info = (struct cam_lrme_hw_info *)match_dev->data;
+ lrme_core->hw_info = hw_info;
+
+ rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info,
+ cam_lrme_hw_irq, lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc);
+ goto destroy_workqueue;
+ }
+
+ rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire cdm");
+ goto deinit_platform_res;
+ }
+
+ rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Get iommu handle failed");
+ goto release_cdm;
+ }
+
+ rc = cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_ATTACH);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "LRME attach iommu handle failed, rc=%d", rc);
+ goto destroy_smmu;
+ }
+
+ rc = cam_lrme_hw_start(lrme_hw, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to hw init, rc=%d", rc);
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc);
+ if (cam_lrme_hw_stop(lrme_hw, NULL, 0))
+ CAM_ERR(CAM_LRME, "Failed in hw deinit");
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_hw_stop(lrme_hw, NULL, 0);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc);
+ goto detach_smmu;
+ }
+
+ lrme_core->hw_idx = lrme_hw->soc_info.index;
+ lrme_hw_intf.hw_priv = lrme_hw;
+ lrme_hw_intf.hw_idx = lrme_hw->soc_info.index;
+ lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps;
+ lrme_hw_intf.hw_ops.init = NULL;
+ lrme_hw_intf.hw_ops.deinit = NULL;
+ lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset;
+ lrme_hw_intf.hw_ops.reserve = NULL;
+ lrme_hw_intf.hw_ops.release = NULL;
+ lrme_hw_intf.hw_ops.start = cam_lrme_hw_start;
+ lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop;
+ lrme_hw_intf.hw_ops.read = NULL;
+ lrme_hw_intf.hw_ops.write = NULL;
+ lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+ lrme_hw_intf.hw_type = CAM_HW_LRME;
+
+ rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles");
+ goto detach_smmu;
+ }
+
+ rc = cam_lrme_mgr_register_device(&lrme_hw_intf,
+ &lrme_core->device_iommu,
+ &lrme_core->cdm_iommu);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to register device");
+ goto detach_smmu;
+ }
+
+ platform_set_drvdata(pdev, lrme_hw);
+ CAM_DBG(CAM_LRME, "LRME-%d probe successful", lrme_hw_intf.hw_idx);
+
+ return rc;
+
+detach_smmu:
+ cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+ cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+release_cdm:
+ cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+ kfree(lrme_core->hw_cdm_info->cdm_cmd);
+ kfree(lrme_core->hw_cdm_info);
+deinit_platform_res:
+ if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info))
+ CAM_ERR(CAM_LRME, "Failed in soc deinit");
+ mutex_destroy(&lrme_hw->hw_mutex);
+destroy_workqueue:
+ cam_req_mgr_workq_destroy(&lrme_core->work);
+free_memory:
+ mutex_destroy(&lrme_hw->hw_mutex);
+ kfree(lrme_hw);
+ kfree(lrme_core);
+
+ return rc;
+}
+
+static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct cam_hw_info *lrme_hw;
+ struct cam_lrme_core *lrme_core;
+
+ lrme_hw = platform_get_drvdata(pdev);
+ if (!lrme_hw) {
+ CAM_ERR(CAM_LRME, "Invalid lrme_hw from fd_hw_intf");
+ rc = -ENODEV;
+ goto deinit_platform_res;
+ }
+
+ lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+ if (!lrme_core) {
+ CAM_ERR(CAM_LRME, "Invalid lrme_core from fd_hw");
+ rc = -EINVAL;
+ goto deinit_platform_res;
+ }
+
+ cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+ cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+ cam_lrme_mgr_deregister_device(lrme_core->hw_idx);
+
+ kfree(lrme_core->hw_cdm_info->cdm_cmd);
+ kfree(lrme_core->hw_cdm_info);
+ kfree(lrme_core);
+
+deinit_platform_res:
+ rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc);
+
+ mutex_destroy(&lrme_hw->hw_mutex);
+ kfree(lrme_hw);
+
+ return rc;
+}
+
+static const struct of_device_id cam_lrme_hw_dt_match[] = {
+ {
+ .compatible = "qcom,lrme",
+ .data = &cam_lrme10_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match);
+
+static struct platform_driver cam_lrme_hw_driver = {
+ .probe = cam_lrme_hw_dev_probe,
+ .remove = cam_lrme_hw_dev_remove,
+ .driver = {
+ .name = "cam_lrme_hw",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_lrme_hw_dt_match,
+ },
+};
+
+static int __init cam_lrme_hw_init_module(void)
+{
+ return platform_driver_register(&cam_lrme_hw_driver);
+}
+
+static void __exit cam_lrme_hw_exit_module(void)
+{
+ platform_driver_unregister(&cam_lrme_hw_driver);
+}
+
+module_init(cam_lrme_hw_init_module);
+module_exit(cam_lrme_hw_exit_module);
+MODULE_DESCRIPTION("CAM LRME HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
new file mode 100644
index 0000000..d16b174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -0,0 +1,200 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_INTF_H_
+#define _CAM_LRME_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_lrme.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+
+#define CAM_LRME_MAX_IO_BUFFER 2
+#define CAM_LRME_MAX_HW_ENTRIES 5
+
+#define CAM_LRME_BASE_IDX 0
+
+/**
+ * enum cam_lrme_hw_type : Enum for LRME HW type
+ *
+ * @CAM_HW_LRME : LRME HW type
+ */
+enum cam_lrme_hw_type {
+ CAM_HW_LRME,
+};
+
+/**
+ * enum cam_lrme_cb_type : HW manager call back type
+ *
+ * @CAM_LRME_CB_BUF_DONE : Indicate buf done has been generated
+ * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update
+ * @CAM_LRME_CB_PUT_FRAME : Request HW manager to put back the frame
+ * @CAM_LRME_CB_ERROR : Indicate error irq has been generated
+ */
+enum cam_lrme_cb_type {
+ CAM_LRME_CB_BUF_DONE = 1,
+ CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1,
+ CAM_LRME_CB_PUT_FRAME = 1 << 2,
+ CAM_LRME_CB_ERROR = 1 << 3,
+};
+
+/**
+ * enum cam_lrme_hw_cmd_type : HW CMD type
+ *
+ * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update
+ * @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback
+ * @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW
+ */
+enum cam_lrme_hw_cmd_type {
+ CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+ CAM_LRME_HW_CMD_REGISTER_CB,
+ CAM_LRME_HW_CMD_SUBMIT,
+};
+
+/**
+ * enum cam_lrme_hw_reset_type : Type of reset
+ *
+ * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset
+ * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset
+ */
+enum cam_lrme_hw_reset_type {
+ CAM_LRME_HW_RESET_TYPE_HW_RESET,
+ CAM_LRME_HW_RESET_TYPE_SW_RESET,
+};
+
+/**
+ * struct cam_lrme_frame_request : LRME frame request
+ *
+ * @frame_list : List head
+ * @req_id : Request ID
+ * @ctxt_to_hw_map : Information about context id, priority and device id
+ * @hw_device : Pointer to HW device
+ * @hw_update_entries : List of hw_update_entries
+ * @num_hw_update_entries : number of hw_update_entries
+ */
+struct cam_lrme_frame_request {
+ struct list_head frame_list;
+ uint64_t req_id;
+ void *ctxt_to_hw_map;
+ struct cam_lrme_device *hw_device;
+ struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
+ uint32_t num_hw_update_entries;
+};
+
+/**
+ * struct cam_lrme_hw_io_buffer : IO buffer information
+ *
+ * @valid : Indicate whether this IO config is valid
+ * @io_cfg : Pointer to IO configuration
+ * @num_buf : Number of buffers
+ * @num_plane : Number of planes
+ * @io_addr : List of IO address
+ */
+struct cam_lrme_hw_io_buffer {
+ bool valid;
+ struct cam_buf_io_cfg *io_cfg;
+ uint32_t num_buf;
+ uint32_t num_plane;
+ uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update
+ *
+ * @hw_device : Pointer to HW device
+ * @input_buf : List of input buffers
+ * @output_buf : List of output buffers
+ * @cmd_buf_addr : Pointer to available KMD buffer
+ * @size : Available KMD buffer size
+ * @config_buf_size : Size used to prepare update
+ */
+struct cam_lrme_hw_cmd_config_args {
+ struct cam_lrme_device *hw_device;
+ struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER];
+ struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER];
+ uint32_t *cmd_buf_addr;
+ uint32_t size;
+ uint32_t config_buf_size;
+};
+
+/**
+ * struct cam_lrme_hw_flush_args : Args for flush HW
+ *
+ * @ctxt_to_hw_map : Identity of context
+ * @req_to_flush : Pointer to the frame need to flush in
+ * case of single frame flush
+ * @flush_type : Flush type
+ */
+struct cam_lrme_hw_flush_args {
+ void *ctxt_to_hw_map;
+ struct cam_lrme_frame_request *req_to_flush;
+ uint32_t flush_type;
+};
+
+/**
+ * struct cam_lrme_hw_reset_args : Args for reset HW
+ *
+ * @reset_type : Enum cam_lrme_hw_reset_type
+ */
+struct cam_lrme_hw_reset_args {
+ uint32_t reset_type;
+};
+
+/**
+ * struct cam_lrme_hw_cb_args : HW manager callback args
+ *
+ * @cb_type : Callback event type
+ * @frame_req : Pointer to the frame associated with the cb
+ */
+struct cam_lrme_hw_cb_args {
+ uint32_t cb_type;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+/**
+ * struct cam_lrme_hw_cmd_set_cb : Args for set callback function
+ *
+ * @cam_lrme_hw_mgr_cb : Callback function pointer
+ * @data : Data sent along with callback function
+ */
+struct cam_lrme_hw_cmd_set_cb {
+ int (*cam_lrme_hw_mgr_cb)(void *data,
+ struct cam_lrme_hw_cb_args *args);
+ void *data;
+};
+
+/**
+ * struct cam_lrme_hw_submit_args : Args for submit request
+ *
+ * @hw_update_entries : List of hw update entries used to program registers
+ * @num_hw_update_entries : Number of hw update entries
+ * @frame_req : Pointer to the frame request
+ */
+struct cam_lrme_hw_submit_args {
+ struct cam_hw_update_entry *hw_update_entries;
+ uint32_t num_hw_update_entries;
+ struct cam_lrme_frame_request *frame_req;
+};
+
+#endif /* _CAM_LRME_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
new file mode 100644
index 0000000..39cfde7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
@@ -0,0 +1,193 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_REG_H_
+#define _CAM_LRME_HW_REG_H_
+
+#include "cam_lrme_hw_core.h"
+
+static struct cam_lrme_hw_info cam_lrme10_hw_info = {
+ .clc_reg = {
+ .clc_hw_version = 0x00000000,
+ .clc_hw_status = 0x00000004,
+ .clc_hw_status_dbg = 0x00000008,
+ .clc_module_cfg = 0x00000060,
+ .clc_moduleformat = 0x000000A8,
+ .clc_rangestep = 0x00000068,
+ .clc_offset = 0x0000006C,
+ .clc_maxallowedsad = 0x00000070,
+ .clc_minallowedtarmad = 0x00000074,
+ .clc_meaningfulsaddiff = 0x00000078,
+ .clc_minsaddiffdenom = 0x0000007C,
+ .clc_robustnessmeasuredistmap_0 = 0x00000080,
+ .clc_robustnessmeasuredistmap_1 = 0x00000084,
+ .clc_robustnessmeasuredistmap_2 = 0x00000088,
+ .clc_robustnessmeasuredistmap_3 = 0x0000008C,
+ .clc_robustnessmeasuredistmap_4 = 0x00000090,
+ .clc_robustnessmeasuredistmap_5 = 0x00000094,
+ .clc_robustnessmeasuredistmap_6 = 0x00000098,
+ .clc_robustnessmeasuredistmap_7 = 0x0000009C,
+ .clc_ds_crop_horizontal = 0x000000A0,
+ .clc_ds_crop_vertical = 0x000000A4,
+ .clc_tar_pd_unpacker = 0x000000AC,
+ .clc_ref_pd_unpacker = 0x000000B0,
+ .clc_sw_override = 0x000000B4,
+ .clc_tar_height = 0x000000B8,
+ .clc_ref_height = 0x000000BC,
+ .clc_test_bus_ctrl = 0x000001F8,
+ .clc_spare = 0x000001FC,
+ },
+ .bus_rd_reg = {
+ .common_reg = {
+ .hw_version = 0x00000200,
+ .hw_capability = 0x00000204,
+ .sw_reset = 0x00000208,
+ .cgc_override = 0x0000020C,
+ .irq_mask = 0x00000210,
+ .irq_clear = 0x00000214,
+ .irq_cmd = 0x00000218,
+ .irq_status = 0x0000021C,
+ .cmd = 0x00000220,
+ .irq_set = 0x00000224,
+ .misr_reset = 0x0000022C,
+ .security_cfg = 0x00000230,
+ .pwr_iso_cfg = 0x00000234,
+ .pwr_iso_seed = 0x00000238,
+ .test_bus_ctrl = 0x00000248,
+ .spare = 0x0000024C,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .core_cfg = 0x00000250,
+ .ccif_meta_data = 0x00000254,
+ .addr_image = 0x00000258,
+ .rd_buffer_size = 0x0000025C,
+ .rd_stride = 0x00000260,
+ .unpack_cfg_0 = 0x00000264,
+ .latency_buff_allocation = 0x00000278,
+ .burst_limit_cfg = 0x00000280,
+ .misr_cfg_0 = 0x00000284,
+ .misr_cfg_1 = 0x00000288,
+ .misr_rd_val = 0x0000028C,
+ .debug_status_cfg = 0x00000290,
+ .debug_status_0 = 0x00000294,
+ .debug_status_1 = 0x00000298,
+ },
+ /* bus client 1 */
+ {
+ .core_cfg = 0x000002F0,
+ .ccif_meta_data = 0x000002F4,
+ .addr_image = 0x000002F8,
+ .rd_buffer_size = 0x000002FC,
+ .rd_stride = 0x00000300,
+ .unpack_cfg_0 = 0x00000304,
+ .latency_buff_allocation = 0x00000318,
+ .burst_limit_cfg = 0x00000320,
+ .misr_cfg_0 = 0x00000324,
+ .misr_cfg_1 = 0x00000328,
+ .misr_rd_val = 0x0000032C,
+ .debug_status_cfg = 0x00000330,
+ .debug_status_0 = 0x00000334,
+ .debug_status_1 = 0x00000338,
+ },
+ },
+ },
+ .bus_wr_reg = {
+ .common_reg = {
+ .hw_version = 0x00000500,
+ .hw_capability = 0x00000504,
+ .sw_reset = 0x00000508,
+ .cgc_override = 0x0000050C,
+ .misr_reset = 0x000005C8,
+ .pwr_iso_cfg = 0x000005CC,
+ .test_bus_ctrl = 0x0000061C,
+ .composite_mask_0 = 0x00000510,
+ .irq_mask_0 = 0x00000544,
+ .irq_mask_1 = 0x00000548,
+ .irq_clear_0 = 0x00000550,
+ .irq_clear_1 = 0x00000554,
+ .irq_status_0 = 0x0000055C,
+ .irq_status_1 = 0x00000560,
+ .irq_cmd = 0x00000568,
+ .irq_set_0 = 0x000005BC,
+ .irq_set_1 = 0x000005C0,
+ .addr_fifo_status = 0x000005A8,
+ .frame_header_cfg0 = 0x000005AC,
+ .frame_header_cfg1 = 0x000005B0,
+ .spare = 0x00000620,
+ },
+ .bus_client_reg = {
+ /* bus client 0 */
+ {
+ .status_0 = 0x00000700,
+ .status_1 = 0x00000704,
+ .cfg = 0x00000708,
+ .addr_frame_header = 0x0000070C,
+ .frame_header_cfg = 0x00000710,
+ .addr_image = 0x00000714,
+ .addr_image_offset = 0x00000718,
+ .buffer_width_cfg = 0x0000071C,
+ .buffer_height_cfg = 0x00000720,
+ .packer_cfg = 0x00000724,
+ .wr_stride = 0x00000728,
+ .irq_subsample_cfg_period = 0x00000748,
+ .irq_subsample_cfg_pattern = 0x0000074C,
+ .burst_limit_cfg = 0x0000075C,
+ .misr_cfg = 0x00000760,
+ .misr_rd_word_sel = 0x00000764,
+ .misr_val = 0x00000768,
+ .debug_status_cfg = 0x0000076C,
+ .debug_status_0 = 0x00000770,
+ .debug_status_1 = 0x00000774,
+ },
+ /* bus client 1 */
+ {
+ .status_0 = 0x00000800,
+ .status_1 = 0x00000804,
+ .cfg = 0x00000808,
+ .addr_frame_header = 0x0000080C,
+ .frame_header_cfg = 0x00000810,
+ .addr_image = 0x00000814,
+ .addr_image_offset = 0x00000818,
+ .buffer_width_cfg = 0x0000081C,
+ .buffer_height_cfg = 0x00000820,
+ .packer_cfg = 0x00000824,
+ .wr_stride = 0x00000828,
+ .irq_subsample_cfg_period = 0x00000848,
+ .irq_subsample_cfg_pattern = 0x0000084C,
+ .burst_limit_cfg = 0x0000085C,
+ .misr_cfg = 0x00000860,
+ .misr_rd_word_sel = 0x00000864,
+ .misr_val = 0x00000868,
+ .debug_status_cfg = 0x0000086C,
+ .debug_status_0 = 0x00000870,
+ .debug_status_1 = 0x00000874,
+ },
+ },
+ },
+ .titan_reg = {
+ .top_hw_version = 0x00000900,
+ .top_titan_version = 0x00000904,
+ .top_rst_cmd = 0x00000908,
+ .top_core_clk_cfg = 0x00000920,
+ .top_irq_status = 0x0000090C,
+ .top_irq_mask = 0x00000910,
+ .top_irq_clear = 0x00000914,
+ .top_irq_set = 0x00000918,
+ .top_irq_cmd = 0x0000091C,
+ .top_violation_status = 0x00000924,
+ .top_spare = 0x000009FC,
+ },
+};
+
+#endif /* _CAM_LRME_HW_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
new file mode 100644
index 0000000..75de0dd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ struct cam_ahb_vote ahb_vote;
+ struct cam_axi_vote axi_vote;
+ int rc = 0;
+
+ ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ ahb_vote.vote.level = CAM_SVS_VOTE;
+ axi_vote.compressed_bw = 7200000;
+ axi_vote.uncompressed_bw = 7200000;
+ rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc);
+ return -EFAULT;
+ }
+
+ rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+ true);
+ if (rc) {
+ CAM_ERR(CAM_LRME,
+ "Failed to enable platform resource, rc %d", rc);
+ goto stop_cpas;
+ }
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE);
+
+ return rc;
+
+stop_cpas:
+ if (cam_cpas_stop(soc_private->cpas_handle))
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ struct cam_lrme_soc_private *soc_private;
+ int rc = 0;
+
+ soc_private = soc_info->soc_private;
+
+ cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE);
+
+ rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to disable platform resource");
+ return rc;
+ }
+ rc = cam_cpas_stop(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+ return rc;
+}
+
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data)
+{
+ struct cam_lrme_soc_private *soc_private;
+ struct cam_cpas_register_params cpas_register_param;
+ int rc;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc);
+ return rc;
+ }
+
+ rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+ private_data);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d",
+ rc);
+ return rc;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_lrme_soc_private), GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto release_res;
+ }
+ soc_info->soc_private = soc_private;
+
+ memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+ strlcpy(cpas_register_param.identifier,
+ "lrmecpas", CAM_HW_IDENTIFIER_LENGTH);
+ cpas_register_param.cell_index = soc_info->index;
+ cpas_register_param.dev = &soc_info->pdev->dev;
+ cpas_register_param.userdata = private_data;
+ cpas_register_param.cam_cpas_client_cb = NULL;
+
+ rc = cam_cpas_register_client(&cpas_register_param);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "CPAS registration failed");
+ goto free_soc_private;
+ }
+ soc_private->cpas_handle = cpas_register_param.client_handle;
+ CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle);
+
+ return rc;
+
+free_soc_private:
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+release_res:
+ cam_soc_util_release_platform_resource(soc_info);
+
+ return rc;
+}
+
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+ struct cam_lrme_soc_private *soc_private =
+ (struct cam_lrme_soc_private *)soc_info->soc_private;
+ int rc;
+
+ rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d",
+ soc_private->cpas_handle, rc);
+
+ rc = cam_soc_util_release_platform_resource(soc_info);
+ if (rc)
+ CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc);
+
+ kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
+
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
new file mode 100644
index 0000000..44e8486
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_LRME_HW_SOC_H_
+#define _CAM_LRME_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+struct cam_lrme_soc_private {
+ uint32_t cpas_handle;
+};
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+ irq_handler_t irq_handler, void *private_data);
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_LRME_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 88efcb5..6ad0934 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msm_ion.h>
+#include <linux/slab.h>
#include <asm/cacheflush.h>
#include "cam_req_mgr_util.h"
@@ -299,7 +300,40 @@ int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
-static int cam_mem_util_get_ion_buffer(size_t len,
+static int cam_mem_util_get_dma_buf(size_t len,
+ size_t align,
+ unsigned int heap_id_mask,
+ unsigned int flags,
+ struct ion_handle **hdl,
+ struct dma_buf **buf)
+{
+ int rc = 0;
+
+ if (!hdl || !buf) {
+ CAM_ERR(CAM_CRM, "Invalid params");
+ return -EINVAL;
+ }
+
+ *hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
+ if (IS_ERR_OR_NULL(*hdl))
+ return -ENOMEM;
+
+ *buf = ion_share_dma_buf(tbl.client, *hdl);
+ if (IS_ERR_OR_NULL(*buf)) {
+ CAM_ERR(CAM_CRM, "get dma buf fail");
+ rc = -EINVAL;
+ goto get_buf_fail;
+ }
+
+ return rc;
+
+get_buf_fail:
+ ion_free(tbl.client, *hdl);
+ return rc;
+
+}
+
+static int cam_mem_util_get_dma_buf_fd(size_t len,
size_t align,
unsigned int heap_id_mask,
unsigned int flags,
@@ -308,13 +342,18 @@ static int cam_mem_util_get_ion_buffer(size_t len,
{
int rc = 0;
+ if (!hdl || !fd) {
+ CAM_ERR(CAM_CRM, "Invalid params");
+ return -EINVAL;
+ }
+
*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
if (IS_ERR_OR_NULL(*hdl))
return -ENOMEM;
*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
if (*fd < 0) {
- CAM_ERR(CAM_CRM, "dma buf get fd fail");
+ CAM_ERR(CAM_CRM, "get fd fail");
rc = -EINVAL;
goto get_fd_fail;
}
@@ -346,7 +385,7 @@ static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
else
ion_flag &= ~ION_FLAG_CACHED;
- rc = cam_mem_util_get_ion_buffer(cmd->len,
+ rc = cam_mem_util_get_dma_buf_fd(cmd->len,
cmd->align,
heap_id,
ion_flag,
@@ -441,7 +480,7 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
}
} else {
for (i = 0; i < num_hdls; i++) {
- rc = cam_smmu_map_iova(mmu_hdls[i],
+ rc = cam_smmu_map_user_iova(mmu_hdls[i],
fd,
dir,
(dma_addr_t *)hw_vaddr,
@@ -462,7 +501,7 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
else
for (--i; i > 0; i--)
- cam_smmu_unmap_iova(mmu_hdls[i],
+ cam_smmu_unmap_user_iova(mmu_hdls[i],
fd,
CAM_SMMU_REGION_IO);
return rc;
@@ -530,6 +569,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].fd = ion_fd;
+ tbl.bufq[idx].dma_buf = NULL;
tbl.bufq[idx].flags = cmd->flags;
tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
@@ -615,6 +655,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].fd = cmd->fd;
+ tbl.bufq[idx].dma_buf = NULL;
tbl.bufq[idx].flags = cmd->flags;
tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
@@ -645,7 +686,8 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
}
static int cam_mem_util_unmap_hw_va(int32_t idx,
- enum cam_smmu_region_id region)
+ enum cam_smmu_region_id region,
+ enum cam_smmu_mapping_client client)
{
int i;
uint32_t flags;
@@ -672,15 +714,27 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
}
} else {
for (i = 0; i < num_hdls; i++) {
- rc = cam_smmu_unmap_iova(mmu_hdls[i],
- fd,
- region);
+ if (client == CAM_SMMU_MAPPING_USER) {
+ rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
+ fd, region);
+ } else if (client == CAM_SMMU_MAPPING_KERNEL) {
+ rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
+ tbl.bufq[idx].dma_buf, region);
+ } else {
+ CAM_ERR(CAM_CRM,
+ "invalid caller for unmapping : %d",
+ client);
+ rc = -EINVAL;
+ }
if (rc < 0)
goto unmap_end;
}
}
+ return rc;
+
unmap_end:
+ CAM_ERR(CAM_CRM, "unmapping failed");
return rc;
}
@@ -693,7 +747,7 @@ static void cam_mem_mgr_unmap_active_buf(int idx)
else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
region = CAM_SMMU_REGION_IO;
- cam_mem_util_unmap_hw_va(idx, region);
+ cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
}
static int cam_mem_mgr_cleanup_table(void)
@@ -748,7 +802,8 @@ void cam_mem_mgr_deinit(void)
mutex_destroy(&tbl.m_lock);
}
-static int cam_mem_util_unmap(int32_t idx)
+static int cam_mem_util_unmap(int32_t idx,
+ enum cam_smmu_mapping_client client)
{
int rc = 0;
enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
@@ -775,7 +830,7 @@ static int cam_mem_util_unmap(int32_t idx)
if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
- rc = cam_mem_util_unmap_hw_va(idx, region);
+ rc = cam_mem_util_unmap_hw_va(idx, region, client);
mutex_lock(&tbl.bufq[idx].q_lock);
@@ -786,9 +841,10 @@ static int cam_mem_util_unmap(int32_t idx)
sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
CAM_DBG(CAM_CRM,
- "Ion handle at idx = %d freeing = %pK, fd = %d, imported %d",
+ "Ion handle at idx = %d freeing = %pK, fd = %d, imported %d dma_buf %pK",
idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
- tbl.bufq[idx].is_imported);
+ tbl.bufq[idx].is_imported,
+ tbl.bufq[idx].dma_buf);
if (tbl.bufq[idx].i_hdl) {
ion_free(tbl.client, tbl.bufq[idx].i_hdl);
@@ -796,6 +852,7 @@ static int cam_mem_util_unmap(int32_t idx)
}
tbl.bufq[idx].fd = -1;
+ tbl.bufq[idx].dma_buf = NULL;
tbl.bufq[idx].is_imported = false;
tbl.bufq[idx].len = 0;
tbl.bufq[idx].num_hdl = 0;
@@ -833,7 +890,7 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
}
CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
- rc = cam_mem_util_unmap(idx);
+ rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
return rc;
}
@@ -842,17 +899,19 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
struct cam_mem_mgr_memory_desc *out)
{
struct ion_handle *hdl;
- int ion_fd;
+ struct dma_buf *buf = NULL;
+ int ion_fd = -1;
int rc = 0;
uint32_t heap_id;
int32_t ion_flag = 0;
uint64_t kvaddr;
dma_addr_t iova = 0;
size_t request_len = 0;
- int32_t idx;
uint32_t mem_handle;
+ int32_t idx;
int32_t smmu_hdl = 0;
int32_t num_hdl = 0;
+
enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
if (!inp || !out) {
@@ -874,18 +933,18 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
- rc = cam_mem_util_get_ion_buffer(inp->size,
+ rc = cam_mem_util_get_dma_buf(inp->size,
inp->align,
heap_id,
ion_flag,
&hdl,
- &ion_fd);
+ &buf);
if (rc) {
CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
goto ion_fail;
} else {
- CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
+ CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
}
rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
@@ -908,8 +967,8 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
region = CAM_SMMU_REGION_IO;
}
- rc = cam_smmu_map_iova(inp->smmu_hdl,
- ion_fd,
+ rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
+ buf,
CAM_SMMU_MAP_RW,
&iova,
&request_len,
@@ -931,7 +990,8 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
mutex_lock(&tbl.bufq[idx].q_lock);
mem_handle = GET_MEM_HANDLE(idx, ion_fd);
- tbl.bufq[idx].fd = ion_fd;
+ tbl.bufq[idx].dma_buf = buf;
+ tbl.bufq[idx].fd = -1;
tbl.bufq[idx].flags = inp->flags;
tbl.bufq[idx].buf_handle = mem_handle;
tbl.bufq[idx].kmdvaddr = kvaddr;
@@ -955,9 +1015,8 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
return rc;
slot_fail:
- cam_smmu_unmap_iova(inp->smmu_hdl,
- ion_fd,
- region);
+ cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
+ buf, region);
smmu_fail:
ion_unmap_kernel(tbl.client, hdl);
map_fail:
@@ -995,8 +1054,172 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
}
CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
- rc = cam_mem_util_unmap(idx);
+ rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);
+
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+ enum cam_smmu_region_id region,
+ struct cam_mem_mgr_memory_desc *out)
+{
+ struct ion_handle *hdl;
+ struct dma_buf *buf = NULL;
+ int rc = 0;
+ int ion_fd = -1;
+ uint32_t heap_id;
+ dma_addr_t iova = 0;
+ size_t request_len = 0;
+ uint32_t mem_handle;
+ int32_t idx;
+ int32_t smmu_hdl = 0;
+ int32_t num_hdl = 0;
+
+ if (!inp || !out) {
+ CAM_ERR(CAM_CRM, "Invalid param(s)");
+ return -EINVAL;
+ }
+
+ if (!inp->smmu_hdl) {
+ CAM_ERR(CAM_CRM, "Invalid SMMU handle");
+ return -EINVAL;
+ }
+
+ if (region != CAM_SMMU_REGION_SECHEAP) {
+ CAM_ERR(CAM_CRM, "Only secondary heap supported");
+ return -EINVAL;
+ }
+
+ heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+ rc = cam_mem_util_get_dma_buf(inp->size,
+ inp->align,
+ heap_id,
+ 0,
+ &hdl,
+ &buf);
+
+ if (rc) {
+ CAM_ERR(CAM_CRM, "ION alloc failed for sec heap buffer");
+ goto ion_fail;
+ } else {
+ CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
+ }
+
+ rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
+ buf,
+ &iova,
+ &request_len);
+
+ if (rc) {
+ CAM_ERR(CAM_CRM, "Reserving secondary heap failed");
+ goto smmu_fail;
+ }
+
+ smmu_hdl = inp->smmu_hdl;
+ num_hdl = 1;
+
+ idx = cam_mem_get_slot();
+ if (idx < 0) {
+ rc = -ENOMEM;
+ goto slot_fail;
+ }
+
+ mutex_lock(&tbl.bufq[idx].q_lock);
+ mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+ tbl.bufq[idx].fd = -1;
+ tbl.bufq[idx].dma_buf = buf;
+ tbl.bufq[idx].flags = inp->flags;
+ tbl.bufq[idx].buf_handle = mem_handle;
+ tbl.bufq[idx].kmdvaddr = 0;
+
+ tbl.bufq[idx].vaddr = iova;
+
+ tbl.bufq[idx].i_hdl = hdl;
+ tbl.bufq[idx].len = request_len;
+ tbl.bufq[idx].num_hdl = num_hdl;
+ memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+ sizeof(int32_t));
+ tbl.bufq[idx].is_imported = false;
+ mutex_unlock(&tbl.bufq[idx].q_lock);
+
+ out->kva = 0;
+ out->iova = (uint32_t)iova;
+ out->smmu_hdl = smmu_hdl;
+ out->mem_handle = mem_handle;
+ out->len = request_len;
+ out->region = region;
+
+ return rc;
+
+slot_fail:
+ cam_smmu_release_sec_heap(smmu_hdl);
+smmu_fail:
+ ion_free(tbl.client, hdl);
+ion_fail:
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
+
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
+{
+ int32_t idx;
+ int rc;
+ int32_t smmu_hdl;
+
+ if (!inp) {
+ CAM_ERR(CAM_CRM, "Invalid argument");
+ return -EINVAL;
+ }
+
+ if (inp->region != CAM_SMMU_REGION_SECHEAP) {
+ CAM_ERR(CAM_CRM, "Only secondary heap supported");
+ return -EINVAL;
+ }
+
+ idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+ if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+ CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
+ return -EINVAL;
+ }
+
+ if (!tbl.bufq[idx].active) {
+ CAM_ERR(CAM_CRM, "Released buffer state should be active");
+ return -EINVAL;
+ }
+
+ if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+ CAM_ERR(CAM_CRM,
+ "Released buf handle not matching within table");
+ return -EINVAL;
+ }
+
+ if (tbl.bufq[idx].num_hdl != 1) {
+ CAM_ERR(CAM_CRM,
+ "Sec heap region should have only one smmu hdl");
+ return -ENODEV;
+ }
+
+ memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
+ sizeof(int32_t));
+ if (inp->smmu_hdl != smmu_hdl) {
+ CAM_ERR(CAM_CRM,
+ "Passed SMMU handle doesn't match with internal hdl");
+ return -ENODEV;
+ }
+
+ rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
+ if (rc) {
+ CAM_ERR(CAM_CRM,
+ "Sec heap region release failed");
+ return -ENODEV;
+ }
+
+ CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
+ rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
+ if (rc)
+ CAM_ERR(CAM_CRM, "unmapping secondary heap failed");
+
+ return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
index 06588c4..83727d2 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -14,15 +14,23 @@
#define _CAM_MEM_MGR_H_
#include <linux/mutex.h>
+#include <linux/dma-buf.h>
#include <media/cam_req_mgr.h>
#include "cam_mem_mgr_api.h"
#define CAM_MEM_BUFQ_MAX 1024
+/* Enum of possible SMMU mapping clients (user vs kernel context) */
+enum cam_smmu_mapping_client {
+ CAM_SMMU_MAPPING_USER,
+ CAM_SMMU_MAPPING_KERNEL,
+};
+
/**
* struct cam_mem_buf_queue
*
* @i_hdl: ion handle for the buffer
+ * @dma_buf: pointer to the allocated dma_buf in the table
* @q_lock: mutex lock for buffer
* @hdls: list of mapped handles
* @num_hdl: number of handles
@@ -38,6 +46,7 @@
*/
struct cam_mem_buf_queue {
struct ion_handle *i_hdl;
+ struct dma_buf *dma_buf;
struct mutex q_lock;
int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
int32_t num_hdl;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index af7962a..7588c17 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -100,4 +100,26 @@ static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);
}
+/**
+ * @brief: Reserves a memory region
+ *
+ * @inp: Information specifying requested region properties
+ * @region : Region which is to be reserved
+ * @out : Information about reserved region
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+ enum cam_smmu_region_id region,
+ struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Frees a memory region
+ *
+ * @inp : Information about region which is to be freed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp);
+
#endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index a6b097d..244746b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -564,6 +564,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
* hence try again in next sof
*/
slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+ spin_lock_bh(&link->link_state_spin_lock);
if (link->state == CAM_CRM_LINK_STATE_ERR) {
/*
* During error recovery all tables should be
@@ -576,6 +577,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
in_q->slot[in_q->rd_idx].status);
rc = -EPERM;
}
+ spin_unlock_bh(&link->link_state_spin_lock);
return rc;
}
}
@@ -592,13 +594,14 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
} else {
link->trigger_mask |= trigger;
+ spin_lock_bh(&link->link_state_spin_lock);
if (link->state == CAM_CRM_LINK_STATE_ERR) {
CAM_WARN(CAM_CRM, "Err recovery done idx %d",
in_q->rd_idx);
- mutex_lock(&link->lock);
link->state = CAM_CRM_LINK_STATE_READY;
- mutex_unlock(&link->lock);
}
+ spin_unlock_bh(&link->link_state_spin_lock);
+
if (link->trigger_mask == link->subscribe_event) {
slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
link->trigger_mask = 0;
@@ -807,15 +810,34 @@ static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
*/
static void __cam_req_mgr_sof_freeze(unsigned long data)
{
- struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
- struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
+ struct cam_req_mgr_core_link *link = NULL;
+ struct cam_req_mgr_core_session *session = NULL;
+ struct cam_req_mgr_message msg;
if (!timer) {
CAM_ERR(CAM_CRM, "NULL timer");
return;
}
link = (struct cam_req_mgr_core_link *)timer->parent;
- CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
+ session = (struct cam_req_mgr_core_session *)link->parent;
+
+ CAM_ERR(CAM_CRM, "SOF freeze for session %d link 0x%x",
+ session->session_hdl, link->link_hdl);
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.session_hdl = session->session_hdl;
+ msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+ msg.u.err_msg.request_id = 0;
+ msg.u.err_msg.link_hdl = link->link_hdl;
+
+
+ if (cam_req_mgr_notify_message(&msg,
+ V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT))
+ CAM_ERR(CAM_CRM,
+ "Error notifying SOF freeze for session %d link 0x%x",
+ session->session_hdl, link->link_hdl);
}
/**
@@ -860,14 +882,14 @@ static void __cam_req_mgr_destroy_subdev(
* @brief : Cleans up the mem allocated while linking
* @link : pointer to link, mem associated with this link is freed
*
+ * @return : zero on success, or an error code if unlinking any device failed
*/
-static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+static int __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
{
int32_t i = 0;
struct cam_req_mgr_connected_device *dev;
struct cam_req_mgr_core_dev_link_setup link_data;
-
- mutex_lock(&link->lock);
+ int rc = 0;
link_data.link_enable = 0;
link_data.link_hdl = link->link_hdl;
@@ -880,7 +902,11 @@ static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
if (dev != NULL) {
link_data.dev_hdl = dev->dev_hdl;
if (dev->ops && dev->ops->link_setup)
- dev->ops->link_setup(&link_data);
+ rc = dev->ops->link_setup(&link_data);
+ if (rc)
+ CAM_ERR(CAM_CRM,
+ "Unlink failed dev_hdl %d",
+ dev->dev_hdl);
dev->dev_hdl = 0;
dev->parent = NULL;
dev->ops = NULL;
@@ -895,7 +921,7 @@ static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
link->num_devs = 0;
link->max_delay = 0;
- mutex_unlock(&link->lock);
+ return rc;
}
/**
@@ -938,6 +964,7 @@ static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
return NULL;
}
mutex_init(&link->lock);
+ spin_lock_init(&link->link_state_spin_lock);
mutex_lock(&link->lock);
link->state = CAM_CRM_LINK_STATE_AVAILABLE;
@@ -1348,9 +1375,9 @@ int cam_req_mgr_process_error(void *priv, void *data)
__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
in_q->rd_idx = idx;
in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
- mutex_lock(&link->lock);
+ spin_lock_bh(&link->link_state_spin_lock);
link->state = CAM_CRM_LINK_STATE_ERR;
- mutex_unlock(&link->lock);
+ spin_unlock_bh(&link->link_state_spin_lock);
}
}
mutex_unlock(&link->req.lock);
@@ -1401,11 +1428,14 @@ static int cam_req_mgr_process_trigger(void *priv, void *data)
CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+ spin_lock_bh(&link->link_state_spin_lock);
if (link->state == CAM_CRM_LINK_STATE_ERR)
CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
in_q->rd_idx,
in_q->slot[in_q->rd_idx].status);
+ spin_unlock_bh(&link->link_state_spin_lock);
+
if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
/*
* Do NOT reset req q slot data here, it can not be done
@@ -1446,8 +1476,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
if (!add_req) {
CAM_ERR(CAM_CRM, "sof_data is NULL");
- rc = -EINVAL;
- goto end;
+ return -EINVAL;
}
CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
@@ -1457,9 +1486,18 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
if (!link) {
CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
- rc = -EINVAL;
+ return -EINVAL;
+ }
+
+ mutex_lock(&link->lock);
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state != CAM_CRM_LINK_STATE_READY) {
+ CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+ rc = -EPERM;
+ spin_unlock_bh(&link->link_state_spin_lock);
goto end;
}
+ spin_unlock_bh(&link->link_state_spin_lock);
/* Validate if req id is present in input queue */
idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
@@ -1490,6 +1528,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
add_req->dev_hdl, add_req->req_id);
end:
+ mutex_unlock(&link->lock);
return rc;
}
@@ -1525,6 +1564,15 @@ static int cam_req_mgr_cb_notify_err(
goto end;
}
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state != CAM_CRM_LINK_STATE_READY) {
+ CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+ spin_unlock_bh(&link->link_state_spin_lock);
+ rc = -EPERM;
+ goto end;
+ }
+ spin_unlock_bh(&link->link_state_spin_lock);
+
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
@@ -1579,6 +1627,15 @@ static int cam_req_mgr_cb_notify_trigger(
goto end;
}
+ spin_lock_bh(&link->link_state_spin_lock);
+ if (link->state != CAM_CRM_LINK_STATE_READY) {
+ CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+ spin_unlock_bh(&link->link_state_spin_lock);
+ rc = -EPERM;
+ goto end;
+ }
+ spin_unlock_bh(&link->link_state_spin_lock);
+
crm_timer_reset(link->watchdog);
task = cam_req_mgr_workq_get_task(link->workq);
if (!task) {
@@ -1639,7 +1696,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
if (rc < 0)
return rc;
- mutex_lock(&link->lock);
max_delay = CAM_PIPELINE_DELAY_0;
for (i = 0; i < link_info->num_devices; i++) {
dev = &link->l_dev[i];
@@ -1742,7 +1798,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
/* At start, expect max pd devices, all are in skip state */
__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
- mutex_unlock(&link->lock);
return 0;
error:
@@ -1882,11 +1937,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
if (link->link_hdl < 0) {
CAM_ERR(CAM_CRM,
"Insufficient memory to create new device handle");
- mutex_unlock(&link->lock);
rc = link->link_hdl;
goto link_hdl_fail;
}
- mutex_unlock(&link->lock);
link_info->link_hdl = link->link_hdl;
/* Allocate memory to hold data of all linked devs */
@@ -1903,9 +1956,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
if (rc < 0)
goto setup_failed;
- mutex_lock(&link->lock);
+ spin_lock_bh(&link->link_state_spin_lock);
link->state = CAM_CRM_LINK_STATE_READY;
- mutex_unlock(&link->lock);
+ spin_unlock_bh(&link->link_state_spin_lock);
/* Create worker for current link */
snprintf(buf, sizeof(buf), "%x-%x",
@@ -1936,6 +1989,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
goto setup_failed;
}
+ mutex_unlock(&link->lock);
mutex_unlock(&g_crm_core_dev->crm_lock);
return rc;
setup_failed:
@@ -1944,6 +1998,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
cam_destroy_device_hdl(link->link_hdl);
link_info->link_hdl = 0;
link_hdl_fail:
+ mutex_unlock(&link->lock);
__cam_req_mgr_unreserve_link(cam_session, &link);
mutex_unlock(&g_crm_core_dev->crm_lock);
return rc;
@@ -1979,6 +2034,11 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
+
+ mutex_lock(&link->lock);
+ spin_lock_bh(&link->link_state_spin_lock);
+ link->state = CAM_CRM_LINK_STATE_IDLE;
+ spin_unlock_bh(&link->link_state_spin_lock);
__cam_req_mgr_print_req_tbl(&link->req);
/* Destroy workq payload data */
@@ -1990,8 +2050,12 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
cam_req_mgr_workq_destroy(&link->workq);
- /* Cleanuprequest tables */
- __cam_req_mgr_destroy_link_info(link);
+ /* Cleanup request tables and unlink devices */
+ rc = __cam_req_mgr_destroy_link_info(link);
+ if (rc) {
+ CAM_ERR(CAM_CORE, "Unlink failed. Cannot proceed");
+ return rc;
+ }
/* Free memory holding data of linked devs */
__cam_req_mgr_destroy_subdev(link->l_dev);
@@ -2004,6 +2068,7 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
}
/* Free curent link and put back into session's free pool of links */
+ mutex_unlock(&link->lock);
__cam_req_mgr_unreserve_link(cam_session, &link);
mutex_unlock(&g_crm_core_dev->crm_lock);
@@ -2127,10 +2192,10 @@ int cam_req_mgr_flush_requests(
flush->link_hdl = flush_info->link_hdl;
flush->flush_type = flush_info->flush_type;
task->process_cb = &cam_req_mgr_process_flush_req;
+ init_completion(&link->workq_comp);
rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
/* Blocking call */
- init_completion(&link->workq_comp);
rc = wait_for_completion_timeout(
&link->workq_comp,
msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index db34157..e17047d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -12,6 +12,7 @@
#ifndef _CAM_REQ_MGR_CORE_H_
#define _CAM_REQ_MGR_CORE_H_
+#include <linux/spinlock.h>
#include "cam_req_mgr_interface.h"
#include "cam_req_mgr_core_defs.h"
#include "cam_req_mgr_timer.h"
@@ -259,27 +260,28 @@ struct cam_req_mgr_connected_device {
/**
* struct cam_req_mgr_core_link
* - Link Properties
- * @link_hdl : Link identifier
- * @num_devs : num of connected devices to this link
- * @max_delay : Max of pipeline delay of all connected devs
- * @workq : Pointer to handle workq related jobs
- * @pd_mask : each set bit indicates the device with pd equal to bit
- * position is available.
+ * @link_hdl : Link identifier
+ * @num_devs : num of connected devices to this link
+ * @max_delay : Max of pipeline delay of all connected devs
+ * @workq : Pointer to handle workq related jobs
+ * @pd_mask : each set bit indicates the device with pd equal to
+ * bit position is available.
* - List of connected devices
- * @l_dev : List of connected devices to this link
+ * @l_dev : List of connected devices to this link
* - Request handling data struct
- * @req : req data holder.
+ * @req : req data holder.
* - Timer
- * @watchdog : watchdog timer to recover from sof freeze
+ * @watchdog : watchdog timer to recover from sof freeze
* - Link private data
- * @workq_comp : conditional variable to block user thread for workq to
- * finish schedule request processing
- * @state : link state machine
- * @parent : pvt data - link's parent is session
- * @lock : mutex lock to guard link data operations
- * @subscribe_event: irqs that link subscribes, IFE should send notification
- * to CRM at those hw events.
- * @trigger_mask : mask on which irq the req is already applied
+ * @workq_comp : conditional variable to block user thread for workq
+ * to finish schedule request processing
+ * @state : link state machine
+ * @parent : pvt data - link's parent is session
+ * @lock : mutex lock to guard link data operations
+ * @link_state_spin_lock : spin lock to protect link state variable
+ * @subscribe_event : irqs that link subscribes, IFE should send
+ * notification to CRM at those hw events.
+ * @trigger_mask : mask on which irq the req is already applied
*/
struct cam_req_mgr_core_link {
int32_t link_hdl;
@@ -294,6 +296,7 @@ struct cam_req_mgr_core_link {
enum cam_req_mgr_link_state state;
void *parent;
struct mutex lock;
+ spinlock_t link_state_spin_lock;
uint32_t subscribe_event;
uint32_t trigger_mask;
};
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index c316dbb..49c3c56e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -462,7 +462,7 @@ static int cam_video_device_setup(void)
return rc;
}
-int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
uint32_t id,
uint32_t type)
{
@@ -481,7 +481,7 @@ int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
return 0;
}
-EXPORT_SYMBOL(cam_req_mgr_notify_frame_message);
+EXPORT_SYMBOL(cam_req_mgr_notify_message);
void cam_video_device_cleanup(void)
{
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
index 77faed9..93278b8 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
@@ -43,7 +43,7 @@ struct cam_req_mgr_device {
#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type) \
(type *)((char *)ev.u.data)
-int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
uint32_t id,
uint32_t type);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 1d2169b..f357941 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -317,6 +317,8 @@ static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type)
}
hdl_tbl->hdl[idx].state = HDL_FREE;
+ hdl_tbl->hdl[idx].ops = NULL;
+ hdl_tbl->hdl[idx].priv = NULL;
clear_bit(idx, hdl_tbl->bitmap);
spin_unlock_bh(&hdl_tbl_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index 94a591c..65c2327 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_utils/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
index 4e8ea8b..c0bebfd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -1,6 +1,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 85db1b1..abfc190 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -15,6 +15,129 @@
#include "cam_actuator_core.h"
#include "cam_sensor_util.h"
#include "cam_trace.h"
+#include "cam_res_mgr_api.h"
+
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	return rc;
+}
+
+static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info =
+ &a_ctrl->soc_info;
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(
+ &a_ctrl->soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_ACTUATOR,
+ "failed to fill vreg params for power up rc:%d", rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ &a_ctrl->soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_ACTUATOR,
+ "failed to fill vreg params power down rc:%d", rc);
+ return rc;
+ }
+
+ power_info->dev = soc_info->dev;
+
+ rc = cam_sensor_core_power_up(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_ACTUATOR, "failed in ois power up rc %d", rc);
+ return rc;
+ }
+
+ /* VREG needs some delay to power up */
+ usleep_range(2000, 2050);
+
+ rc = camera_io_init(&a_ctrl->io_master_info);
+ if (rc < 0)
+ CAM_ERR(CAM_ACTUATOR, "cci_init failed: rc: %d", rc);
+
+ return rc;
+}
+
+static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
+{
+ int32_t rc = 0;
+ struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+ struct cam_actuator_soc_private *soc_private;
+
+ if (!a_ctrl) {
+ CAM_ERR(CAM_ACTUATOR, "failed: e_ctrl %pK", a_ctrl);
+ return -EINVAL;
+ }
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+ soc_info = &a_ctrl->soc_info;
+
+ if (!power_info) {
+ CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
+ return -EINVAL;
+ }
+ rc = msm_camera_power_down(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc);
+ return rc;
+ }
+
+ camera_io_release(&a_ctrl->io_master_info);
+
+ return rc;
+}
static int32_t cam_actuator_i2c_modes_util(
struct camera_io_master *io_master_info,
@@ -324,6 +447,19 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
rc);
return rc;
}
+
+ rc = cam_actuator_apply_settings(a_ctrl,
+ &a_ctrl->i2c_data.init_settings);
+ if (rc < 0)
+ CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
+
+ /* Delete the request even if the apply is failed */
+ rc = delete_request(&a_ctrl->i2c_data.init_settings);
+ if (rc < 0) {
+ CAM_WARN(CAM_ACTUATOR,
+ "Fail in deleting the Init settings");
+ rc = 0;
+ }
} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS) {
a_ctrl->setting_apply_state =
@@ -383,92 +519,6 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
return rc;
}
-static int32_t cam_actuator_vreg_control(
- struct cam_actuator_ctrl_t *a_ctrl,
- int config)
-{
- int rc = 0, cnt;
- struct cam_hw_soc_info *soc_info;
-
- soc_info = &a_ctrl->soc_info;
- cnt = soc_info->num_rgltr;
-
- if (!cnt)
- return 0;
-
- if (cnt >= CAM_SOC_MAX_REGULATOR) {
- CAM_ERR(CAM_ACTUATOR, "Regulators more than supported %d", cnt);
- return -EINVAL;
- }
-
- if (config) {
- rc = cam_soc_util_request_platform_resource(soc_info,
- NULL, NULL);
- rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
- false);
- } else {
- rc = cam_soc_util_disable_platform_resource(soc_info, false,
- false);
- rc = cam_soc_util_release_platform_resource(soc_info);
- }
-
- return rc;
-}
-
-static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
-{
- int rc = 0;
- struct cam_hw_soc_info *soc_info =
- &a_ctrl->soc_info;
- struct msm_camera_gpio_num_info *gpio_num_info = NULL;
-
- rc = cam_actuator_vreg_control(a_ctrl, 1);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR, "Actuator Reg Failed %d", rc);
- return rc;
- }
-
- gpio_num_info = a_ctrl->gpio_num_info;
-
- if (soc_info->gpio_data &&
- gpio_num_info &&
- gpio_num_info->valid[SENSOR_VAF] == 1) {
- gpio_set_value_cansleep(
- gpio_num_info->gpio_num[SENSOR_VAF],
- 1);
- }
-
- /* VREG needs some delay to power up */
- usleep_range(2000, 2050);
-
- return rc;
-}
-
-static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
-{
- int32_t rc = 0;
- struct cam_hw_soc_info *soc_info =
- &a_ctrl->soc_info;
- struct msm_camera_gpio_num_info *gpio_num_info = NULL;
-
- gpio_num_info = a_ctrl->gpio_num_info;
-
- if (soc_info->gpio_data &&
- gpio_num_info &&
- gpio_num_info->valid[SENSOR_VAF] == 1) {
-
- gpio_set_value_cansleep(
- gpio_num_info->gpio_num[SENSOR_VAF],
- GPIOF_OUT_INIT_LOW);
- }
-
- rc = cam_actuator_vreg_control(a_ctrl, 0);
- if (rc < 0)
- CAM_ERR(CAM_ACTUATOR, "Disable Regulator Failed: %d", rc);
-
- return rc;
-}
-
void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
{
int rc;
@@ -476,17 +526,12 @@ void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
return;
- if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) {
- rc = camera_io_release(&a_ctrl->io_master_info);
- if (rc < 0)
- CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
+ if ((a_ctrl->cam_act_state == CAM_ACTUATOR_START) ||
+ (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE)) {
rc = cam_actuator_power_down(a_ctrl);
if (rc < 0)
CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
- a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
- }
- if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
if (rc < 0)
CAM_ERR(CAM_ACTUATOR, "destroying dhdl failed");
@@ -508,7 +553,7 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
return -EINVAL;
}
- pr_debug("Opcode to Actuator: %d", cmd->op_code);
+ CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code);
mutex_lock(&(a_ctrl->actuator_mutex));
switch (cmd->op_code) {
@@ -549,10 +594,31 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
rc = -EFAULT;
goto release_mutex;
}
+
+ rc = cam_actuator_power_up(a_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
+ goto release_mutex;
+ }
+
a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
}
break;
case CAM_RELEASE_DEV: {
+ if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_ACTUATOR,
+ "Not in right state to release : %d",
+ a_ctrl->cam_act_state);
+ goto release_mutex;
+ }
+
+ rc = cam_actuator_power_down(a_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+ goto release_mutex;
+ }
+
if (a_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
a_ctrl->bridge_intf.device_hdl,
@@ -582,28 +648,11 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
}
break;
case CAM_START_DEV: {
- rc = cam_actuator_power_up(a_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
- goto release_mutex;
- }
- rc = camera_io_init(&a_ctrl->io_master_info);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR, "cci_init failed");
- cam_actuator_power_down(a_ctrl);
- }
-
- rc = cam_actuator_apply_settings(a_ctrl,
- &a_ctrl->i2c_data.init_settings);
- if (rc < 0)
- CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
-
- /* Delete the request even if the apply is failed */
- rc = delete_request(&a_ctrl->i2c_data.init_settings);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR,
- "Fail in deleting the Init settings");
+ if (a_ctrl->cam_act_state != CAM_ACTUATOR_ACQUIRE) {
rc = -EINVAL;
+ CAM_WARN(CAM_ACTUATOR,
+ "Not in right state to start : %d",
+ a_ctrl->cam_act_state);
goto release_mutex;
}
a_ctrl->cam_act_state = CAM_ACTUATOR_START;
@@ -613,14 +662,14 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
struct i2c_settings_array *i2c_set = NULL;
int i;
- rc = camera_io_release(&a_ctrl->io_master_info);
- if (rc < 0)
- CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
- rc = cam_actuator_power_down(a_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+ if (a_ctrl->cam_act_state != CAM_ACTUATOR_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_ACTUATOR,
+ "Not in right state to stop : %d",
+ a_ctrl->cam_act_state);
goto release_mutex;
}
+
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
index f24070e..c28d79d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
@@ -16,6 +16,16 @@
#include "cam_actuator_dev.h"
/**
+ * @power_info: power setting info to control the power
+ *
+ * This API construct the default actuator power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_actuator_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info);
+
+/**
* @apply: Req mgr structure for applying request
*
* This API applies the request that is mentioned
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index 465f5e2..c5c9b0a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -140,9 +140,11 @@ static int cam_actuator_init_subdev(struct cam_actuator_ctrl_t *a_ctrl)
static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int32_t rc = 0, i = 0;
- struct cam_actuator_ctrl_t *a_ctrl;
- struct cam_hw_soc_info *soc_info = NULL;
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct cam_actuator_ctrl_t *a_ctrl;
+ struct cam_hw_soc_info *soc_info = NULL;
+ struct cam_actuator_soc_private *soc_private = NULL;
if (client == NULL || id == NULL) {
CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
@@ -164,6 +166,14 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, a_ctrl);
+ soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto free_ctrl;
+ }
+ a_ctrl->soc_info.soc_private = soc_private;
+
a_ctrl->io_master_info.client = client;
soc_info = &a_ctrl->soc_info;
soc_info->dev = &client->dev;
@@ -178,7 +188,11 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
rc = cam_actuator_init_subdev(a_ctrl);
if (rc)
- goto free_ctrl;
+ goto free_soc;
+
+ if (soc_private->i2c_info.slave_addr != 0)
+ a_ctrl->io_master_info.client->addr =
+ soc_private->i2c_info.slave_addr;
a_ctrl->i2c_data.per_frame =
(struct i2c_settings_array *)
@@ -194,14 +208,6 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
- rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
- NULL, NULL);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR,
- "Requesting Platform Resources failed rc %d", rc);
- goto free_mem;
- }
-
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.ops.get_dev_info =
cam_actuator_publish_dev_info;
@@ -212,6 +218,14 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
v4l2_set_subdevdata(&(a_ctrl->v4l2_dev_str.sd), a_ctrl);
+ rc = cam_actuator_construct_default_power_setting(
+ &soc_private->power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Construct default actuator power setting failed.");
+ goto free_mem;
+ }
+
a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
return rc;
@@ -219,6 +233,8 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
kfree(a_ctrl->i2c_data.per_frame);
unreg_subdev:
cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
+free_soc:
+ kfree(soc_private);
free_ctrl:
kfree(a_ctrl);
return rc;
@@ -226,8 +242,10 @@ static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
{
- struct cam_actuator_ctrl_t *a_ctrl;
int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl;
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
a_ctrl = platform_get_drvdata(pdev);
if (!a_ctrl) {
@@ -235,8 +253,15 @@ static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
return 0;
}
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
kfree(a_ctrl->io_master_info.cci_client);
a_ctrl->io_master_info.cci_client = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ kfree(a_ctrl->soc_info.soc_private);
kfree(a_ctrl->i2c_data.per_frame);
a_ctrl->i2c_data.per_frame = NULL;
devm_kfree(&pdev->dev, a_ctrl);
@@ -246,17 +271,29 @@ static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
{
- struct cam_actuator_ctrl_t *a_ctrl = i2c_get_clientdata(client);
int32_t rc = 0;
+ struct cam_actuator_ctrl_t *a_ctrl =
+ i2c_get_clientdata(client);
+ struct cam_actuator_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
/* Handle I2C Devices */
if (!a_ctrl) {
CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
return -EINVAL;
}
+
+ soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
/*Free Allocated Mem */
kfree(a_ctrl->i2c_data.per_frame);
a_ctrl->i2c_data.per_frame = NULL;
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ kfree(a_ctrl->soc_info.soc_private);
+ a_ctrl->soc_info.soc_private = NULL;
kfree(a_ctrl);
return rc;
}
@@ -269,8 +306,10 @@ static const struct of_device_id cam_actuator_driver_dt_match[] = {
static int32_t cam_actuator_driver_platform_probe(
struct platform_device *pdev)
{
- int32_t rc = 0, i = 0;
- struct cam_actuator_ctrl_t *a_ctrl = NULL;
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct cam_actuator_ctrl_t *a_ctrl = NULL;
+ struct cam_actuator_soc_private *soc_private = NULL;
/* Create sensor control structure */
a_ctrl = devm_kzalloc(&pdev->dev,
@@ -287,15 +326,28 @@ static int32_t cam_actuator_driver_platform_probe(
a_ctrl->io_master_info.cci_client = kzalloc(sizeof(
struct cam_sensor_cci_client), GFP_KERNEL);
- if (!(a_ctrl->io_master_info.cci_client))
- return -ENOMEM;
+ if (!(a_ctrl->io_master_info.cci_client)) {
+ rc = -ENOMEM;
+ goto free_ctrl;
+ }
+
+ soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+ GFP_KERNEL);
+ if (!soc_private) {
+ rc = -ENOMEM;
+ goto free_cci_client;
+ }
+ a_ctrl->soc_info.soc_private = soc_private;
+ soc_private->power_info.dev = &pdev->dev;
a_ctrl->i2c_data.per_frame =
(struct i2c_settings_array *)
kzalloc(sizeof(struct i2c_settings_array) *
MAX_PER_FRAME_ARRAY, GFP_KERNEL);
- if (a_ctrl->i2c_data.per_frame == NULL)
- return -ENOMEM;
+ if (a_ctrl->i2c_data.per_frame == NULL) {
+ rc = -ENOMEM;
+ goto free_soc;
+ }
INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
@@ -305,7 +357,7 @@ static int32_t cam_actuator_driver_platform_probe(
rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
if (rc < 0) {
CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
- goto free_ctrl;
+ goto free_mem;
}
/* Fill platform device id*/
@@ -315,14 +367,6 @@ static int32_t cam_actuator_driver_platform_probe(
if (rc)
goto free_mem;
- rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
- NULL, NULL);
- if (rc < 0) {
- CAM_ERR(CAM_ACTUATOR,
- "Requesting Platform Resources failed rc %d", rc);
- goto unreg_subdev;
- }
-
a_ctrl->bridge_intf.device_hdl = -1;
a_ctrl->bridge_intf.ops.get_dev_info =
cam_actuator_publish_dev_info;
@@ -336,11 +380,23 @@ static int32_t cam_actuator_driver_platform_probe(
platform_set_drvdata(pdev, a_ctrl);
v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
+ rc = cam_actuator_construct_default_power_setting(
+ &soc_private->power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_ACTUATOR,
+ "Construct default actuator power setting failed.");
+ goto unreg_subdev;
+ }
+
return rc;
unreg_subdev:
cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
free_mem:
kfree(a_ctrl->i2c_data.per_frame);
+free_soc:
+ kfree(soc_private);
+free_cci_client:
+ kfree(a_ctrl->io_master_info.cci_client);
free_ctrl:
devm_kfree(&pdev->dev, a_ctrl);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index bd5d50f..8b8b1ef 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -53,11 +53,25 @@ enum cam_actuator_apply_state_t {
ACT_APPLY_SETTINGS_LATER,
};
-enum cam_actator_state {
+enum cam_actuator_state {
CAM_ACTUATOR_INIT,
CAM_ACTUATOR_ACQUIRE,
CAM_ACTUATOR_START,
- CAM_ACTUATOR_RELEASE,
+};
+
+/**
+ * struct cam_actuator_i2c_info_t - I2C info
+ * @slave_addr : slave address
+ * @i2c_freq_mode : i2c frequency mode
+ */
+struct cam_actuator_i2c_info_t {
+ uint16_t slave_addr;
+ uint8_t i2c_freq_mode;
+};
+
+struct cam_actuator_soc_private {
+ struct cam_actuator_i2c_info_t i2c_info;
+ struct cam_sensor_power_ctrl_t power_info;
};
/**
@@ -102,8 +116,7 @@ struct cam_actuator_ctrl_t {
struct mutex actuator_mutex;
uint32_t id;
enum cam_actuator_apply_state_t setting_apply_state;
- enum cam_actator_state cam_act_state;
- struct msm_camera_gpio_num_info *gpio_num_info;
+ enum cam_actuator_state cam_act_state;
uint8_t cam_pinctrl_status;
struct cam_subdev v4l2_dev_str;
struct i2c_data_settings i2c_data;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index f47ec2f..55b7c72 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -22,9 +22,12 @@
int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
struct device *dev)
{
- int32_t rc = 0;
- struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
- struct device_node *of_node = NULL;
+ int32_t rc = 0;
+ struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+ struct cam_actuator_soc_private *soc_private =
+ (struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+ struct device_node *of_node = NULL;
/* Initialize mutex */
mutex_init(&(a_ctrl->actuator_mutex));
@@ -61,9 +64,8 @@ int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
}
rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
- &a_ctrl->gpio_num_info);
-
- if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
+ &power_info->gpio_num_info);
+ if ((rc < 0) || (!power_info->gpio_num_info)) {
CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index c62b251..d7a6504 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -441,7 +441,7 @@ static int32_t cam_cci_calc_cmd_len(struct cci_device *cci_dev,
if (cmd->reg_addr + 1 ==
(cmd+1)->reg_addr) {
len += data_len;
- *pack += data_len;
+ (*pack)++;
} else {
break;
}
@@ -730,10 +730,30 @@ static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
reg_addr++;
} else {
if ((i + 1) <= cci_dev->payload_size) {
- data[i++] = (i2c_cmd->reg_data &
- 0xFF00) >> 8; /* MSB */
- data[i++] = i2c_cmd->reg_data &
- 0x00FF; /* LSB */
+ switch (i2c_msg->data_type) {
+ case CAMERA_SENSOR_I2C_TYPE_DWORD:
+ data[i++] = (i2c_cmd->reg_data &
+ 0xFF000000) >> 24;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_3B:
+ data[i++] = (i2c_cmd->reg_data &
+ 0x00FF0000) >> 16;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_WORD:
+ data[i++] = (i2c_cmd->reg_data &
+ 0x0000FF00) >> 8;
+ /* fallthrough */
+ case CAMERA_SENSOR_I2C_TYPE_BYTE:
+ data[i++] = i2c_cmd->reg_data &
+ 0x000000FF;
+ break;
+ default:
+ CAM_ERR(CAM_CCI,
+ "invalid data type: %d",
+ i2c_msg->data_type);
+ return -EINVAL;
+ }
+
if (c_ctrl->cmd ==
MSM_CCI_I2C_WRITE_SEQ)
reg_addr++;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 4c996e08..d0ee0f6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -289,7 +289,7 @@ struct cci_write_async {
irqreturn_t cam_cci_irq(int irq_num, void *data);
#ifdef CONFIG_SPECTRA_CAMERA
-struct v4l2_subdev *cam_cci_get_subdev(void);
+extern struct v4l2_subdev *cam_cci_get_subdev(void);
#else
static inline struct v4l2_subdev *cam_cci_get_subdev(void)
{
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index 8de4472..cf7a65f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -99,7 +99,7 @@ int cam_cci_init(struct v4l2_subdev *sd,
/* Enable Regulators and IRQ*/
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, true);
+ CAM_LOWSVS_VOTE, true);
if (rc < 0) {
CAM_DBG(CAM_CCI, "request platform resources failed");
goto platform_enable_failed;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 7cc26c1..cb44cb8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -494,8 +494,8 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
}
break;
case CAM_STOP_DEV: {
- if (csiphy_dev->csiphy_state !=
- CAM_CSIPHY_START) {
+ if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
+ !csiphy_dev->start_dev_count) {
CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
csiphy_dev->csiphy_state);
goto release_mutex;
@@ -508,16 +508,13 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
}
rc = cam_csiphy_disable_hw(csiphy_dev);
- if (rc < 0) {
+ if (rc < 0)
CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
- cam_cpas_stop(csiphy_dev->cpas_handle);
- goto release_mutex;
- }
+
rc = cam_cpas_stop(csiphy_dev->cpas_handle);
- if (rc < 0) {
+ if (rc < 0)
CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
- goto release_mutex;
- }
+
csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
}
break;
@@ -547,8 +544,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
} else {
csiphy_dev->bridge_intf.device_hdl[1] = -1;
csiphy_dev->bridge_intf.link_hdl[1] = -1;
- csiphy_dev->bridge_intf.
- session_hdl[1] = -1;
+ csiphy_dev->bridge_intf.session_hdl[1] = -1;
csiphy_dev->is_acquired_dev_combo_mode = 0;
}
@@ -587,10 +583,10 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
- csiphy_dev->start_dev_count++;
-
- if (csiphy_dev->csiphy_state == CAM_CSIPHY_START)
+ if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+ csiphy_dev->start_dev_count++;
goto release_mutex;
+ }
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SVS_VOTE;
@@ -616,9 +612,11 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
if (rc < 0) {
CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
+ cam_csiphy_disable_hw(csiphy_dev);
cam_cpas_stop(csiphy_dev->cpas_handle);
goto release_mutex;
}
+ csiphy_dev->start_dev_count++;
csiphy_dev->csiphy_state = CAM_CSIPHY_START;
}
break;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index d2a8467..6db5a97 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -90,7 +90,7 @@ int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
}
rc = cam_soc_util_enable_platform_resource(soc_info, true,
- CAM_TURBO_VOTE, ENABLE_IRQ);
+ CAM_SVS_VOTE, ENABLE_IRQ);
if (rc < 0) {
CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
rc);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
index 7a4aede..2977834 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -65,7 +65,7 @@ struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -81,7 +81,7 @@ struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -97,7 +97,7 @@ struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -113,7 +113,7 @@ struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -129,7 +129,7 @@ struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
};
@@ -148,7 +148,7 @@ struct csiphy_reg_t
{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -164,7 +164,7 @@ struct csiphy_reg_t
{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -179,7 +179,7 @@ struct csiphy_reg_t
{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -194,7 +194,7 @@ struct csiphy_reg_t
{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
@@ -210,7 +210,7 @@ struct csiphy_reg_t
{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
};
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index bd9f0fe..72b1779 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -176,6 +176,8 @@ static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
return rc;
}
+ power_info->dev = soc_info->dev;
+
rc = cam_sensor_core_power_up(power_info, soc_info);
if (rc) {
CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
@@ -289,6 +291,8 @@ int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
goto data_mem_free;
}
+
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
rc = cam_eeprom_match_id(e_ctrl);
if (rc) {
@@ -305,6 +309,8 @@ int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
rc = cam_eeprom_power_down(e_ctrl);
if (rc)
CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
return rc;
power_down:
cam_eeprom_power_down(e_ctrl);
@@ -313,6 +319,7 @@ int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
kfree(e_ctrl->cal_data.map);
e_ctrl->cal_data.num_data = 0;
e_ctrl->cal_data.num_map = 0;
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
return rc;
}
@@ -650,8 +657,15 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
struct cam_packet *csl_packet = NULL;
struct cam_eeprom_soc_private *soc_private =
(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
ioctl_ctrl = (struct cam_control *)arg;
+
+ if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
+ CAM_ERR(CAM_EEPROM, "Invalid Handle Type");
+ return -EINVAL;
+ }
+
if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
sizeof(dev_config)))
return -EFAULT;
@@ -662,6 +676,14 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
"error in converting command Handle Error: %d", rc);
return rc;
}
+
+ if (dev_config.offset > pkt_len) {
+ CAM_ERR(CAM_EEPROM,
+ "Offset is out of bound: off: %lld, %zu",
+ dev_config.offset, pkt_len);
+ return -EINVAL;
+ }
+
csl_packet = (struct cam_packet *)
(generic_pkt_addr + dev_config.offset);
switch (csl_packet->header.op_code & 0xFFFFFF) {
@@ -680,7 +702,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
e_ctrl->cal_data.num_map = 0;
CAM_DBG(CAM_EEPROM,
"Returning the data using kernel probe");
- break;
+ break;
}
rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
if (rc) {
@@ -704,6 +726,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
goto memdata_free;
}
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
if (rc) {
CAM_ERR(CAM_EEPROM,
@@ -713,6 +736,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
rc = cam_eeprom_power_down(e_ctrl);
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
kfree(e_ctrl->cal_data.mapdata);
kfree(e_ctrl->cal_data.map);
e_ctrl->cal_data.num_data = 0;
@@ -727,23 +751,26 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
memdata_free:
kfree(e_ctrl->cal_data.mapdata);
error:
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(e_ctrl->cal_data.map);
e_ctrl->cal_data.num_data = 0;
e_ctrl->cal_data.num_map = 0;
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
return rc;
}
void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
{
int rc;
+ struct cam_eeprom_soc_private *soc_private =
+ (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+ struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT)
return;
- if (e_ctrl->cam_eeprom_state == CAM_EEPROM_START) {
- rc = camera_io_release(&e_ctrl->io_master_info);
- if (rc < 0)
- CAM_ERR(CAM_EEPROM, "Failed in releasing CCI");
+ if (e_ctrl->cam_eeprom_state == CAM_EEPROM_CONFIG) {
rc = cam_eeprom_power_down(e_ctrl);
if (rc < 0)
CAM_ERR(CAM_EEPROM, "EEPROM Power down failed");
@@ -754,9 +781,13 @@ void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
if (rc < 0)
CAM_ERR(CAM_EEPROM, "destroying the device hdl");
+
e_ctrl->bridge_intf.device_hdl = -1;
e_ctrl->bridge_intf.link_hdl = -1;
e_ctrl->bridge_intf.session_hdl = -1;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
}
e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
@@ -807,6 +838,14 @@ int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
break;
case CAM_RELEASE_DEV:
+ if (e_ctrl->cam_eeprom_state != CAM_EEPROM_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_EEPROM,
+ "Not in right state to release : %d",
+ e_ctrl->cam_eeprom_state);
+ goto release_mutex;
+ }
+
if (e_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_EEPROM,
"Invalid Handles: link hdl: %d device hdl: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index d667cf4..5eb29c3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -201,13 +201,6 @@ static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
goto free_soc;
}
- soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
- if (!soc_private) {
- CAM_ERR(CAM_EEPROM, "board info NULL");
- rc = -EINVAL;
- goto ectrl_free;
- }
-
rc = cam_eeprom_init_subdev(e_ctrl);
if (rc)
goto free_soc;
@@ -260,10 +253,9 @@ static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
return -EINVAL;
}
- if (soc_private) {
- kfree(soc_private->power_info.gpio_num_info);
+ if (soc_private)
kfree(soc_private);
- }
+
kfree(e_ctrl);
return 0;
@@ -451,6 +443,9 @@ static int32_t cam_eeprom_platform_driver_probe(
platform_set_drvdata(pdev, e_ctrl);
v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+
+ e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
return rc;
free_soc:
kfree(soc_private);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
index fa4a3dd..4a2190d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -40,8 +40,7 @@
enum cam_eeprom_state {
CAM_EEPROM_INIT,
CAM_EEPROM_ACQUIRE,
- CAM_EEPROM_START,
- CAM_EEPROM_RELEASE,
+ CAM_EEPROM_CONFIG,
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
index 9aab0e4..c7889a5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -1,5 +1,6 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index 8573f00..55da264 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -14,6 +14,7 @@
#include "cam_sensor_cmn_header.h"
#include "cam_flash_core.h"
+#include "cam_res_mgr_api.h"
int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
enum cam_flash_state state)
@@ -25,7 +26,7 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
return -EINVAL;
}
- if ((state == CAM_FLASH_STATE_INIT) &&
+ if ((state == CAM_FLASH_STATE_START) &&
(flash_ctrl->is_regulator_enabled == false)) {
rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
ENABLE_REGULATOR, NULL);
@@ -35,7 +36,8 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
return rc;
}
flash_ctrl->is_regulator_enabled = true;
- } else if ((state == CAM_FLASH_STATE_RELEASE) &&
+ flash_ctrl->flash_state = CAM_FLASH_STATE_START;
+ } else if ((state == CAM_FLASH_STATE_STOP) &&
(flash_ctrl->is_regulator_enabled == true)) {
rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
DISABLE_REGULATOR, NULL);
@@ -45,7 +47,7 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
return rc;
}
flash_ctrl->is_regulator_enabled = false;
- flash_ctrl->flash_state = CAM_FLASH_STATE_RELEASE;
+ flash_ctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
} else {
CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
flash_ctrl->flash_state);
@@ -55,6 +57,74 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
return rc;
}
+static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+ int j = 0;
+ struct cam_flash_frame_setting *nrt_settings;
+
+ if (!fctrl)
+ return -EINVAL;
+
+ nrt_settings = &fctrl->nrt_info;
+
+ if (nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
+ fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+ } else if ((nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+ (nrt_settings->cmn_attr.cmd_type ==
+ CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+ fctrl->nrt_info.cmn_attr.is_settings_valid = false;
+ fctrl->nrt_info.cmn_attr.count = 0;
+ fctrl->nrt_info.num_iterations = 0;
+ fctrl->nrt_info.led_on_delay_ms = 0;
+ fctrl->nrt_info.led_off_delay_ms = 0;
+ for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+ fctrl->nrt_info.led_current_ma[j] = 0;
+ }
+
+ return 0;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+ int rc = 0;
+ int i = 0, j = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+ int frame_offset = 0;
+
+ fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ return -EINVAL;
+ }
+
+ if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+ /* flush all requests*/
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ fctrl->per_frame[i].cmn_attr.request_id = 0;
+ fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+ fctrl->per_frame[i].cmn_attr.count = 0;
+ for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+ fctrl->per_frame[i].led_current_ma[j] = 0;
+ }
+
+ rc = cam_flash_flush_nrt(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "NonRealTime flush error");
+ } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ /* flush request with req_id*/
+ frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+ fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+ fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+ false;
+ fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+ for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+ fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+ }
+ return rc;
+}
+
static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
{
@@ -83,7 +153,8 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
CAM_DBG(CAM_FLASH,
"Led_Current[%d] = %d", i, curr);
- led_trigger_event(flash_ctrl->torch_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->torch_trigger[i],
curr);
}
}
@@ -100,7 +171,8 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
CAM_DBG(CAM_FLASH, "LED flash_current[%d]: %d",
i, curr);
- led_trigger_event(flash_ctrl->flash_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->flash_trigger[i],
curr);
}
}
@@ -110,7 +182,9 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
}
if (flash_ctrl->switch_trigger)
- led_trigger_event(flash_ctrl->switch_trigger, LED_SWITCH_ON);
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->switch_trigger,
+ LED_SWITCH_ON);
return 0;
}
@@ -126,18 +200,21 @@ int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
for (i = 0; i < flash_ctrl->flash_num_sources; i++)
if (flash_ctrl->flash_trigger[i])
- led_trigger_event(flash_ctrl->flash_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->flash_trigger[i],
LED_OFF);
for (i = 0; i < flash_ctrl->torch_num_sources; i++)
if (flash_ctrl->torch_trigger[i])
- led_trigger_event(flash_ctrl->torch_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->torch_trigger[i],
LED_OFF);
if (flash_ctrl->switch_trigger)
- led_trigger_event(flash_ctrl->switch_trigger,
+ cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
LED_SWITCH_OFF);
+ flash_ctrl->flash_state = CAM_FLASH_STATE_START;
return 0;
}
@@ -154,7 +231,8 @@ static int cam_flash_low(
for (i = 0; i < flash_ctrl->flash_num_sources; i++)
if (flash_ctrl->flash_trigger[i])
- led_trigger_event(flash_ctrl->flash_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->flash_trigger[i],
LED_OFF);
rc = cam_flash_ops(flash_ctrl, flash_data,
@@ -178,7 +256,8 @@ static int cam_flash_high(
for (i = 0; i < flash_ctrl->torch_num_sources; i++)
if (flash_ctrl->torch_trigger[i])
- led_trigger_event(flash_ctrl->torch_trigger[i],
+ cam_res_mgr_led_trigger_event(
+ flash_ctrl->torch_trigger[i],
LED_OFF);
rc = cam_flash_ops(flash_ctrl, flash_data,
@@ -237,72 +316,44 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
flash_data = &fctrl->nrt_info;
if (flash_data->opcode ==
CAMERA_SENSOR_FLASH_OP_FIRELOW) {
- if (!(fctrl->is_regulator_enabled)) {
- rc = cam_flash_prepare(fctrl,
- CAM_FLASH_STATE_INIT);
- if (rc) {
- CAM_ERR(CAM_FLASH,
- "Reg Enable Failed %d",
- rc);
- goto nrt_del_req;
- }
- fctrl->flash_state =
- CAM_FLASH_STATE_INIT;
- rc = cam_flash_low(fctrl, flash_data);
- if (rc) {
- CAM_ERR(CAM_FLASH,
- "Torch ON failed : %d",
- rc);
- goto nrt_del_req;
- }
- fctrl->flash_state =
- CAM_FLASH_STATE_LOW;
- }
- } else if (flash_data->opcode ==
- CAMERA_SENSOR_FLASH_OP_OFF) {
- if (fctrl->flash_state !=
- CAM_FLASH_STATE_INIT) {
- rc = cam_flash_off(fctrl);
- if (rc)
- CAM_ERR(CAM_FLASH,
- "LED off failed: %d",
- rc);
- }
-
- rc = cam_flash_prepare(fctrl,
- CAM_FLASH_STATE_RELEASE);
+ rc = cam_flash_low(fctrl, flash_data);
if (rc) {
CAM_ERR(CAM_FLASH,
- "Regulator Disable failed %d",
+ "Torch ON failed : %d",
rc);
goto nrt_del_req;
}
-
fctrl->flash_state =
- CAM_FLASH_STATE_RELEASE;
- fctrl->is_regulator_enabled = false;
+ CAM_FLASH_STATE_LOW;
+ } else if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_OFF) {
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_LOW) {
+ rc = cam_flash_off(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "LED off failed: %d",
+ rc);
+ }
}
} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
flash_data = &fctrl->nrt_info;
- if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+ if (fctrl->flash_state != CAM_FLASH_STATE_START) {
rc = cam_flash_off(fctrl);
if (rc) {
CAM_ERR(CAM_FLASH,
"Flash off failed: %d",
rc);
- } else {
- fctrl->flash_state =
- CAM_FLASH_STATE_INIT;
+ goto nrt_del_req;
}
}
-
num_iterations = flash_data->num_iterations;
for (i = 0; i < num_iterations; i++) {
/* Turn On Torch */
if (fctrl->flash_state ==
- CAM_FLASH_STATE_INIT) {
+ CAM_FLASH_STATE_START) {
rc = cam_flash_low(fctrl, flash_data);
if (rc) {
CAM_ERR(CAM_FLASH,
@@ -311,11 +362,12 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
}
fctrl->flash_state =
CAM_FLASH_STATE_LOW;
- }
- usleep_range(
- flash_data->led_on_delay_ms * 1000,
- flash_data->led_on_delay_ms * 1000 + 100);
+ usleep_range(
+ flash_data->led_on_delay_ms * 1000,
+ flash_data->led_on_delay_ms * 1000 +
+ 100);
+ }
/* Turn Off Torch */
rc = cam_flash_off(fctrl);
if (rc) {
@@ -324,7 +376,7 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
rc);
continue;
}
- fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ fctrl->flash_state = CAM_FLASH_STATE_START;
usleep_range(
flash_data->led_off_delay_ms * 1000,
flash_data->led_off_delay_ms * 1000 + 100);
@@ -338,7 +390,7 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
(flash_data->cmn_attr.is_settings_valid) &&
(flash_data->cmn_attr.request_id == req_id)) {
/* Turn On Flash */
- if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+ if (fctrl->flash_state == CAM_FLASH_STATE_START) {
rc = cam_flash_high(fctrl, flash_data);
if (rc) {
CAM_ERR(CAM_FLASH,
@@ -353,7 +405,7 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
(flash_data->cmn_attr.is_settings_valid) &&
(flash_data->cmn_attr.request_id == req_id)) {
/* Turn On Torch */
- if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+ if (fctrl->flash_state == CAM_FLASH_STATE_START) {
rc = cam_flash_low(fctrl, flash_data);
if (rc) {
CAM_ERR(CAM_FLASH,
@@ -366,15 +418,13 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
(flash_data->cmn_attr.is_settings_valid) &&
(flash_data->cmn_attr.request_id == req_id)) {
- if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) ||
- (fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_LOW) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_HIGH)) {
rc = cam_flash_off(fctrl);
if (rc) {
CAM_ERR(CAM_FLASH,
"Flash off failed %d", rc);
- } else {
- fctrl->flash_state =
- CAM_FLASH_STATE_INIT;
+ goto apply_setting_err;
}
}
} else {
@@ -684,77 +734,14 @@ int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
return 0;
}
-static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+
+int cam_flash_stop_dev(struct cam_flash_ctrl *fctrl)
{
- int j = 0;
- struct cam_flash_frame_setting *nrt_settings;
+ int rc = 0, i, j;
- if (!fctrl)
- return -EINVAL;
-
- nrt_settings = &fctrl->nrt_info;
-
- if (nrt_settings->cmn_attr.cmd_type ==
- CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
- fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
- } else if ((nrt_settings->cmn_attr.cmd_type ==
- CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
- (nrt_settings->cmn_attr.cmd_type ==
- CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
- fctrl->nrt_info.cmn_attr.is_settings_valid = false;
- fctrl->nrt_info.cmn_attr.count = 0;
- fctrl->nrt_info.num_iterations = 0;
- fctrl->nrt_info.led_on_delay_ms = 0;
- fctrl->nrt_info.led_off_delay_ms = 0;
- for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
- fctrl->nrt_info.led_current_ma[j] = 0;
- }
-
- return 0;
-}
-
-int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
-{
- int rc = 0;
- int i = 0, j = 0;
- struct cam_flash_ctrl *fctrl = NULL;
- int frame_offset = 0;
-
- fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
- if (!fctrl) {
- CAM_ERR(CAM_FLASH, "Device data is NULL");
- return -EINVAL;
- }
-
- if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
- /* flush all requests*/
- for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
- fctrl->per_frame[i].cmn_attr.request_id = 0;
- fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
- fctrl->per_frame[i].cmn_attr.count = 0;
- for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
- fctrl->per_frame[i].led_current_ma[j] = 0;
- }
-
- rc = cam_flash_flush_nrt(fctrl);
- if (rc)
- CAM_ERR(CAM_FLASH, "NonRealTime flush error");
- } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
- /* flush request with req_id*/
- frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
- fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
- fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
- false;
- fctrl->per_frame[frame_offset].cmn_attr.count = 0;
- for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
- fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
- }
- return rc;
-}
-
-void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
-{
- int rc, i, j;
+ if ((fctrl->flash_state == CAM_FLASH_STATE_LOW) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_HIGH))
+ cam_flash_off(fctrl);
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
fctrl->per_frame[i].cmn_attr.request_id = 0;
@@ -764,27 +751,63 @@ void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
fctrl->per_frame[i].led_current_ma[j] = 0;
}
- cam_flash_flush_nrt(fctrl);
+ rc = cam_flash_flush_nrt(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "NonRealTime Dev flush failed rc: %d", rc);
+ return rc;
+ }
- if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) &&
+ if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
(fctrl->is_regulator_enabled == true)) {
- rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+ rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_STOP);
if (rc)
- CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+ CAM_ERR(CAM_FLASH, "Disable Regulator Failed rc: %d",
rc);
}
- if (fctrl->bridge_intf.device_hdl != -1) {
- rc = cam_destroy_device_hdl(fctrl->bridge_intf.
- device_hdl);
+ return rc;
+}
+
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
+{
+ int rc = 0;
+
+ if (fctrl->bridge_intf.device_hdl != 1) {
+ rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
if (rc)
CAM_ERR(CAM_FLASH,
- "Failed in destroying the device Handle rc= %d",
+ "Failed in destroying device handle rc = %d",
rc);
fctrl->bridge_intf.device_hdl = -1;
fctrl->bridge_intf.link_hdl = -1;
fctrl->bridge_intf.session_hdl = -1;
}
+
+ return rc;
+}
+
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
+{
+ int rc;
+
+ if (fctrl->flash_state == CAM_FLASH_STATE_INIT)
+ return;
+
+ if (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
+ cam_flash_release_dev(fctrl);
+ return;
+ }
+
+ rc = cam_flash_stop_dev(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
+
+ rc = cam_flash_release_dev(fctrl);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc);
+
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
}
int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
index f2a782b..d5ea04c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -30,4 +30,6 @@ int cam_flash_off(struct cam_flash_ctrl *fctrl);
int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
enum cam_flash_state state);
void cam_flash_shutdown(struct cam_flash_ctrl *flash_ctrl);
+int cam_flash_stop_dev(struct cam_flash_ctrl *flash_ctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index 57f1f0f..2b371a3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -36,6 +36,13 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
struct cam_create_dev_hdl bridge_params;
CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+
+ if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+ CAM_ERR(CAM_FLASH,
+ "Cannot apply Acquire dev: Prev state: %d",
+ fctrl->flash_state);
+ }
+
if (fctrl->bridge_intf.device_hdl != -1) {
CAM_ERR(CAM_FLASH, "Device is already acquired");
rc = -EINVAL;
@@ -70,12 +77,19 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
rc = -EFAULT;
goto release_mutex;
}
- break;
fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+ break;
}
case CAM_RELEASE_DEV: {
CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
- if (fctrl->bridge_intf.device_hdl == -1) {
+ if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+ CAM_WARN(CAM_FLASH,
+ "Cannot apply Release dev: Prev state:%d",
+ fctrl->flash_state);
+ }
+
+ if (fctrl->bridge_intf.device_hdl == -1 &&
+ fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
CAM_ERR(CAM_FLASH,
"Invalid Handle: Link Hdl: %d device hdl: %d",
fctrl->bridge_intf.device_hdl,
@@ -83,16 +97,13 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
rc = -EINVAL;
goto release_mutex;
}
- rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+ rc = cam_flash_release_dev(fctrl);
if (rc)
CAM_ERR(CAM_FLASH,
"Failed in destroying the device Handle rc= %d",
rc);
- fctrl->bridge_intf.device_hdl = -1;
- fctrl->bridge_intf.link_hdl = -1;
- fctrl->bridge_intf.session_hdl = -1;
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
break;
- fctrl->flash_state = CAM_FLASH_STATE_RELEASE;
}
case CAM_QUERY_CAP: {
struct cam_flash_query_cap_info flash_cap = {0};
@@ -120,29 +131,38 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
}
case CAM_START_DEV: {
CAM_DBG(CAM_FLASH, "CAM_START_DEV");
- rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_INIT);
+ if (fctrl->flash_state != CAM_FLASH_STATE_ACQUIRE) {
+ CAM_WARN(CAM_FLASH,
+ "Cannot apply Start Dev: Prev state: %d",
+ fctrl->flash_state);
+ rc = -EINVAL;
+ goto release_mutex;
+ }
+
+ rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_START);
if (rc) {
CAM_ERR(CAM_FLASH,
"Enable Regulator Failed rc = %d", rc);
goto release_mutex;
}
- fctrl->flash_state = CAM_FLASH_STATE_INIT;
rc = cam_flash_apply_setting(fctrl, 0);
if (rc) {
CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
goto release_mutex;
}
- fctrl->flash_state = CAM_FLASH_STATE_INIT;
+ fctrl->flash_state = CAM_FLASH_STATE_START;
break;
}
case CAM_STOP_DEV: {
- CAM_DBG(CAM_FLASH, "CAM_STOP_DEV");
- if (fctrl->flash_state != CAM_FLASH_STATE_INIT)
- cam_flash_off(fctrl);
+ if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+ CAM_WARN(CAM_FLASH,
+ "Cannot apply Stop dev: Prev state is: %d",
+ fctrl->flash_state);
+ }
- rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+ rc = cam_flash_stop_dev(fctrl);
if (rc) {
- CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+ CAM_ERR(CAM_FLASH, "Stop Dev Failed rc = %d",
rc);
goto release_mutex;
}
@@ -344,6 +364,7 @@ static int32_t cam_flash_platform_probe(struct platform_device *pdev)
mutex_init(&(flash_ctrl->flash_mutex));
mutex_init(&(flash_ctrl->flash_wq_mutex));
+ flash_ctrl->flash_state = CAM_FLASH_STATE_INIT;
CAM_DBG(CAM_FLASH, "Probe success");
return rc;
free_resource:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index 1583c27..bacf088 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -50,9 +50,10 @@ enum cam_flash_switch_trigger_ops {
enum cam_flash_state {
CAM_FLASH_STATE_INIT,
CAM_FLASH_STATE_ACQUIRE,
+ CAM_FLASH_STATE_START,
CAM_FLASH_STATE_LOW,
CAM_FLASH_STATE_HIGH,
- CAM_FLASH_STATE_RELEASE,
+ CAM_FLASH_STATE_STOP,
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
index a9ab169..a195762 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "cam_flash_soc.h"
+#include "cam_res_mgr_api.h"
static int32_t cam_get_source_node_info(
struct device_node *of_node,
@@ -38,7 +39,7 @@ static int32_t cam_get_source_node_info(
} else {
CAM_DBG(CAM_FLASH, "switch trigger %s",
soc_private->switch_trigger_name);
- led_trigger_register_simple(
+ cam_res_mgr_led_trigger_register(
soc_private->switch_trigger_name,
&fctrl->switch_trigger);
}
@@ -111,7 +112,7 @@ static int32_t cam_get_source_node_info(
CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
i, soc_private->flash_max_current[i]);
- led_trigger_register_simple(
+ cam_res_mgr_led_trigger_register(
soc_private->flash_trigger_name[i],
&fctrl->flash_trigger[i]);
}
@@ -172,7 +173,7 @@ static int32_t cam_get_source_node_info(
CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
i, soc_private->torch_max_current[i]);
- led_trigger_register_simple(
+ cam_res_mgr_led_trigger_register(
soc_private->torch_trigger_name[i],
&fctrl->torch_trigger[i]);
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
index ec1d2fd..9397c68 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
@@ -1,6 +1,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 2a877fd..d825f5e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -17,6 +17,46 @@
#include "cam_ois_soc.h"
#include "cam_sensor_util.h"
#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+
+int32_t cam_ois_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info)
+{
+ int rc = 0;
+
+ power_info->power_setting_size = 1;
+ power_info->power_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_setting)
+ return -ENOMEM;
+
+ power_info->power_setting[0].seq_type = SENSOR_VAF;
+ power_info->power_setting[0].seq_val = CAM_VAF;
+ power_info->power_setting[0].config_val = 1;
+
+ power_info->power_down_setting_size = 1;
+ power_info->power_down_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_down_setting) {
+ rc = -ENOMEM;
+ goto free_power_settings;
+ }
+
+ power_info->power_setting[0].seq_type = SENSOR_VAF;
+ power_info->power_setting[0].seq_val = CAM_VAF;
+ power_info->power_setting[0].config_val = 0;
+
+ return rc;
+
+free_power_settings:
+ kfree(power_info->power_setting);
+ return rc;
+}
+
/**
* cam_ois_get_dev_handle - get device handle
@@ -60,87 +100,88 @@ static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
return 0;
}
-static int cam_ois_vreg_control(struct cam_ois_ctrl_t *o_ctrl,
- int config)
-{
- int rc = 0, cnt;
- struct cam_hw_soc_info *soc_info;
-
- soc_info = &o_ctrl->soc_info;
- cnt = soc_info->num_rgltr;
-
- if (!cnt)
- return 0;
-
- if (cnt >= CAM_SOC_MAX_REGULATOR) {
- CAM_ERR(CAM_OIS, "Regulators more than supported %d", cnt);
- return -EINVAL;
- }
-
- if (config) {
- rc = cam_soc_util_request_platform_resource(soc_info,
- NULL, NULL);
- rc = cam_soc_util_enable_platform_resource(soc_info, false, 0,
- false);
- } else {
- rc = cam_soc_util_disable_platform_resource(soc_info, false,
- false);
- rc = cam_soc_util_release_platform_resource(soc_info);
- }
-
- return rc;
-}
-
static int cam_ois_power_up(struct cam_ois_ctrl_t *o_ctrl)
{
- int rc = 0;
- struct cam_hw_soc_info *soc_info =
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info =
&o_ctrl->soc_info;
- struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
- rc = cam_ois_vreg_control(o_ctrl, 1);
- if (rc < 0) {
- CAM_ERR(CAM_OIS, "OIS Reg Failed %d", rc);
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(
+ &o_ctrl->soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "failed to fill vreg params for power up rc:%d", rc);
return rc;
}
- gpio_num_info = o_ctrl->gpio_num_info;
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ &o_ctrl->soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_OIS,
+ "failed to fill vreg params power down rc:%d", rc);
+ return rc;
+ }
- if (soc_info->gpio_data &&
- gpio_num_info &&
- gpio_num_info->valid[SENSOR_VAF] == 1) {
- gpio_set_value_cansleep(
- gpio_num_info->gpio_num[SENSOR_VAF],
- 1);
+ power_info->dev = soc_info->dev;
+
+ rc = cam_sensor_core_power_up(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "failed in ois power up rc %d", rc);
+ return rc;
}
/* VREG needs some delay to power up */
usleep_range(2000, 2050);
+ rc = camera_io_init(&o_ctrl->io_master_info);
+ if (rc)
+ CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc);
+
return rc;
}
static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
{
- int32_t rc = 0;
- struct cam_hw_soc_info *soc_info =
+ int32_t rc = 0;
+ struct cam_sensor_power_ctrl_t *power_info;
+ struct cam_hw_soc_info *soc_info =
&o_ctrl->soc_info;
- struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+ struct cam_ois_soc_private *soc_private;
- gpio_num_info = o_ctrl->gpio_num_info;
-
- if (soc_info->gpio_data &&
- gpio_num_info &&
- gpio_num_info->valid[SENSOR_VAF] == 1) {
-
- gpio_set_value_cansleep(
- gpio_num_info->gpio_num[SENSOR_VAF],
- GPIOF_OUT_INIT_LOW);
+ if (!o_ctrl) {
+ CAM_ERR(CAM_OIS, "failed: o_ctrl %pK", o_ctrl);
+ return -EINVAL;
}
- rc = cam_ois_vreg_control(o_ctrl, 0);
- if (rc < 0)
- CAM_ERR(CAM_OIS, "Disable regualtor Failed %d", rc);
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+ soc_info = &o_ctrl->soc_info;
+
+ if (!power_info) {
+ CAM_ERR(CAM_OIS, "failed: power_info %pK", power_info);
+ return -EINVAL;
+ }
+
+ rc = msm_camera_power_down(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc);
+ return rc;
+ }
+
+ camera_io_release(&o_ctrl->io_master_info);
return rc;
}
@@ -436,6 +477,42 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
return rc;
}
}
+
+ if (o_ctrl->ois_fw_flag) {
+ rc = cam_ois_fw_download(o_ctrl);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Failed OIS FW Download");
+ goto pwr_dwn;
+ }
+ }
+
+ rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "Cannot apply Init settings");
+ goto pwr_dwn;
+ }
+
+ if (o_ctrl->is_ois_calib) {
+ rc = cam_ois_apply_settings(o_ctrl,
+ &o_ctrl->i2c_calib_data);
+ if (rc) {
+ CAM_ERR(CAM_OIS, "Cannot apply calib data");
+ goto pwr_dwn;
+ }
+ }
+
+ rc = delete_request(&o_ctrl->i2c_init_data);
+ if (rc < 0) {
+ CAM_WARN(CAM_OIS,
+ "Fail deleting Init data: rc: %d", rc);
+ rc = 0;
+ }
+ rc = delete_request(&o_ctrl->i2c_calib_data);
+ if (rc < 0) {
+ CAM_WARN(CAM_OIS,
+ "Fail deleting Calibration data: rc: %d", rc);
+ rc = 0;
+ }
break;
case CAM_OIS_PACKET_OPCODE_OIS_CONTROL:
offset = (uint32_t *)&csl_packet->payload;
@@ -452,13 +529,23 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
}
rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings);
- if (rc < 0)
+ if (rc < 0) {
CAM_ERR(CAM_OIS, "Cannot apply mode settings");
+ return rc;
+ }
+
+ rc = delete_request(i2c_reg_settings);
+ if (rc < 0)
+ CAM_ERR(CAM_OIS,
+ "Fail deleting Mode data: rc: %d", rc);
break;
default:
break;
}
return rc;
+pwr_dwn:
+ cam_ois_power_down(o_ctrl);
+ return rc;
}
void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
@@ -468,17 +555,12 @@ void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
return;
- if (o_ctrl->cam_ois_state == CAM_OIS_START) {
- rc = camera_io_release(&o_ctrl->io_master_info);
- if (rc < 0)
- CAM_ERR(CAM_OIS, "Failed in releasing CCI");
+ if ((o_ctrl->cam_ois_state == CAM_OIS_START) ||
+ (o_ctrl->cam_ois_state == CAM_OIS_ACQUIRE)) {
rc = cam_ois_power_down(o_ctrl);
if (rc < 0)
CAM_ERR(CAM_OIS, "OIS Power down failed");
- o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
- }
- if (o_ctrl->cam_ois_state == CAM_OIS_ACQUIRE) {
rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
if (rc < 0)
CAM_ERR(CAM_OIS, "destroying the device hdl");
@@ -517,7 +599,7 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
&ois_cap,
sizeof(struct cam_ois_query_cap_t))) {
CAM_ERR(CAM_OIS, "Failed Copy to User");
- return -EFAULT;
+ rc = -EFAULT;
goto release_mutex;
}
CAM_DBG(CAM_OIS, "ois_cap: ID: %d", ois_cap.slot_info);
@@ -528,41 +610,22 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
CAM_ERR(CAM_OIS, "Failed to acquire dev");
goto release_mutex;
}
- o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
- break;
- case CAM_START_DEV:
+
rc = cam_ois_power_up(o_ctrl);
if (rc) {
CAM_ERR(CAM_OIS, " OIS Power up failed");
goto release_mutex;
}
- rc = camera_io_init(&o_ctrl->io_master_info);
- if (rc) {
- CAM_ERR(CAM_OIS, "cci_init failed");
- goto pwr_dwn;
- }
- if (o_ctrl->ois_fw_flag) {
- rc = cam_ois_fw_download(o_ctrl);
- if (rc) {
- CAM_ERR(CAM_OIS, "Failed OIS FW Download");
- goto pwr_dwn;
- }
- }
-
- rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
- if (rc < 0) {
- CAM_ERR(CAM_OIS, "Cannot apply Init settings");
- goto pwr_dwn;
- }
-
- if (o_ctrl->is_ois_calib) {
- rc = cam_ois_apply_settings(o_ctrl,
- &o_ctrl->i2c_calib_data);
- if (rc) {
- CAM_ERR(CAM_OIS, "Cannot apply calib data");
- goto pwr_dwn;
- }
+ o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+ break;
+ case CAM_START_DEV:
+ if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state for start : %d",
+ o_ctrl->cam_ois_state);
+ goto release_mutex;
}
o_ctrl->cam_ois_state = CAM_OIS_START;
break;
@@ -574,6 +637,20 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
}
break;
case CAM_RELEASE_DEV:
+ if (o_ctrl->cam_ois_state != CAM_OIS_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state for release : %d",
+ o_ctrl->cam_ois_state);
+ goto release_mutex;
+ }
+
+ rc = cam_ois_power_down(o_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS, "OIS Power down failed");
+ goto release_mutex;
+ }
+
if (o_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_OIS, "link hdl: %d device hdl: %d",
o_ctrl->bridge_intf.device_hdl,
@@ -590,13 +667,11 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
o_ctrl->cam_ois_state = CAM_OIS_INIT;
break;
case CAM_STOP_DEV:
- rc = camera_io_release(&o_ctrl->io_master_info);
- if (rc < 0)
- CAM_ERR(CAM_OIS, "Failed in releasing CCI");
- rc = cam_ois_power_down(o_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_OIS, "OIS Power down failed");
- goto release_mutex;
+ if (o_ctrl->cam_ois_state != CAM_OIS_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_OIS,
+ "Not in right state for stop : %d",
+ o_ctrl->cam_ois_state);
}
o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
break;
@@ -604,10 +679,7 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
CAM_ERR(CAM_OIS, "invalid opcode");
goto release_mutex;
}
-pwr_dwn:
- cam_ois_power_down(o_ctrl);
release_mutex:
mutex_unlock(&(o_ctrl->ois_mutex));
-
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
index 6f81d09..516ac88 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
@@ -14,6 +14,17 @@
#include "cam_ois_dev.h"
+/**
+ * @power_info: power setting info to control the power
+ *
+ * This API constructs the default OIS power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_ois_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info);
+
+
int cam_ois_driver_cmd(struct cam_ois_ctrl_t *e_ctrl, void *arg);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index 2629180..d9b43a4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -203,10 +203,21 @@ static int cam_ois_i2c_driver_probe(struct i2c_client *client,
rc = cam_ois_init_subdev_param(o_ctrl);
if (rc)
goto octrl_free;
+
+ rc = cam_ois_construct_default_power_setting(
+ &soc_private->power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Construct default ois power setting failed.");
+ goto unreg_subdev;
+ }
+
o_ctrl->cam_ois_state = CAM_OIS_INIT;
return rc;
+unreg_subdev:
+ cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
octrl_free:
kfree(o_ctrl);
probe_failure:
@@ -215,13 +226,21 @@ static int cam_ois_i2c_driver_probe(struct i2c_client *client,
static int cam_ois_i2c_driver_remove(struct i2c_client *client)
{
- struct cam_ois_ctrl_t *o_ctrl = i2c_get_clientdata(client);
+ struct cam_ois_ctrl_t *o_ctrl = i2c_get_clientdata(client);
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
if (!o_ctrl) {
CAM_ERR(CAM_OIS, "ois device is NULL");
return -EINVAL;
}
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl);
@@ -240,6 +259,7 @@ static int32_t cam_ois_platform_driver_probe(
return -ENOMEM;
o_ctrl->soc_info.pdev = pdev;
+ o_ctrl->pdev = pdev;
o_ctrl->soc_info.dev = &pdev->dev;
o_ctrl->soc_info.dev_name = pdev->name;
@@ -280,9 +300,19 @@ static int32_t cam_ois_platform_driver_probe(
}
o_ctrl->bridge_intf.device_hdl = -1;
+ rc = cam_ois_construct_default_power_setting(
+ &soc_private->power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_OIS,
+ "Construct default ois power setting failed.");
+ goto unreg_subdev;
+ }
+
platform_set_drvdata(pdev, o_ctrl);
v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
+ o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
return rc;
unreg_subdev:
cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
@@ -297,7 +327,9 @@ static int32_t cam_ois_platform_driver_probe(
static int cam_ois_platform_driver_remove(struct platform_device *pdev)
{
- struct cam_ois_ctrl_t *o_ctrl;
+ struct cam_ois_ctrl_t *o_ctrl;
+ struct cam_ois_soc_private *soc_private;
+ struct cam_sensor_power_ctrl_t *power_info;
o_ctrl = platform_get_drvdata(pdev);
if (!o_ctrl) {
@@ -305,6 +337,12 @@ static int cam_ois_platform_driver_remove(struct platform_device *pdev)
return -EINVAL;
}
+ soc_private =
+ (struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+ power_info = &soc_private->power_info;
+
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
kfree(o_ctrl->soc_info.soc_private);
kfree(o_ctrl->io_master_info.cci_client);
kfree(o_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index e341bb7..80f1e84 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -35,7 +35,6 @@ enum cam_ois_state {
CAM_OIS_INIT,
CAM_OIS_ACQUIRE,
CAM_OIS_START,
- CAM_OIS_RELEASE,
};
/**
@@ -94,7 +93,6 @@ struct cam_ois_intf_params {
* @ois_mutex : ois mutex
* @soc_info : ois soc related info
* @io_master_info : Information about the communication master
- * @gpio_num_info : gpio info
* @cci_i2c_master : I2C structure
* @v4l2_dev_str : V4L2 device structure
* @bridge_intf : bridge interface params
@@ -115,7 +113,6 @@ struct cam_ois_ctrl_t {
struct mutex ois_mutex;
struct cam_hw_soc_info soc_info;
struct camera_io_master io_master_info;
- struct msm_camera_gpio_num_info *gpio_num_info;
enum cci_i2c_master_t cci_i2c_master;
struct cam_subdev v4l2_dev_str;
struct cam_ois_intf_params bridge_intf;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
new file mode 100644
index 0000000..516faf5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
new file mode 100644
index 0000000..bb3789b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
@@ -0,0 +1,737 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_res_mgr_private.h"
+
+static struct cam_res_mgr *cam_res;
+
+static void cam_res_mgr_free_res(void)
+{
+ struct cam_dev_res *dev_res, *dev_temp;
+ struct cam_gpio_res *gpio_res, *gpio_temp;
+ struct cam_flash_res *flash_res, *flash_temp;
+
+ if (!cam_res)
+ return;
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_for_each_entry_safe(gpio_res, gpio_temp,
+ &cam_res->gpio_res_list, list) {
+ list_for_each_entry_safe(dev_res, dev_temp,
+ &gpio_res->dev_list, list) {
+ list_del_init(&dev_res->list);
+ kfree(dev_res);
+ }
+ list_del_init(&gpio_res->list);
+ kfree(gpio_res);
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+
+ mutex_lock(&cam_res->flash_res_lock);
+ list_for_each_entry_safe(flash_res, flash_temp,
+ &cam_res->flash_res_list, list) {
+ list_del_init(&flash_res->list);
+ kfree(flash_res);
+ }
+ mutex_unlock(&cam_res->flash_res_lock);
+
+ mutex_lock(&cam_res->clk_res_lock);
+ cam_res->shared_clk_ref_count = 0;
+ mutex_unlock(&cam_res->clk_res_lock);
+}
+
+void cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp)
+{
+ bool found = false;
+ struct cam_flash_res *flash_res;
+
+ if (!cam_res) {
+ /*
+	 * If this driver is not probed, then just register the
+	 * led trigger.
+ */
+ led_trigger_register_simple(name, tp);
+ return;
+ }
+
+ mutex_lock(&cam_res->flash_res_lock);
+ list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+ if (!strcmp(flash_res->name, name)) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&cam_res->flash_res_lock);
+
+ if (found) {
+ *tp = flash_res->trigger;
+ } else {
+ flash_res = kzalloc(sizeof(struct cam_flash_res), GFP_KERNEL);
+ if (!flash_res) {
+ CAM_ERR(CAM_RES,
+ "Failed to malloc memory for flash_res:%s",
+ name);
+ *tp = NULL;
+ return;
+ }
+
+ led_trigger_register_simple(name, tp);
+ INIT_LIST_HEAD(&flash_res->list);
+ flash_res->trigger = *tp;
+ flash_res->name = name;
+
+ mutex_lock(&cam_res->flash_res_lock);
+ list_add_tail(&flash_res->list, &cam_res->flash_res_list);
+ mutex_unlock(&cam_res->flash_res_lock);
+ }
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_register);
+
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp)
+{
+ bool found = false;
+ struct cam_flash_res *flash_res;
+
+ if (!cam_res) {
+ /*
+	 * If this driver is not probed, then just unregister the
+	 * led trigger.
+ */
+ led_trigger_unregister_simple(tp);
+ return;
+ }
+
+ mutex_lock(&cam_res->flash_res_lock);
+ list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+ if (flash_res->trigger == tp) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ led_trigger_unregister_simple(tp);
+ list_del_init(&flash_res->list);
+ kfree(flash_res);
+ }
+ mutex_unlock(&cam_res->flash_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_unregister);
+
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+ enum led_brightness brightness)
+{
+ bool found = false;
+ struct cam_flash_res *flash_res;
+
+ if (!cam_res) {
+ /*
+	 * If this driver is not probed, then just trigger
+	 * the led event.
+ */
+ led_trigger_event(trig, brightness);
+ return;
+ }
+
+ mutex_lock(&cam_res->flash_res_lock);
+ list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+ if (flash_res->trigger == trig) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&cam_res->flash_res_lock);
+
+ if (found)
+ led_trigger_event(trig, brightness);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_event);
+
+int cam_res_mgr_shared_pinctrl_init(void)
+{
+ struct cam_soc_pinctrl_info *pinctrl_info;
+
+ /*
+	 * cam_res may be NULL and shared_gpio_enabled may be false;
+	 * either means this driver is not probed or this device
+	 * has no shared gpio.
+ */
+ if (!cam_res || !cam_res->shared_gpio_enabled) {
+ CAM_DBG(CAM_RES, "Not support shared gpio.");
+ return 0;
+ }
+
+ if (cam_res->pstatus != PINCTRL_STATUS_PUT) {
+ CAM_DBG(CAM_RES, "The shared pinctrl already been got.");
+ return 0;
+ }
+
+ pinctrl_info = &cam_res->dt.pinctrl_info;
+
+ pinctrl_info->pinctrl =
+ devm_pinctrl_get(cam_res->dev);
+ if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+ CAM_ERR(CAM_RES, "Pinctrl not available");
+ cam_res->shared_gpio_enabled = false;
+ return -EINVAL;
+ }
+
+ pinctrl_info->gpio_state_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ CAM_RES_MGR_DEFAULT);
+ if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_active)) {
+ CAM_ERR(CAM_RES,
+ "Failed to get the active state pinctrl handle");
+ cam_res->shared_gpio_enabled = false;
+ return -EINVAL;
+ }
+
+ pinctrl_info->gpio_state_suspend =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ CAM_RES_MGR_SLEEP);
+ if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_suspend)) {
+ CAM_ERR(CAM_RES,
+ "Failed to get the active state pinctrl handle");
+ cam_res->shared_gpio_enabled = false;
+ return -EINVAL;
+ }
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ cam_res->pstatus = PINCTRL_STATUS_GOT;
+ mutex_unlock(&cam_res->gpio_res_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_init);
+
+static bool cam_res_mgr_shared_pinctrl_check_hold(void)
+{
+ int index = 0;
+ int dev_num = 0;
+ bool hold = false;
+ struct list_head *list;
+ struct cam_gpio_res *gpio_res;
+ struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+ for (; index < dt->num_shared_gpio; index++) {
+ list_for_each_entry(gpio_res,
+ &cam_res->gpio_res_list, list) {
+
+ if (gpio_res->gpio ==
+ dt->shared_gpio[index]) {
+ list_for_each(list, &gpio_res->dev_list)
+ dev_num++;
+
+ if (dev_num >= 2) {
+ hold = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (cam_res->shared_clk_ref_count > 1)
+ hold = true;
+
+ return hold;
+}
+
+void cam_res_mgr_shared_pinctrl_put(void)
+{
+ struct cam_soc_pinctrl_info *pinctrl_info;
+
+ if (!cam_res || !cam_res->shared_gpio_enabled) {
+ CAM_DBG(CAM_RES, "Not support shared gpio.");
+ return;
+ }
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+ CAM_DBG(CAM_RES, "The shared pinctrl already been put");
+ mutex_unlock(&cam_res->gpio_res_lock);
+ return;
+ }
+
+ if (cam_res_mgr_shared_pinctrl_check_hold()) {
+ CAM_INFO(CAM_RES, "Need hold put this pinctrl");
+ mutex_unlock(&cam_res->gpio_res_lock);
+ return;
+ }
+
+ pinctrl_info = &cam_res->dt.pinctrl_info;
+
+ devm_pinctrl_put(pinctrl_info->pinctrl);
+
+ cam_res->pstatus = PINCTRL_STATUS_PUT;
+ mutex_unlock(&cam_res->gpio_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_put);
+
+int cam_res_mgr_shared_pinctrl_select_state(bool active)
+{
+ int rc = 0;
+ struct cam_soc_pinctrl_info *pinctrl_info;
+
+ if (!cam_res || !cam_res->shared_gpio_enabled) {
+ CAM_DBG(CAM_RES, "Not support shared gpio.");
+ return 0;
+ }
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+ CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+ mutex_unlock(&cam_res->gpio_res_lock);
+ return 0;
+ }
+
+ pinctrl_info = &cam_res->dt.pinctrl_info;
+
+ if (active && (cam_res->pstatus != PINCTRL_STATUS_ACTIVE)) {
+ rc = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->gpio_state_active);
+ cam_res->pstatus = PINCTRL_STATUS_ACTIVE;
+ } else if (!active &&
+ !cam_res_mgr_shared_pinctrl_check_hold()) {
+ rc = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->gpio_state_suspend);
+ cam_res->pstatus = PINCTRL_STATUS_SUSPEND;
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_select_state);
+
+int cam_res_mgr_shared_pinctrl_post_init(void)
+{
+ int ret = 0;
+ struct cam_soc_pinctrl_info *pinctrl_info;
+
+ if (!cam_res || !cam_res->shared_gpio_enabled) {
+ CAM_DBG(CAM_RES, "Not support shared gpio.");
+ return ret;
+ }
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+ CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+ mutex_unlock(&cam_res->gpio_res_lock);
+ return ret;
+ }
+
+ pinctrl_info = &cam_res->dt.pinctrl_info;
+
+ /*
+	 * If there is no gpio resource in gpio_res_list and no
+	 * shared clk in use, this device does not have a
+	 * shared gpio.
+ */
+ if (list_empty(&cam_res->gpio_res_list) &&
+ cam_res->shared_clk_ref_count < 1) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->gpio_state_suspend);
+ devm_pinctrl_put(pinctrl_info->pinctrl);
+ cam_res->pstatus = PINCTRL_STATUS_PUT;
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_post_init);
+
+static int cam_res_mgr_add_device(struct device *dev,
+ struct cam_gpio_res *gpio_res)
+{
+ struct cam_dev_res *dev_res = NULL;
+
+ dev_res = kzalloc(sizeof(struct cam_dev_res), GFP_KERNEL);
+ if (!dev_res)
+ return -ENOMEM;
+
+ dev_res->dev = dev;
+ INIT_LIST_HEAD(&dev_res->list);
+
+ list_add_tail(&dev_res->list, &gpio_res->dev_list);
+
+ return 0;
+}
+
+static bool cam_res_mgr_gpio_is_shared(uint gpio)
+{
+ int index = 0;
+ bool found = false;
+ struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+ for (; index < dt->num_shared_gpio; index++) {
+ if (gpio == dt->shared_gpio[index]) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+int cam_res_mgr_gpio_request(struct device *dev, uint gpio,
+ unsigned long flags, const char *label)
+{
+ int rc = 0;
+ bool found = false;
+ struct cam_gpio_res *gpio_res = NULL;
+
+ if (cam_res && cam_res->shared_gpio_enabled) {
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+ if (gpio == gpio_res->gpio) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ /*
+	 * found being false covers two situations:
+	 * 1. shared gpio is not enabled
+	 * 2. shared gpio is enabled, but this gpio was not
+	 *    found in the gpio_res_list
+	 * In both situations the gpio needs to be requested.
+ */
+ if (!found) {
+ rc = gpio_request_one(gpio, flags, label);
+ if (rc) {
+ CAM_ERR(CAM_RES, "gpio %d:%s request fails",
+ gpio, label);
+ return rc;
+ }
+ }
+
+ /*
+	 * If the gpio is in the shared list but was not found
+	 * in gpio_res_list, then insert a new cam_gpio_res
+	 * into gpio_res_list.
+ */
+ if (!found && cam_res
+ && cam_res->shared_gpio_enabled &&
+ cam_res_mgr_gpio_is_shared(gpio)) {
+
+ gpio_res = kzalloc(sizeof(struct cam_gpio_res), GFP_KERNEL);
+ if (!gpio_res)
+ return -ENOMEM;
+
+ gpio_res->gpio = gpio;
+ gpio_res->power_on_count = 0;
+ INIT_LIST_HEAD(&gpio_res->list);
+ INIT_LIST_HEAD(&gpio_res->dev_list);
+
+ rc = cam_res_mgr_add_device(dev, gpio_res);
+ if (rc) {
+ kfree(gpio_res);
+ return rc;
+ }
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_add_tail(&gpio_res->list, &cam_res->gpio_res_list);
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ if (found && cam_res
+ && cam_res->shared_gpio_enabled) {
+ struct cam_dev_res *dev_res = NULL;
+
+ found = 0;
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_for_each_entry(dev_res, &gpio_res->dev_list, list) {
+ if (dev_res->dev == dev) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ rc = cam_res_mgr_add_device(dev, gpio_res);
+
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_request);
+
+static void cam_res_mgr_gpio_free(struct device *dev, uint gpio)
+{
+ bool found = false;
+ bool need_free = true;
+ int dev_num = 0;
+ struct cam_gpio_res *gpio_res = NULL;
+
+ if (cam_res && cam_res->shared_gpio_enabled) {
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+ if (gpio == gpio_res->gpio) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ if (found && cam_res
+ && cam_res->shared_gpio_enabled) {
+ struct list_head *list;
+ struct cam_dev_res *dev_res = NULL;
+
+ mutex_lock(&cam_res->gpio_res_lock);
+ /* Count the dev number in the dev_list */
+ list_for_each(list, &gpio_res->dev_list)
+ dev_num++;
+
+ /*
+ * Need free the gpio if only has last 1 device
+ * in the dev_list, otherwise, not free this
+ * gpio.
+ */
+ if (dev_num == 1) {
+ dev_res = list_first_entry(&gpio_res->dev_list,
+ struct cam_dev_res, list);
+ list_del_init(&dev_res->list);
+ kfree(dev_res);
+
+ list_del_init(&gpio_res->list);
+ kfree(gpio_res);
+ } else {
+ list_for_each_entry(dev_res,
+ &gpio_res->dev_list, list) {
+ if (dev_res->dev == dev) {
+ list_del_init(&dev_res->list);
+ kfree(dev_res);
+ need_free = false;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ if (need_free)
+ gpio_free(gpio);
+}
+
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+ const struct gpio *array, size_t num)
+{
+ while (num--)
+ cam_res_mgr_gpio_free(dev, (array[num]).gpio);
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_free_arry);
+
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value)
+{
+ int rc = 0;
+ bool found = false;
+ struct cam_gpio_res *gpio_res = NULL;
+
+ if (cam_res && cam_res->shared_gpio_enabled) {
+ mutex_lock(&cam_res->gpio_res_lock);
+ list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+ if (gpio == gpio_res->gpio) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&cam_res->gpio_res_lock);
+ }
+
+ /*
+	 * Set the value directly if the gpio cannot be found in
+	 * gpio_res_list; otherwise apply ref count handling.
+	 */
+ if (!found) {
+ gpio_set_value_cansleep(gpio, value);
+ } else {
+ if (value) {
+ gpio_res->power_on_count++;
+ if (gpio_res->power_on_count < 2) {
+ gpio_set_value_cansleep(gpio, value);
+ CAM_DBG(CAM_RES,
+ "Shared GPIO(%d) : HIGH", gpio);
+ }
+ } else {
+ gpio_res->power_on_count--;
+ if (gpio_res->power_on_count < 1) {
+ gpio_set_value_cansleep(gpio, value);
+ CAM_DBG(CAM_RES,
+ "Shared GPIO(%d) : LOW", gpio);
+ }
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_set_value);
+
+void cam_res_mgr_shared_clk_config(bool value)
+{
+ if (!cam_res)
+ return;
+
+ mutex_lock(&cam_res->clk_res_lock);
+ if (value)
+ cam_res->shared_clk_ref_count++;
+ else
+ cam_res->shared_clk_ref_count--;
+ mutex_unlock(&cam_res->clk_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_clk_config);
+
+static int cam_res_mgr_parse_dt(struct device *dev)
+{
+ int rc = 0;
+ struct device_node *of_node = NULL;
+ struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+ of_node = dev->of_node;
+
+ dt->num_shared_gpio = of_property_count_u32_elems(of_node,
+ "shared-gpios");
+
+ if (dt->num_shared_gpio > MAX_SHARED_GPIO_SIZE ||
+ dt->num_shared_gpio <= 0) {
+ /*
+		 * Not really an error; it means the dtsi does not
+		 * configure any shared gpio.
+ */
+ CAM_DBG(CAM_RES, "Invalid GPIO number %d. No shared gpio.",
+ dt->num_shared_gpio);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "shared-gpios",
+ dt->shared_gpio, dt->num_shared_gpio);
+ if (rc) {
+ CAM_ERR(CAM_RES, "Get shared gpio array failed.");
+ return -EINVAL;
+ }
+
+ dt->pinctrl_info.pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(dt->pinctrl_info.pinctrl)) {
+ CAM_ERR(CAM_RES, "Pinctrl not available");
+ return -EINVAL;
+ }
+
+ /*
+	 * Check the pinctrl states to make sure shared gpio
+	 * support is enabled.
+ */
+ dt->pinctrl_info.gpio_state_active =
+ pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+ CAM_RES_MGR_DEFAULT);
+ if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_active)) {
+ CAM_ERR(CAM_RES,
+ "Failed to get the active state pinctrl handle");
+ return -EINVAL;
+ }
+
+ dt->pinctrl_info.gpio_state_suspend =
+ pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+ CAM_RES_MGR_SLEEP);
+ if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_suspend)) {
+ CAM_ERR(CAM_RES,
+ "Failed to get the active state pinctrl handle");
+ return -EINVAL;
+ }
+
+ devm_pinctrl_put(dt->pinctrl_info.pinctrl);
+
+ return rc;
+}
+
+static int cam_res_mgr_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ cam_res = kzalloc(sizeof(*cam_res), GFP_KERNEL);
+ if (!cam_res)
+ return -ENOMEM;
+
+ cam_res->dev = &pdev->dev;
+ mutex_init(&cam_res->flash_res_lock);
+ mutex_init(&cam_res->gpio_res_lock);
+ mutex_init(&cam_res->clk_res_lock);
+
+ rc = cam_res_mgr_parse_dt(&pdev->dev);
+ if (rc) {
+ CAM_INFO(CAM_RES, "Disable shared gpio support.");
+ cam_res->shared_gpio_enabled = false;
+ } else {
+ CAM_INFO(CAM_RES, "Enable shared gpio support.");
+ cam_res->shared_gpio_enabled = true;
+ }
+
+ cam_res->shared_clk_ref_count = 0;
+ cam_res->pstatus = PINCTRL_STATUS_PUT;
+
+ INIT_LIST_HEAD(&cam_res->gpio_res_list);
+ INIT_LIST_HEAD(&cam_res->flash_res_list);
+
+ return 0;
+}
+
+static int cam_res_mgr_remove(struct platform_device *pdev)
+{
+ if (cam_res) {
+ cam_res_mgr_free_res();
+ kfree(cam_res);
+ cam_res = NULL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cam_res_mgr_dt_match[] = {
+ {.compatible = "qcom,cam-res-mgr"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, cam_res_mgr_dt_match);
+
+static struct platform_driver cam_res_mgr_driver = {
+ .probe = cam_res_mgr_probe,
+ .remove = cam_res_mgr_remove,
+ .driver = {
+ .name = "cam_res_mgr",
+ .owner = THIS_MODULE,
+ .of_match_table = cam_res_mgr_dt_match,
+ },
+};
+
+static int __init cam_res_mgr_init(void)
+{
+ return platform_driver_register(&cam_res_mgr_driver);
+}
+
+static void __exit cam_res_mgr_exit(void)
+{
+ platform_driver_unregister(&cam_res_mgr_driver);
+}
+
+module_init(cam_res_mgr_init);
+module_exit(cam_res_mgr_exit);
+MODULE_DESCRIPTION("Camera resource manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
new file mode 100644
index 0000000..7fb13ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_API_H__
+#define __CAM_RES_MGR_API_H__
+
+#include <linux/leds.h>
+
+/**
+ * @brief: Register the led trigger
+ *
+ * The newly registered led trigger is assigned to flash_res_list.
+ *
+ * @name : Pointer to the led trigger name
+ * @tp : Save the returned led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_register(const char *name,
+ struct led_trigger **tp);
+
+/**
+ * @brief: Unregister the led trigger
+ *
+ * Free the flash_res if this led trigger isn't used by any other device.
+ *
+ * @tp : Pointer to the led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp);
+
+/**
+ * @brief: Trigger the event to led core
+ *
+ * @trig : Pointer to the led trigger
+ * @brightness : The brightness need to fire
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+ enum led_brightness brightness);
+
+/**
+ * @brief: Get the corresponding pinctrl of dev
+ *
+ * Init the shared pinctrl if shared pinctrl is enabled.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_init(void);
+
+/**
+ * @brief: Put the pinctrl
+ *
+ * Put the shared pinctrl.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_pinctrl_put(void);
+
+/**
+ * @brief: Select the corresponding state
+ *
+ * Active state can be selected directly, but need hold to suspend the
+ * pinctrl if the gpios in this pinctrl also held by other pinctrl.
+ *
+ * @active : The flag to indicate whether active or suspend
+ * the shared pinctrl.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_select_state(bool active);
+
+/**
+ * @brief: Post init shared pinctrl
+ *
+ * Post init to check if the device really has shared gpio,
+ * suspend and put the pinctrl if not use shared gpio.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_post_init(void);
+
+/**
+ * @brief: Request a gpio
+ *
+ * Allocates a gpio_res for a new gpio; otherwise finds the
+ * corresponding existing gpio_res.
+ *
+ * @dev : Pointer to the device
+ * @gpio : The GPIO number
+ * @flags : GPIO configuration as specified by GPIOF_*
+ * @label : A literal description string of this GPIO
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_gpio_request(struct device *dev, unsigned int gpio,
+ unsigned long flags, const char *label);
+
+/**
+ * @brief: Free an array of GPIOs
+ *
+ * Free the GPIOs and release the corresponding gpio_res entries.
+ *
+ * @dev   : Pointer to the device
+ * @array : Array of GPIO descriptors to free
+ * @num   : The number of gpios in the array
+ *
+ * @return None
+ */
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+ const struct gpio *array, size_t num);
+
+/**
+ * @brief: Set GPIO power level
+ *
+ * Add ref count support for shared GPIOs.
+ *
+ * @gpio : The GPIO number
+ * @value : The power level need to setup
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ * (Note: the current implementation always returns zero.)
+ */
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value);
+
+/**
+ * @brief: Config the shared clk ref count
+ *
+ * Increment or decrement the shared clk reference count.
+ *
+ * @value : true to get, false to put the shared clk.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_clk_config(bool value);
+
+#endif /* __CAM_RES_MGR_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
new file mode 100644
index 0000000..53a8778
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
@@ -0,0 +1,117 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CAM_RES_MGR_PRIVATE_H__
+#define __CAM_RES_MGR_PRIVATE_H__
+
+#include <linux/list.h>
+#include <linux/leds.h>
+#include "cam_soc_util.h"
+
+#define MAX_SHARED_GPIO_SIZE 16
+
+/* pinctrl states name */
+#define CAM_RES_MGR_SLEEP "cam_res_mgr_suspend"
+#define CAM_RES_MGR_DEFAULT "cam_res_mgr_default"
+
+/**
+ * enum pinctrl_status - Enum for pinctrl status
+ */
+enum pinctrl_status {
+ PINCTRL_STATUS_GOT = 0,
+ PINCTRL_STATUS_ACTIVE,
+ PINCTRL_STATUS_SUSPEND,
+ PINCTRL_STATUS_PUT,
+};
+
+/**
+ * struct cam_dev_res
+ *
+ * @list : List member used to append this node to a dev list
+ * @dev : Device pointer associated with device
+ */
+struct cam_dev_res {
+ struct list_head list;
+ struct device *dev;
+};
+
+/**
+ * struct cam_gpio_res
+ *
+ * @list : List member used to append this node to a gpio list
+ * @dev_list : List the device which request this gpio
+ * @gpio : Gpio value
+ * @power_on_count : Record the power on times of this gpio
+ */
+struct cam_gpio_res {
+ struct list_head list;
+ struct list_head dev_list;
+ unsigned int gpio;
+ int power_on_count;
+};
+
+/**
+ * struct cam_flash_res
+ *
+ * @list : List member used to append this node to a linked list
+ * @name : Pointer to the flash trigger's name.
+ * @trigger : Pointer to the flash trigger
+ */
+struct cam_flash_res {
+ struct list_head list;
+ const char *name;
+ struct led_trigger *trigger;
+};
+
+/**
+ * struct cam_res_mgr_dt
+ *
+ * @shared_gpio : Shared gpios list in the device tree
+ * @num_shared_gpio : The number of shared gpio
+ * @pinctrl_info : Pinctrl information
+ */
+struct cam_res_mgr_dt {
+ uint shared_gpio[MAX_SHARED_GPIO_SIZE];
+ int num_shared_gpio;
+ struct cam_soc_pinctrl_info pinctrl_info;
+};
+
+/**
+ * struct cam_res_mgr
+ *
+ * @dev : Pointer to the device
+ * @dt : Device tree resource
+ * @shared_gpio_enabled : The flag to indicate if support shared gpio
+ * @pstatus : Shared pinctrl status
+ * @gpio_res_list : List head of the gpio resource
+ * @flash_res_list : List head of the flash resource
+ * @gpio_res_lock : GPIO resource lock
+ * @flash_res_lock : Flash resource lock
+ * @clk_res_lock : Clk resource lock
+ */
+struct cam_res_mgr {
+ struct device *dev;
+ struct cam_res_mgr_dt dt;
+
+ bool shared_gpio_enabled;
+ enum pinctrl_status pstatus;
+
+ uint shared_clk_ref_count;
+
+ struct list_head gpio_res_list;
+ struct list_head flash_res_list;
+ struct mutex gpio_res_lock;
+ struct mutex flash_res_lock;
+ struct mutex clk_res_lock;
+};
+
+#endif /* __CAM_RES_MGR_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index ac9235d..97158e4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -53,7 +53,7 @@ static void cam_sensor_release_resource(
"failed while deleting Init settings");
}
- i2c_set = &(s_ctrl->i2c_data.res_settings);
+ i2c_set = &(s_ctrl->i2c_data.config_settings);
if (i2c_set->is_settings_valid == 1) {
i2c_set->is_settings_valid = -1;
rc = delete_request(i2c_set);
@@ -145,8 +145,8 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
i2c_reg_settings->is_settings_valid = 1;
break;
}
- case CAM_SENSOR_PACKET_OPCODE_SENSOR_RESCONFIG: {
- i2c_reg_settings = &i2c_data->res_settings;
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+ i2c_reg_settings = &i2c_data->config_settings;
i2c_reg_settings->request_id = 0;
i2c_reg_settings->is_settings_valid = 1;
break;
@@ -163,6 +163,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
i2c_reg_settings->is_settings_valid = 1;
break;
}
+
case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
i2c_reg_settings =
&i2c_data->
@@ -181,12 +182,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
return rc;
}
}
-
- i2c_reg_settings->request_id =
- csl_packet->header.request_id;
- i2c_reg_settings->is_settings_valid = 1;
- cam_sensor_update_req_mgr(s_ctrl, csl_packet);
- break;
+ break;
}
case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
cam_sensor_update_req_mgr(s_ctrl, csl_packet);
@@ -206,6 +202,14 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
return rc;
}
+
+ if ((csl_packet->header.op_code & 0xFFFFFF) ==
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+ }
+
return rc;
}
@@ -296,6 +300,8 @@ int32_t cam_sensor_update_i2c_info(struct cam_cmd_i2c_info *i2c_info,
i2c_info->i2c_freq_mode);
}
+ s_ctrl->sensordata->slave_info.sensor_slave_addr =
+ i2c_info->slave_addr;
return rc;
}
@@ -470,12 +476,9 @@ void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl)
cam_sensor_release_resource(s_ctrl);
- if (s_ctrl->sensor_state == CAM_SENSOR_START) {
+ if ((s_ctrl->sensor_state == CAM_SENSOR_START) ||
+ (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
cam_sensor_power_down(s_ctrl);
- s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
- }
-
- if (s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE) {
rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
if (rc < 0)
CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
@@ -628,7 +631,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
CAM_INFO(CAM_SENSOR,
- "Probe Succees, slot:%d slave_addr: 0x%x, slave_id: %d",
+ "Probe Success,slot:%d,slave_addr:0x%x,sensor_id:0x%x",
s_ctrl->soc_info.index,
s_ctrl->sensordata->slave_info.sensor_slave_addr,
s_ctrl->sensordata->slave_info.sensor_id);
@@ -683,9 +686,31 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
rc = -EFAULT;
goto release_mutex;
}
+
+ rc = cam_sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
+ goto release_mutex;
+ }
+
+ s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
}
break;
case CAM_RELEASE_DEV: {
+ if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to release : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
+ rc = cam_sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
+ goto release_mutex;
+ }
+
cam_sensor_release_resource(s_ctrl);
if (s_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_SENSOR,
@@ -702,6 +727,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
s_ctrl->bridge_intf.device_hdl = -1;
s_ctrl->bridge_intf.link_hdl = -1;
s_ctrl->bridge_intf.session_hdl = -1;
+
+ s_ctrl->sensor_state = CAM_SENSOR_INIT;
}
break;
case CAM_QUERY_CAP: {
@@ -717,6 +744,14 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
break;
}
case CAM_START_DEV: {
+ if (s_ctrl->sensor_state != CAM_SENSOR_ACQUIRE) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to start : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
if (s_ctrl->i2c_data.streamon_settings.is_settings_valid &&
(s_ctrl->i2c_data.streamon_settings.request_id == 0)) {
rc = cam_sensor_apply_settings(s_ctrl, 0,
@@ -731,6 +766,14 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
break;
case CAM_STOP_DEV: {
+ if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+ rc = -EINVAL;
+ CAM_WARN(CAM_SENSOR,
+ "Not in right state to stop : %d",
+ s_ctrl->sensor_state);
+ goto release_mutex;
+ }
+
if (s_ctrl->i2c_data.streamoff_settings.is_settings_valid &&
(s_ctrl->i2c_data.streamoff_settings.request_id == 0)) {
rc = cam_sensor_apply_settings(s_ctrl, 0,
@@ -740,11 +783,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
"cannot apply streamoff settings");
}
}
- rc = cam_sensor_power_down(s_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
- goto release_mutex;
- }
+ s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
}
break;
case CAM_CONFIG_DEV: {
@@ -755,11 +794,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
if (s_ctrl->i2c_data.init_settings.is_settings_valid &&
(s_ctrl->i2c_data.init_settings.request_id == 0)) {
- rc = cam_sensor_power_up(s_ctrl);
- if (rc < 0) {
- CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
- goto release_mutex;
- }
+
rc = cam_sensor_apply_settings(s_ctrl, 0,
CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG);
if (rc < 0) {
@@ -776,22 +811,22 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
s_ctrl->i2c_data.init_settings.request_id = -1;
}
- if (s_ctrl->i2c_data.res_settings.is_settings_valid &&
- (s_ctrl->i2c_data.res_settings.request_id == 0)) {
+ if (s_ctrl->i2c_data.config_settings.is_settings_valid &&
+ (s_ctrl->i2c_data.config_settings.request_id == 0)) {
rc = cam_sensor_apply_settings(s_ctrl, 0,
- CAM_SENSOR_PACKET_OPCODE_SENSOR_RESCONFIG);
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG);
if (rc < 0) {
CAM_ERR(CAM_SENSOR,
- "cannot apply res settings");
+ "cannot apply config settings");
goto release_mutex;
}
- rc = delete_request(&s_ctrl->i2c_data.res_settings);
+ rc = delete_request(&s_ctrl->i2c_data.config_settings);
if (rc < 0) {
CAM_ERR(CAM_SENSOR,
- "Fail in deleting the res settings");
+ "Fail in deleting the config settings");
goto release_mutex;
}
- s_ctrl->i2c_data.res_settings.request_id = -1;
+ s_ctrl->i2c_data.config_settings.request_id = -1;
}
}
break;
@@ -886,13 +921,9 @@ int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
return rc;
}
- if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
- rc = camera_io_init(&(s_ctrl->io_master_info));
- if (rc < 0) {
- CAM_ERR(CAM_SENSOR, "cci_init failed");
- return -EINVAL;
- }
- }
+ rc = camera_io_init(&(s_ctrl->io_master_info));
+ if (rc < 0)
+ CAM_ERR(CAM_SENSOR, "cci_init failed: rc: %d", rc);
return rc;
}
@@ -921,8 +952,7 @@ int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
return rc;
}
- if (s_ctrl->io_master_info.master_type == CCI_MASTER)
- camera_io_release(&(s_ctrl->io_master_info));
+ camera_io_release(&(s_ctrl->io_master_info));
return rc;
}
@@ -944,8 +974,8 @@ int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl,
i2c_set = &s_ctrl->i2c_data.init_settings;
break;
}
- case CAM_SENSOR_PACKET_OPCODE_SENSOR_RESCONFIG: {
- i2c_set = &s_ctrl->i2c_data.res_settings;
+ case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+ i2c_set = &s_ctrl->i2c_data.config_settings;
break;
}
case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index f915a0e..8ea767f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -183,7 +183,7 @@ static int32_t cam_sensor_driver_i2c_probe(struct i2c_client *client,
}
INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
- INIT_LIST_HEAD(&(s_ctrl->i2c_data.res_settings.list_head));
+ INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
@@ -292,7 +292,7 @@ static int32_t cam_sensor_driver_platform_probe(
}
INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
- INIT_LIST_HEAD(&(s_ctrl->i2c_data.res_settings.list_head));
+ INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index bf61fb3..98ee3ae 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -2,6 +2,7 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index ac1e23b..2e91efc 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -154,7 +154,7 @@ enum cam_sensor_packet_opcodes {
CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE,
CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG,
CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE,
- CAM_SENSOR_PACKET_OPCODE_SENSOR_RESCONFIG,
+ CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG,
CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF,
CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP = 127
};
@@ -282,7 +282,7 @@ struct i2c_settings_array {
struct i2c_data_settings {
struct i2c_settings_array init_settings;
- struct i2c_settings_array res_settings;
+ struct i2c_settings_array config_settings;
struct i2c_settings_array streamon_settings;
struct i2c_settings_array streamoff_settings;
struct i2c_settings_array *per_frame;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 85d7b74..0a3878e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include "cam_sensor_util.h"
#include <cam_mem_mgr.h>
+#include "cam_res_mgr_api.h"
#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -614,7 +615,8 @@ int cam_sensor_util_request_gpio_table(
if (gpio_en) {
for (i = 0; i < size; i++) {
- rc = gpio_request_one(gpio_tbl[i].gpio,
+ rc = cam_res_mgr_gpio_request(soc_info->dev,
+ gpio_tbl[i].gpio,
gpio_tbl[i].flags, gpio_tbl[i].label);
if (rc) {
/*
@@ -627,7 +629,7 @@ int cam_sensor_util_request_gpio_table(
}
}
} else {
- gpio_free_array(gpio_tbl, size);
+ cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl, size);
}
return rc;
@@ -1157,7 +1159,7 @@ int msm_camera_pinctrl_init(
sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
- CAM_ERR(CAM_SENSOR, "Getting pinctrl handle failed");
+ CAM_DBG(CAM_SENSOR, "Getting pinctrl handle failed");
return -EINVAL;
}
sensor_pctrl->gpio_state_active =
@@ -1176,8 +1178,16 @@ int msm_camera_pinctrl_init(
"Failed to get the suspend state pinctrl handle");
return -EINVAL;
}
+
+ if (cam_res_mgr_shared_pinctrl_init()) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to init shared pinctrl");
+ return -EINVAL;
+ }
+
return 0;
}
+
int msm_cam_sensor_handle_reg_gpio(int seq_type,
struct msm_camera_gpio_num_info *gpio_num_info, int val)
{
@@ -1195,7 +1205,7 @@ int msm_cam_sensor_handle_reg_gpio(int seq_type,
if (gpio_num_info->valid[gpio_offset] == 1) {
CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
gpio_offset, seq_type);
- gpio_set_value_cansleep(
+ cam_res_mgr_gpio_set_value(
gpio_num_info->gpio_num
[gpio_offset], val);
}
@@ -1225,9 +1235,13 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
return -EINVAL;
}
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(true);
+
ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
if (ret < 0) {
- CAM_ERR(CAM_SENSOR, "Initialization of pinctrl failed");
+ /* Some sensor subdevs have no pinctrl. */
+ CAM_DBG(CAM_SENSOR, "Initialization of pinctrl failed");
ctrl->cam_pinctrl_status = 0;
} else {
ctrl->cam_pinctrl_status = 1;
@@ -1238,14 +1252,25 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
no_gpio = rc;
if (ctrl->cam_pinctrl_status) {
- ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ret = pinctrl_select_state(
+ ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_active);
if (ret)
CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
+
+ ret = cam_res_mgr_shared_pinctrl_select_state(true);
+ if (ret)
+ CAM_ERR(CAM_SENSOR,
+ "Cannot set shared pin to active state");
+
+ ret = cam_res_mgr_shared_pinctrl_post_init();
+ if (ret)
+ CAM_ERR(CAM_SENSOR,
+ "Failed to post init shared pinctrl");
}
for (index = 0; index < ctrl->power_setting_size; index++) {
- CAM_DBG(CAM_SENSOR, "index: %d", index);
+ CAM_DBG(CAM_SENSOR, "index: %d", index);
power_setting = &ctrl->power_setting[index];
CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
@@ -1423,7 +1448,7 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
if (!gpio_num_info->valid
[power_setting->seq_type])
continue;
- gpio_set_value_cansleep(
+ cam_res_mgr_gpio_set_value(
gpio_num_info->gpio_num
[power_setting->seq_type], GPIOF_OUT_INIT_LOW);
break;
@@ -1470,13 +1495,21 @@ int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
(power_setting->delay * 1000) + 1000);
}
}
+
if (ctrl->cam_pinctrl_status) {
- ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ret = pinctrl_select_state(
+ ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
- devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ cam_res_mgr_shared_pinctrl_select_state(false);
+ pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ cam_res_mgr_shared_pinctrl_put();
}
+
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(false);
+
ctrl->cam_pinctrl_status = 0;
cam_sensor_util_request_gpio_table(soc_info, 0);
@@ -1599,7 +1632,7 @@ int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
if (!gpio_num_info->valid[pd->seq_type])
continue;
- gpio_set_value_cansleep(
+ cam_res_mgr_gpio_set_value(
gpio_num_info->gpio_num
[pd->seq_type],
(int) pd->config_val);
@@ -1662,13 +1695,20 @@ int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
}
if (ctrl->cam_pinctrl_status) {
- ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ret = pinctrl_select_state(
+ ctrl->pinctrl_info.pinctrl,
ctrl->pinctrl_info.gpio_state_suspend);
if (ret)
CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
- devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+
+ cam_res_mgr_shared_pinctrl_select_state(false);
+ pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ cam_res_mgr_shared_pinctrl_put();
}
+ if (soc_info->use_shared_clk)
+ cam_res_mgr_shared_clk_config(false);
+
ctrl->cam_pinctrl_status = 0;
cam_sensor_util_request_gpio_table(soc_info, 0);
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index cbf54f7..7824102 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -73,7 +73,7 @@ enum cam_iommu_type {
enum cam_smmu_buf_state {
CAM_SMMU_BUFF_EXIST,
- CAM_SMMU_BUFF_NOT_EXIST
+ CAM_SMMU_BUFF_NOT_EXIST,
};
enum cam_smmu_init_dir {
@@ -88,6 +88,12 @@ struct scratch_mapping {
dma_addr_t base;
};
+struct secheap_buf_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+};
+
struct cam_context_bank_info {
struct device *dev;
struct dma_iommu_mapping *mapping;
@@ -99,7 +105,9 @@ struct cam_context_bank_info {
uint8_t firmware_support;
uint8_t shared_support;
uint8_t io_support;
+ uint8_t secheap_support;
bool is_fw_allocated;
+ bool is_secheap_allocated;
struct scratch_mapping scratch_map;
struct gen_pool *shared_mem_pool;
@@ -108,8 +116,11 @@ struct cam_context_bank_info {
struct cam_smmu_region_info firmware_info;
struct cam_smmu_region_info shared_info;
struct cam_smmu_region_info io_info;
+ struct cam_smmu_region_info secheap_info;
+ struct secheap_buf_info secheap_buf;
struct list_head smmu_buf_list;
+ struct list_head smmu_buf_kernel_list;
struct mutex lock;
int handle;
enum cam_smmu_ops_param state;
@@ -179,6 +190,9 @@ static int cam_smmu_create_add_handle_in_table(char *name,
static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
int ion_fd);
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+ struct dma_buf *buf);
+
static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
int ion_fd);
@@ -198,7 +212,11 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
- size_t *len_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id);
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+ struct dma_buf *buf, enum dma_data_direction dma_dir,
+ dma_addr_t *paddr_ptr, size_t *len_ptr,
enum cam_smmu_region_id region_id);
static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
@@ -214,9 +232,13 @@ static int cam_smmu_free_scratch_buffer_remove_from_list(
struct cam_dma_buff_info *mapping_info,
int idx);
-static void cam_smmu_clean_buffer_list(int idx);
+static void cam_smmu_clean_user_buffer_list(int idx);
-static void cam_smmu_print_list(int idx);
+static void cam_smmu_clean_kernel_buffer_list(int idx);
+
+static void cam_smmu_print_user_list(int idx);
+
+static void cam_smmu_print_kernel_list(int idx);
static void cam_smmu_print_table(void);
@@ -261,7 +283,7 @@ static void cam_smmu_page_fault_work(struct work_struct *work)
kfree(payload);
}
-static void cam_smmu_print_list(int idx)
+static void cam_smmu_print_user_list(int idx)
{
struct cam_dma_buff_info *mapping;
@@ -276,6 +298,21 @@ static void cam_smmu_print_list(int idx)
}
}
+static void cam_smmu_print_kernel_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ CAM_ERR(CAM_SMMU, "index = %d", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+ CAM_ERR(CAM_SMMU,
+ "dma_buf = %pK, paddr= 0x%pK, len = %u, region = %d",
+ mapping->buf, (void *)mapping->paddr,
+ (unsigned int)mapping->len,
+ mapping->region_id);
+ }
+}
+
static void cam_smmu_print_table(void)
{
int i;
@@ -479,6 +516,7 @@ void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
for (i = 0; i < iommu_cb_set.cb_num; i++) {
iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_kernel_list);
iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
iommu_cb_set.cb_info[i].dev = NULL;
iommu_cb_set.cb_info[i].cb_count = 0;
@@ -709,7 +747,13 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
{
struct cam_dma_buff_info *mapping;
- list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ if (ion_fd < 0) {
+ CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd);
+ return NULL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list,
list) {
if (mapping->ion_fd == ion_fd) {
CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
@@ -717,8 +761,31 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
}
}
- CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
- ion_fd, idx);
+ CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+ return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+ struct dma_buf *buf)
+{
+ struct cam_dma_buff_info *mapping;
+
+ if (!buf) {
+ CAM_ERR(CAM_SMMU, "Invalid dma_buf");
+ return NULL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list,
+ list) {
+ if (mapping->buf == buf) {
+ CAM_DBG(CAM_SMMU, "find dma_buf %pK", buf);
+ return mapping;
+ }
+ }
+
+ CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
return NULL;
}
@@ -740,7 +807,7 @@ static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
return NULL;
}
-static void cam_smmu_clean_buffer_list(int idx)
+static void cam_smmu_clean_user_buffer_list(int idx)
{
int ret;
struct cam_dma_buff_info *mapping_info, *temp;
@@ -777,6 +844,40 @@ static void cam_smmu_clean_buffer_list(int idx)
}
}
+static void cam_smmu_clean_kernel_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+ CAM_DBG(CAM_SMMU,
+ "Free mapping address %pK, i = %d, dma_buf = %pK",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->buf);
+
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete in kernel list failed: idx = %d",
+ idx);
+ CAM_ERR(CAM_SMMU,
+ "Buffer delete failed: addr = %lx, dma_buf = %pK",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->buf);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
static int cam_smmu_attach(int idx)
{
int ret;
@@ -1125,6 +1226,15 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
region_info->iova_start = cb->io_info.iova_start;
region_info->iova_len = cb->io_info.iova_len;
break;
+ case CAM_SMMU_REGION_SECHEAP:
+ if (!cb->secheap_support) {
+ CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENODEV;
+ }
+ region_info->iova_start = cb->secheap_info.iova_start;
+ region_info->iova_len = cb->secheap_info.iova_len;
+ break;
default:
CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
smmu_hdl, region_id);
@@ -1137,25 +1247,173 @@ int cam_smmu_get_region_info(int32_t smmu_hdl,
}
EXPORT_SYMBOL(cam_smmu_get_region_info);
-static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
- enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
- size_t *len_ptr,
- enum cam_smmu_region_id region_id)
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+ struct dma_buf *buf,
+ dma_addr_t *iova,
+ size_t *request_len)
{
- int rc = -1;
- struct cam_dma_buff_info *mapping_info;
- struct dma_buf *buf = NULL;
+ struct secheap_buf_info *secheap_buf = NULL;
+ size_t size = 0;
+ uint32_t sec_heap_iova = 0;
+ size_t sec_heap_iova_len = 0;
+ int idx;
+ int rc = 0;
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].secheap_support) {
+ CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+ if (iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+ CAM_ERR(CAM_SMMU, "Trying to allocate secheap twice");
+ rc = -ENOMEM;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+ }
+
+ if (IS_ERR_OR_NULL(buf)) {
+ rc = PTR_ERR(buf);
+ CAM_ERR(CAM_SMMU,
+ "Error: dma get buf failed. rc = %d", rc);
+ goto err_out;
+ }
+
+ secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+ secheap_buf->buf = buf;
+ secheap_buf->attach = dma_buf_attach(secheap_buf->buf,
+ iommu_cb_set.cb_info[idx].dev);
+ if (IS_ERR_OR_NULL(secheap_buf->attach)) {
+ rc = PTR_ERR(secheap_buf->attach);
+ CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
+ goto err_put;
+ }
+
+ secheap_buf->table = dma_buf_map_attachment(secheap_buf->attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(secheap_buf->table)) {
+ rc = PTR_ERR(secheap_buf->table);
+ CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
+ goto err_detach;
+ }
+
+ sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+ sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+ size = iommu_map_sg(iommu_cb_set.cb_info[idx].mapping->domain,
+ sec_heap_iova,
+ secheap_buf->table->sgl,
+ secheap_buf->table->nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (size != sec_heap_iova_len) {
+ CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
+ goto err_unmap_sg;
+ }
+
+ iommu_cb_set.cb_info[idx].is_secheap_allocated = true;
+ *iova = (uint32_t)sec_heap_iova;
+ *request_len = sec_heap_iova_len;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+ return rc;
+
+err_unmap_sg:
+ dma_buf_unmap_attachment(secheap_buf->attach,
+ secheap_buf->table,
+ DMA_BIDIRECTIONAL);
+err_detach:
+ dma_buf_detach(secheap_buf->buf,
+ secheap_buf->attach);
+err_put:
+ dma_buf_put(secheap_buf->buf);
+err_out:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_reserve_sec_heap);
+
+int cam_smmu_release_sec_heap(int32_t smmu_hdl)
+{
+ int idx;
+ size_t size = 0;
+ uint32_t sec_heap_iova = 0;
+ size_t sec_heap_iova_len = 0;
+ struct secheap_buf_info *secheap_buf = NULL;
+
+ idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, smmu_hdl);
+ return -EINVAL;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].secheap_support) {
+ CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+ return -EINVAL;
+ }
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+ if (!iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+ CAM_ERR(CAM_SMMU, "Trying to release secheap twice");
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -ENOMEM;
+ }
+
+ secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+ sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+ sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+
+ size = iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+ sec_heap_iova,
+ sec_heap_iova_len);
+ if (size != sec_heap_iova_len) {
+ CAM_ERR(CAM_SMMU, "Failed: Unmapped = %zu, requested = %zu",
+ size,
+ sec_heap_iova_len);
+ }
+
+ dma_buf_unmap_attachment(secheap_buf->attach,
+ secheap_buf->table, DMA_BIDIRECTIONAL);
+ dma_buf_detach(secheap_buf->buf, secheap_buf->attach);
+ dma_buf_put(secheap_buf->buf);
+ iommu_cb_set.cb_info[idx].is_secheap_allocated = false;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cam_smmu_release_sec_heap);
+
+static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
+ int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id,
+ struct cam_dma_buff_info **mapping_info)
+{
struct dma_buf_attachment *attach = NULL;
struct sg_table *table = NULL;
struct iommu_domain *domain;
size_t size = 0;
uint32_t iova = 0;
+ int rc = 0;
- /* allocate memory for each buffer information */
- buf = dma_buf_get(ion_fd);
if (IS_ERR_OR_NULL(buf)) {
rc = PTR_ERR(buf);
- CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
+ CAM_ERR(CAM_SMMU,
+ "Error: dma get buf failed. rc = %d", rc);
+ goto err_out;
+ }
+
+ if (!mapping_info) {
+ rc = -EINVAL;
+ CAM_ERR(CAM_SMMU, "Error: mapping_info is invalid");
goto err_out;
}
@@ -1190,18 +1448,13 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
goto err_unmap_sg;
}
- size = iommu_map_sg(domain,
- iova,
- table->sgl,
- table->nents,
- IOMMU_READ | IOMMU_WRITE);
+ size = iommu_map_sg(domain, iova, table->sgl, table->nents,
+ IOMMU_READ | IOMMU_WRITE);
if (size < 0) {
CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
rc = cam_smmu_free_iova(iova,
- size,
- iommu_cb_set.cb_info[idx].handle);
-
+ size, iommu_cb_set.cb_info[idx].handle);
if (rc)
CAM_ERR(CAM_SMMU, "IOVA free failed");
rc = -ENOMEM;
@@ -1213,7 +1466,7 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
}
} else if (region_id == CAM_SMMU_REGION_IO) {
rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev,
- table->sgl, table->nents, dma_dir, buf);
+ table->sgl, table->nents, dma_dir, buf);
if (rc != table->nents) {
CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
@@ -1245,33 +1498,31 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
}
/* fill up mapping_info */
- mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
- if (!mapping_info) {
+ *mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!(*mapping_info)) {
rc = -ENOSPC;
goto err_alloc;
}
- mapping_info->ion_fd = ion_fd;
- mapping_info->buf = buf;
- mapping_info->attach = attach;
- mapping_info->table = table;
- mapping_info->paddr = *paddr_ptr;
- mapping_info->len = *len_ptr;
- mapping_info->dir = dma_dir;
- mapping_info->ref_count = 1;
- mapping_info->region_id = region_id;
+
+ (*mapping_info)->buf = buf;
+ (*mapping_info)->attach = attach;
+ (*mapping_info)->table = table;
+ (*mapping_info)->paddr = *paddr_ptr;
+ (*mapping_info)->len = *len_ptr;
+ (*mapping_info)->dir = dma_dir;
+ (*mapping_info)->ref_count = 1;
+ (*mapping_info)->region_id = region_id;
if (!*paddr_ptr || !*len_ptr) {
CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
- kfree(mapping_info);
+ kfree(*mapping_info);
rc = -ENOSPC;
goto err_alloc;
}
- CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
- ion_fd, (void *)iommu_cb_set.cb_info[idx].dev,
+ CAM_DBG(CAM_SMMU, "dma_buf = %pK, dev = %pK, paddr= %pK, len = %u",
+ buf, (void *)iommu_cb_set.cb_info[idx].dev,
(void *)*paddr_ptr, (unsigned int)*len_ptr);
- /* add to the list */
- list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
return 0;
err_alloc:
@@ -1300,6 +1551,60 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
return rc;
}
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info = NULL;
+ struct dma_buf *buf = NULL;
+
+ /* returns the dma_buf structure related to an fd */
+ buf = dma_buf_get(ion_fd);
+
+ rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+ region_id, &mapping_info);
+
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "buffer validation failure");
+ return rc;
+ }
+
+ mapping_info->ion_fd = ion_fd;
+ /* add to the list */
+ list_add(&mapping_info->list,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+ return 0;
+}
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+ struct dma_buf *buf, enum dma_data_direction dma_dir,
+ dma_addr_t *paddr_ptr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id)
+{
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info = NULL;
+
+ rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+ region_id, &mapping_info);
+
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "buffer validation failure");
+ return rc;
+ }
+
+ mapping_info->ion_fd = -1;
+
+ /* add to the list */
+ list_add(&mapping_info->list,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
+
+ return 0;
+}
+
+
static int cam_smmu_unmap_buf_and_remove_from_list(
struct cam_dma_buff_info *mapping_info,
int idx)
@@ -1366,8 +1671,7 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
}
static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
- int ion_fd, dma_addr_t *paddr_ptr,
- size_t *len_ptr)
+ int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
{
struct cam_dma_buff_info *mapping;
@@ -1383,6 +1687,23 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
return CAM_SMMU_BUFF_NOT_EXIST;
}
+static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
+ struct dma_buf *buf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+ if (mapping->buf == buf) {
+ *paddr_ptr = mapping->paddr;
+ *len_ptr = mapping->len;
+ return CAM_SMMU_BUFF_EXIST;
+ }
+ }
+
+ return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
int ion_fd, dma_addr_t *paddr_ptr,
size_t *len_ptr)
@@ -1980,13 +2301,13 @@ int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
}
EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
-int cam_smmu_map_iova(int handle, int ion_fd,
- enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
- size_t *len_ptr, enum cam_smmu_region_id region_id)
+static int cam_smmu_map_iova_validate_params(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id)
{
- int idx, rc;
+ int idx, rc = 0;
enum dma_data_direction dma_dir;
- enum cam_smmu_buf_state buf_state;
if (!paddr_ptr || !len_ptr) {
CAM_ERR(CAM_SMMU, "Input pointers are invalid");
@@ -2016,13 +2337,34 @@ int cam_smmu_map_iova(int handle, int ion_fd,
return -EINVAL;
}
+ return rc;
+}
+
+int cam_smmu_map_user_iova(int handle, int ion_fd,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+ int idx, rc = 0;
+ enum cam_smmu_buf_state buf_state;
+ enum dma_data_direction dma_dir;
+
+ rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+ len_ptr, region_id);
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+ return rc;
+ }
+
+ dma_dir = cam_smmu_translate_dir(dir);
+ idx = GET_SMMU_TABLE_IDX(handle);
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].is_secure) {
CAM_ERR(CAM_SMMU,
"Error: can't map non-secure mem to secure cb");
- return -EINVAL;
+ rc = -EINVAL;
+ goto get_addr_end;
}
- mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
iommu_cb_set.cb_info[idx].handle, handle);
@@ -2033,18 +2375,19 @@ int cam_smmu_map_iova(int handle, int ion_fd,
if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
CAM_ERR(CAM_SMMU,
"Err:Dev %s should call SMMU attach before map buffer",
- iommu_cb_set.cb_info[idx].name);
+ iommu_cb_set.cb_info[idx].name);
rc = -EINVAL;
goto get_addr_end;
}
- buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
- len_ptr);
+ buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
if (buf_state == CAM_SMMU_BUFF_EXIST) {
- CAM_ERR(CAM_SMMU, "ion_fd:%d already in the list", ion_fd);
+ CAM_ERR(CAM_SMMU,
+ "ion_fd: %d already in the list", ion_fd);
rc = -EALREADY;
goto get_addr_end;
}
+
rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
paddr_ptr, len_ptr, region_id);
if (rc < 0)
@@ -2054,8 +2397,67 @@ int cam_smmu_map_iova(int handle, int ion_fd,
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return rc;
}
-EXPORT_SYMBOL(cam_smmu_map_iova);
+EXPORT_SYMBOL(cam_smmu_map_user_iova);
+int cam_smmu_map_kernel_iova(int handle, struct dma_buf *buf,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+ int idx, rc = 0;
+ enum cam_smmu_buf_state buf_state;
+ enum dma_data_direction dma_dir;
+
+ rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+ len_ptr, region_id);
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+ return rc;
+ }
+
+ dma_dir = cam_smmu_translate_dir(dir);
+ idx = GET_SMMU_TABLE_IDX(handle);
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].is_secure) {
+ CAM_ERR(CAM_SMMU,
+ "Error: can't map non-secure mem to secure cb");
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+ CAM_ERR(CAM_SMMU,
+ "Err:Dev %s should call SMMU attach before map buffer",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ buf_state = cam_smmu_check_dma_buf_in_list(idx, buf,
+ paddr_ptr, len_ptr);
+ if (buf_state == CAM_SMMU_BUFF_EXIST) {
+ CAM_ERR(CAM_SMMU,
+ "dma_buf :%pK already in the list", buf);
+ rc = -EALREADY;
+ goto get_addr_end;
+ }
+
+ rc = cam_smmu_map_kernel_buffer_and_add_to_list(idx, buf, dma_dir,
+ paddr_ptr, len_ptr, region_id);
+ if (rc < 0)
+ CAM_ERR(CAM_SMMU, "mapping or add list fail");
+
+get_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
int cam_smmu_get_iova(int handle, int ion_fd,
dma_addr_t *paddr_ptr, size_t *len_ptr)
@@ -2173,12 +2575,9 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd,
}
EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
-int cam_smmu_unmap_iova(int handle,
- int ion_fd,
- enum cam_smmu_region_id region_id)
+static int cam_smmu_unmap_validate_params(int handle)
{
- int idx, rc;
- struct cam_dma_buff_info *mapping_info;
+ int idx;
if (handle == HANDLE_INIT) {
CAM_ERR(CAM_SMMU, "Error: Invalid handle");
@@ -2194,13 +2593,30 @@ int cam_smmu_unmap_iova(int handle,
return -EINVAL;
}
+ return 0;
+}
+
+int cam_smmu_unmap_user_iova(int handle,
+ int ion_fd, enum cam_smmu_region_id region_id)
+{
+ int idx, rc;
+ struct cam_dma_buff_info *mapping_info;
+
+ rc = cam_smmu_unmap_validate_params(handle);
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "unmap util validation failure");
+ return rc;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].is_secure) {
CAM_ERR(CAM_SMMU,
"Error: can't unmap non-secure mem from secure cb");
- return -EINVAL;
+ rc = -EINVAL;
+ goto unmap_end;
}
- mutex_lock(&iommu_cb_set.cb_info[idx].lock);
if (iommu_cb_set.cb_info[idx].handle != handle) {
CAM_ERR(CAM_SMMU,
"Error: hdl is not valid, table_hdl = %x, hdl = %x",
@@ -2209,10 +2625,12 @@ int cam_smmu_unmap_iova(int handle,
goto unmap_end;
}
- /* Based on ion fd and index, we can find mapping info of buffer */
+ /* Based on ion_fd & index, we can find mapping info of buffer */
mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+
if (!mapping_info) {
- CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
+ CAM_ERR(CAM_SMMU,
+ "Error: Invalid params idx = %d, fd = %d",
idx, ion_fd);
rc = -EINVAL;
goto unmap_end;
@@ -2228,7 +2646,60 @@ int cam_smmu_unmap_iova(int handle,
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return rc;
}
-EXPORT_SYMBOL(cam_smmu_unmap_iova);
+EXPORT_SYMBOL(cam_smmu_unmap_user_iova);
+
+int cam_smmu_unmap_kernel_iova(int handle,
+ struct dma_buf *buf, enum cam_smmu_region_id region_id)
+{
+ int idx, rc;
+ struct cam_dma_buff_info *mapping_info;
+
+ rc = cam_smmu_unmap_validate_params(handle);
+ if (rc) {
+ CAM_ERR(CAM_SMMU, "unmap util validation failure");
+ return rc;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].is_secure) {
+ CAM_ERR(CAM_SMMU,
+ "Error: can't unmap non-secure mem from secure cb");
+ rc = -EINVAL;
+ goto unmap_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto unmap_end;
+ }
+
+ /* Based on dma_buf & index, we can find mapping info of buffer */
+ mapping_info = cam_smmu_find_mapping_by_dma_buf(idx, buf);
+
+ if (!mapping_info) {
+ CAM_ERR(CAM_SMMU,
+ "Error: Invalid params idx = %d, dma_buf = %pK",
+ idx, buf);
+ rc = -EINVAL;
+ goto unmap_end;
+ }
+
+ /* Unmapping one buffer from device */
+ CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+ rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+ if (rc < 0)
+ CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova);
+
int cam_smmu_put_iova(int handle, int ion_fd)
{
@@ -2301,10 +2772,18 @@ int cam_smmu_destroy_handle(int handle)
}
if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
- CAM_ERR(CAM_SMMU, "Client %s buffer list is not clean",
+ CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean",
iommu_cb_set.cb_info[idx].name);
- cam_smmu_print_list(idx);
- cam_smmu_clean_buffer_list(idx);
+ cam_smmu_print_user_list(idx);
+ cam_smmu_clean_user_buffer_list(idx);
+ }
+
+ if (!list_empty_careful(
+ &iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) {
+ CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean",
+ iommu_cb_set.cb_info[idx].name);
+ cam_smmu_print_kernel_list(idx);
+ cam_smmu_clean_kernel_buffer_list(idx);
}
if (&iommu_cb_set.cb_info[idx].is_secure) {
@@ -2373,6 +2852,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
cb->dev = dev;
cb->is_fw_allocated = false;
+ cb->is_secheap_allocated = false;
/* Create a pool with 4K granularity for supporting shared memory */
if (cb->shared_support) {
@@ -2574,6 +3054,11 @@ static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
cb->io_info.iova_start = region_start;
cb->io_info.iova_len = region_len;
break;
+ case CAM_SMMU_REGION_SECHEAP:
+ cb->secheap_support = 1;
+ cb->secheap_info.iova_start = region_start;
+ cb->secheap_info.iova_len = region_len;
+ break;
default:
CAM_ERR(CAM_SMMU,
"Incorrect region id present in DT file: %d",
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 4cb6efb..b062258 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -45,7 +45,8 @@ enum cam_smmu_region_id {
CAM_SMMU_REGION_FIRMWARE,
CAM_SMMU_REGION_SHARED,
CAM_SMMU_REGION_SCRATCH,
- CAM_SMMU_REGION_IO
+ CAM_SMMU_REGION_IO,
+ CAM_SMMU_REGION_SECHEAP
};
/**
@@ -85,7 +86,7 @@ int cam_smmu_get_handle(char *identifier, int *handle_ptr);
int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
/**
- * @brief : Maps IOVA for calling driver
+ * @brief : Maps user space IOVA for calling driver
*
* @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
* @param ion_fd: ION handle identifying the memory buffer.
@@ -95,25 +96,54 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
* returned if region_id is CAM_SMMU_REGION_IO. If region_id is
* CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
* which specifies the cpu virtual address to map.
- * @len : Length of buffer mapped returned by CAM SMMU driver.
+ * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
* @return Status of operation. Negative in case of error. Zero otherwise.
*/
-int cam_smmu_map_iova(int handle,
+int cam_smmu_map_user_iova(int handle,
int ion_fd, enum cam_smmu_map_dir dir,
dma_addr_t *dma_addr, size_t *len_ptr,
enum cam_smmu_region_id region_id);
/**
- * @brief : Unmaps IOVA for calling driver
+ * @brief : Maps kernel space IOVA for calling driver
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param buf : dma_buf allocated for kernel usage in mem_mgr
+ * @dir : Mapping direction: which will translate to DMA_BIDIRECTIONAL,
+ * DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr : Pointer to physical address where mapped address will be
+ * returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ * CAM_SMMU_REGION_SHARED, dma_addr is used as an input
+ * parameter which specifies the cpu virtual address to map.
+ * @len_ptr : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_kernel_iova(int handle,
+ struct dma_buf *buf, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr,
+ enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Unmaps user space IOVA for calling driver
*
* @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
* @param ion_fd: ION handle identifying the memory buffer.
*
* @return Status of operation. Negative in case of error. Zero otherwise.
*/
-int cam_smmu_unmap_iova(int handle,
- int ion_fd,
- enum cam_smmu_region_id region_id);
+int cam_smmu_unmap_user_iova(int handle,
+ int ion_fd, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief : Unmaps kernel IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param buf : dma_buf allocated for the kernel
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_kernel_iova(int handle,
+ struct dma_buf *buf, enum cam_smmu_region_id region_id);
/**
* @brief : Allocates a scratch buffer
@@ -290,4 +320,29 @@ int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
int cam_smmu_get_region_info(int32_t smmu_hdl,
enum cam_smmu_region_id region_id,
struct cam_smmu_region_info *region_info);
+
+/**
+ * @brief Reserves secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param buf: Allocated dma_buf for secondary heap
+ * @param iova: IOVA of secondary heap after reservation has completed
+ * @param request_len: Length of secondary heap after reservation has completed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+ struct dma_buf *buf,
+ dma_addr_t *iova,
+ size_t *request_len);
+
+/**
+ * @brief Releases secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_release_sec_heap(int32_t smmu_hdl);
+
#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index bfeb7c3..e7dcbe7 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -25,22 +25,25 @@ int cam_sync_create(int32_t *sync_obj, const char *name)
{
int rc;
long idx;
+ bool bit;
do {
idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
- if (idx >= CAM_SYNC_MAX_OBJS)
- return -ENOMEM;
- } while (!spin_trylock_bh(&sync_dev->row_spinlocks[idx]));
+ if (idx >= CAM_SYNC_MAX_OBJS)
+ return -ENOMEM;
+ bit = test_and_set_bit(idx, sync_dev->bitmap);
+ } while (bit);
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
if (rc) {
CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
idx);
+ clear_bit(idx, sync_dev->bitmap);
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
- set_bit(idx, sync_dev->bitmap);
*sync_obj = idx;
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
@@ -227,6 +230,8 @@ int cam_sync_signal(int32_t sync_obj, uint32_t status)
spin_unlock_bh(
&sync_dev->row_spinlocks[
parent_info->sync_id]);
+ spin_unlock_bh(
+ &sync_dev->row_spinlocks[sync_obj]);
return rc;
}
}
@@ -299,6 +304,12 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
int rc;
long idx = 0;
+ bool bit;
+
+ if (!sync_obj || !merged_obj) {
+ CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
+ return -EINVAL;
+ }
rc = cam_sync_util_validate_merge(sync_obj,
num_objs);
@@ -307,48 +318,34 @@ int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
return -EINVAL;
}
- rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
- if (rc < 0) {
- CAM_ERR(CAM_SYNC,
- "Error: Unable to find empty row, table full");
- return -EINVAL;
- }
+ do {
+ idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+ if (idx >= CAM_SYNC_MAX_OBJS)
+ return -ENOMEM;
+ bit = test_and_set_bit(idx, sync_dev->bitmap);
+ } while (bit);
- if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
- CAM_ERR(CAM_SYNC,
- "Error: Invalid empty row index returned = %ld", idx);
- return -EINVAL;
- }
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
rc = cam_sync_init_group_object(sync_dev->sync_table,
idx, sync_obj,
num_objs);
-
if (rc < 0) {
CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
idx);
+ clear_bit(idx, sync_dev->bitmap);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -EINVAL;
}
*merged_obj = idx;
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return 0;
}
int cam_sync_destroy(int32_t sync_obj)
{
- struct sync_table_row *row = NULL;
-
- if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
- return -EINVAL;
-
- row = sync_dev->sync_table + sync_obj;
- if (row->state == CAM_SYNC_STATE_INVALID) {
- CAM_ERR(CAM_SYNC,
- "Error: accessing an uninitialized sync obj: idx = %d",
- sync_obj);
- return -EINVAL;
- }
cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
index ba9bef4..e2a7fcb 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -55,6 +55,18 @@ enum sync_type {
};
/**
+ * enum sync_list_clean_type - Enum to indicate the type of list clean action
+ * to be performed, i.e. specific sync ID or all list sync ids.
+ *
+ * @SYNC_LIST_CLEAN_ID : Specific object to be cleaned in the list
+ * @SYNC_LIST_CLEAN_ALL : Clean all objects in the list
+ */
+enum sync_list_clean_type {
+ SYNC_LIST_CLEAN_ID,
+ SYNC_LIST_CLEAN_ALL
+};
+
+/**
* struct sync_parent_info - Single node of information about a parent
* of a sync object, usually part of the parents linked list
*
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index c62aacf..6aa7c23 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -98,18 +98,39 @@ uint32_t cam_sync_util_get_group_object_state(struct sync_table_row *table,
return CAM_SYNC_STATE_SIGNALED_ERROR;
}
+static int cam_sync_util_get_group_object_remaining_count(
+ struct sync_table_row *table,
+ uint32_t *sync_objs,
+ uint32_t num_objs)
+{
+ int i;
+ struct sync_table_row *child_row = NULL;
+ int remaining_count = 0;
+
+ if (!table || !sync_objs)
+ return -EINVAL;
+
+ for (i = 0; i < num_objs; i++) {
+ child_row = table + sync_objs[i];
+ if (child_row->state == CAM_SYNC_STATE_ACTIVE)
+ remaining_count++;
+ }
+
+ return remaining_count;
+}
+
int cam_sync_init_group_object(struct sync_table_row *table,
uint32_t idx,
uint32_t *sync_objs,
uint32_t num_objs)
{
int i;
+ int remaining;
struct sync_child_info *child_info;
struct sync_parent_info *parent_info;
struct sync_table_row *row = table + idx;
struct sync_table_row *child_row = NULL;
- spin_lock_bh(&sync_dev->row_spinlocks[idx]);
INIT_LIST_HEAD(&row->parents_list);
INIT_LIST_HEAD(&row->children_list);
@@ -124,8 +145,7 @@ int cam_sync_init_group_object(struct sync_table_row *table,
if (!child_info) {
cam_sync_util_cleanup_children_list(
- &row->children_list);
- spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ &row->children_list, SYNC_LIST_CLEAN_ALL, 0);
return -ENOMEM;
}
@@ -140,11 +160,11 @@ int cam_sync_init_group_object(struct sync_table_row *table,
parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
if (!parent_info) {
cam_sync_util_cleanup_parents_list(
- &child_row->parents_list);
+ &child_row->parents_list,
+ SYNC_LIST_CLEAN_ALL, 0);
cam_sync_util_cleanup_children_list(
- &row->children_list);
+ &row->children_list, SYNC_LIST_CLEAN_ALL, 0);
spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
- spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return -ENOMEM;
}
parent_info->sync_id = idx;
@@ -156,7 +176,15 @@ int cam_sync_init_group_object(struct sync_table_row *table,
row->sync_id = idx;
row->state = cam_sync_util_get_group_object_state(table,
sync_objs, num_objs);
- row->remaining = num_objs;
+ remaining = cam_sync_util_get_group_object_remaining_count(table,
+ sync_objs, num_objs);
+ if (remaining < 0) {
+ CAM_ERR(CAM_SYNC, "Failed getting remaining count");
+ return -ENODEV;
+ }
+
+ row->remaining = remaining;
+
init_completion(&row->signaled);
INIT_LIST_HEAD(&row->callback_list);
INIT_LIST_HEAD(&row->user_payload_list);
@@ -164,35 +192,137 @@ int cam_sync_init_group_object(struct sync_table_row *table,
if (row->state != CAM_SYNC_STATE_ACTIVE)
complete_all(&row->signaled);
- spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return 0;
}
int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
{
struct sync_table_row *row = table + idx;
- struct sync_child_info *child_info, *temp_child;
+ struct sync_child_info *child_info, *temp_child, *child_copy_info;
struct sync_callback_info *sync_cb, *temp_cb;
- struct sync_parent_info *parent_info, *temp_parent;
+ struct sync_parent_info *parent_info, *temp_parent, *parent_copy_info;
struct sync_user_payload *upayload_info, *temp_upayload;
+ struct sync_table_row *child_row = NULL, *parent_row = NULL;
+ struct list_head child_copy_list, parent_copy_list;
if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
return -EINVAL;
spin_lock_bh(&sync_dev->row_spinlocks[idx]);
- clear_bit(idx, sync_dev->bitmap);
- list_for_each_entry_safe(child_info, temp_child,
- &row->children_list, list) {
+ if (row->state == CAM_SYNC_STATE_INVALID) {
+ CAM_ERR(CAM_SYNC,
+ "Error: accessing an uninitialized sync obj: idx = %d",
+ idx);
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ return -EINVAL;
+ }
+
+ /* Objects child and parent objects will be added into this list */
+ INIT_LIST_HEAD(&child_copy_list);
+ INIT_LIST_HEAD(&parent_copy_list);
+
+ list_for_each_entry_safe(child_info, temp_child, &row->children_list,
+ list) {
+ if (child_info->sync_id <= 0)
+ continue;
+
+ child_copy_info = kzalloc(sizeof(*child_copy_info), GFP_ATOMIC);
+ if (!child_copy_info) {
+ /* No free memory, clean up the child_copy_list */
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ goto deinit;
+ }
+ child_copy_info->sync_id = child_info->sync_id;
+ list_add_tail(&child_copy_info->list, &child_copy_list);
+ }
+
+ list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
+ list) {
+ if (parent_info->sync_id <= 0)
+ continue;
+ parent_copy_info = kzalloc(sizeof(*parent_copy_info),
+ GFP_ATOMIC);
+ if (!parent_copy_info) {
+ /* No free memory, clean up the parent_copy_list */
+ while (!list_empty(&parent_copy_list)) {
+ parent_info = list_first_entry(
+ &parent_copy_list,
+ struct sync_parent_info, list);
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ }
+ /* No free memory, clean up the child_copy_list */
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ }
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ goto deinit;
+ }
+ parent_copy_info->sync_id = parent_info->sync_id;
+ list_add_tail(&parent_copy_info->list, &parent_copy_list);
+ }
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+ /* Cleanup the child to parent link from child list*/
+ while (!list_empty(&child_copy_list)) {
+ child_info = list_first_entry(&child_copy_list,
+ struct sync_child_info, list);
+ child_row = sync_dev->sync_table + child_info->sync_id;
+ spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+ if (child_row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[
+ child_info->sync_id]);
+ list_del_init(&child_info->list);
+ kfree(child_info);
+ continue;
+ }
+
+ cam_sync_util_cleanup_parents_list(&child_row->parents_list,
+ SYNC_LIST_CLEAN_ID, idx);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
list_del_init(&child_info->list);
kfree(child_info);
}
- list_for_each_entry_safe(parent_info, temp_parent,
- &row->parents_list, list) {
+ /* Cleanup the parent to child link */
+ while (!list_empty(&parent_copy_list)) {
+ parent_info = list_first_entry(&parent_copy_list,
+ struct sync_parent_info, list);
+ parent_row = sync_dev->sync_table + parent_info->sync_id;
+ spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+ if (parent_row->state == CAM_SYNC_STATE_INVALID) {
+ spin_unlock_bh(&sync_dev->row_spinlocks[
+ parent_info->sync_id]);
+ list_del_init(&parent_info->list);
+ kfree(parent_info);
+ continue;
+ }
+
+ cam_sync_util_cleanup_children_list(&parent_row->children_list,
+ SYNC_LIST_CLEAN_ID, idx);
+
+ spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
list_del_init(&parent_info->list);
kfree(parent_info);
}
+deinit:
+ spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+ cam_sync_util_cleanup_children_list(&row->children_list,
+ SYNC_LIST_CLEAN_ALL, 0);
+ cam_sync_util_cleanup_parents_list(&row->parents_list,
+ SYNC_LIST_CLEAN_ALL, 0);
+
list_for_each_entry_safe(upayload_info, temp_upayload,
&row->user_payload_list, list) {
list_del_init(&upayload_info->list);
@@ -207,6 +337,7 @@ int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
row->state = CAM_SYNC_STATE_INVALID;
memset(row, 0, sizeof(*row));
+ clear_bit(idx, sync_dev->bitmap);
spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
return 0;
@@ -218,11 +349,14 @@ void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
struct sync_callback_info,
cb_dispatch_work);
+ spin_lock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
+ list_del_init(&cb_info->list);
+ spin_unlock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
+
cb_info->callback_func(cb_info->sync_obj,
cb_info->status,
cb_info->cb_data);
- list_del_init(&cb_info->list);
kfree(cb_info);
}
@@ -323,26 +457,48 @@ int cam_sync_util_get_state(int current_state,
return result;
}
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean)
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj)
{
struct sync_child_info *child_info = NULL;
struct sync_child_info *temp_child_info = NULL;
+ uint32_t curr_sync_obj;
list_for_each_entry_safe(child_info,
temp_child_info, list_to_clean, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (child_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = child_info->sync_id;
list_del_init(&child_info->list);
kfree(child_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (curr_sync_obj == sync_obj))
+ break;
}
}
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean)
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj)
{
struct sync_parent_info *parent_info = NULL;
struct sync_parent_info *temp_parent_info = NULL;
+ uint32_t curr_sync_obj;
list_for_each_entry_safe(parent_info,
temp_parent_info, list_to_clean, list) {
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (parent_info->sync_id != sync_obj))
+ continue;
+
+ curr_sync_obj = parent_info->sync_id;
list_del_init(&parent_info->list);
kfree(parent_info);
+
+ if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+ (curr_sync_obj == sync_obj))
+ break;
}
}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 8b60ce1..1c5c4bf 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -141,17 +141,25 @@ int cam_sync_util_get_state(int current_state,
/**
* @brief: Function to clean up the children of a sync object
* @param list_to_clean : List to clean up
+ * @list_clean_type : Clean specific object or clean all objects
+ * @sync_obj : Sync object to be clean if list clean type is
+ * SYNC_LIST_CLEAN_ID
*
* @return None
*/
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean);
+void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj);
/**
* @brief: Function to clean up the parents of a sync object
* @param list_to_clean : List to clean up
+ * @list_clean_type : Clean specific object or clean all objects
+ * @sync_obj : Sync object to be clean if list clean type is
+ * SYNC_LIST_CLEAN_ID
*
* @return None
*/
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean);
+void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+ uint32_t list_clean_type, uint32_t sync_obj);
#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
index 9745d45..c0160c4 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -34,6 +34,7 @@
#define CAM_HFI (1 << 18)
#define CAM_CTXT (1 << 19)
#define CAM_OIS (1 << 20)
+#define CAM_RES (1 << 21)
#define STR_BUFFER_MAX_LENGTH 1024
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 743dfda..07fb944 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -196,6 +196,92 @@ int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
return rc;
}
+int cam_soc_util_clk_put(struct clk **clk)
+{
+ if (!(*clk)) {
+ CAM_ERR(CAM_UTIL, "Invalid params clk");
+ return -EINVAL;
+ }
+
+ clk_put(*clk);
+ *clk = NULL;
+
+ return 0;
+}
+
+static struct clk *cam_soc_util_option_clk_get(struct device_node *np,
+ int index)
+{
+ struct of_phandle_args clkspec;
+ struct clk *clk;
+ int rc;
+
+ if (index < 0)
+ return ERR_PTR(-EINVAL);
+
+ rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells",
+ index, &clkspec);
+ if (rc)
+ return ERR_PTR(rc);
+
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+
+ return clk;
+}
+
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+ const char *clk_name, struct clk **clk, int32_t *clk_index,
+ int32_t *clk_rate)
+{
+ int index = 0;
+ int rc = 0;
+ struct device_node *of_node = NULL;
+
+ if (!soc_info || !clk_name || !clk) {
+ CAM_ERR(CAM_UTIL,
+ "Invalid params soc_info %pK clk_name %s clk %pK",
+ soc_info, clk_name, clk);
+ return -EINVAL;
+ }
+
+ of_node = soc_info->dev->of_node;
+
+ index = of_property_match_string(of_node, "clock-names-option",
+ clk_name);
+
+ *clk = cam_soc_util_option_clk_get(of_node, index);
+ if (IS_ERR(*clk)) {
+ CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name,
+ soc_info->dev_name);
+ *clk_index = -1;
+ return -EFAULT;
+ }
+ *clk_index = index;
+
+ rc = of_property_read_u32_index(of_node, "clock-rates-option",
+ index, clk_rate);
+ if (rc) {
+ CAM_ERR(CAM_UTIL,
+ "Error reading clock-rates clk_name %s index %d",
+ clk_name, index);
+ cam_soc_util_clk_put(clk);
+ *clk_rate = 0;
+ return rc;
+ }
+
+ /*
+ * Option clocks are assumed to be available to single Device here.
+ * Hence use INIT_RATE instead of NO_SET_RATE.
+ */
+ *clk_rate = (*clk_rate == 0) ? (int32_t)INIT_RATE : *clk_rate;
+
+ CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d",
+ clk_name, *clk_index, *clk_rate);
+
+ return 0;
+}
+
int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
int32_t clk_rate)
{
@@ -324,6 +410,13 @@ static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
of_node = soc_info->dev->of_node;
+ if (!of_property_read_bool(of_node, "use-shared-clk")) {
+ CAM_DBG(CAM_UTIL, "No shared clk parameter defined");
+ soc_info->use_shared_clk = false;
+ } else {
+ soc_info->use_shared_clk = true;
+ }
+
count = of_property_count_strings(of_node, "clock-names");
CAM_DBG(CAM_UTIL, "count = %d", count);
@@ -407,7 +500,7 @@ static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
soc_info->clk_rate[level][j] =
(soc_info->clk_rate[level][j] == 0) ?
- (long)NO_SET_RATE :
+ (int32_t)NO_SET_RATE :
soc_info->clk_rate[level][j];
CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
@@ -766,7 +859,8 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
count = of_property_count_strings(of_node, "reg-names");
if (count <= 0) {
- CAM_ERR(CAM_UTIL, "no reg-names found");
+ CAM_WARN(CAM_UTIL, "no reg-names found for: %s",
+ soc_info->dev_name);
count = 0;
}
soc_info->num_mem_block = count;
@@ -802,7 +896,8 @@ int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
rc = of_property_read_string_index(of_node, "interrupt-names", 0,
&soc_info->irq_name);
if (rc) {
- CAM_WARN(CAM_UTIL, "No interrupt line present");
+		CAM_WARN(CAM_UTIL, "No interrupt line present for: %s",
+ soc_info->dev_name);
rc = 0;
} else {
soc_info->irq_line =
@@ -1267,14 +1362,14 @@ int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
if (!soc_info)
return -EINVAL;
+ if (disble_irq)
+ rc |= cam_soc_util_irq_disable(soc_info);
+
if (disable_clocks)
cam_soc_util_clk_disable_default(soc_info);
cam_soc_util_regulator_disable_default(soc_info);
- if (disble_irq)
- rc |= cam_soc_util_irq_disable(soc_info);
-
if (soc_info->pinctrl_info.pinctrl &&
soc_info->pinctrl_info.gpio_state_suspend)
rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 8bd8275..4a87d50 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -180,6 +180,7 @@ struct cam_hw_soc_info {
struct regulator *rgltr[CAM_SOC_MAX_REGULATOR];
uint32_t rgltr_delay[CAM_SOC_MAX_REGULATOR];
+ uint32_t use_shared_clk;
uint32_t num_clk;
const char *clk_name[CAM_SOC_MAX_CLK];
struct clk *clk[CAM_SOC_MAX_CLK];
@@ -377,6 +378,35 @@ int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
int32_t clk_rate);
/**
+ * cam_soc_util_get_option_clk_by_name()
+ *
+ * @brief: Get reference to optional clk using name
+ *
+ * @soc_info: Device soc information
+ * @clk_name: Name of clock to find reference for
+ * @clk: Clock reference pointer to be filled if Success
+ * @clk_index: Clk index in the option clk array to be returned
+ * @clk_rate: Clk rate in the option clk array
+ *
+ * @return: 0: Success
+ * Negative: Failure
+ */
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+ const char *clk_name, struct clk **clk, int32_t *clk_index,
+ int32_t *clk_rate);
+
+/**
+ * cam_soc_util_clk_put()
+ *
+ * @brief: Put clock specified in params
+ *
+ * @clk: Reference to the Clock that needs to be put
+ *
+ * @return: Success or failure
+ */
+int cam_soc_util_clk_put(struct clk **clk);
+
+/**
* cam_soc_util_clk_enable()
*
* @brief: Enable clock specified in params
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
index f4f85e4..90ec566 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -71,6 +71,21 @@ TRACE_EVENT(cam_isp_activated_irq,
)
);
+TRACE_EVENT(cam_icp_fw_dbg,
+ TP_PROTO(char *dbg_message),
+ TP_ARGS(dbg_message),
+ TP_STRUCT__entry(
+ __string(dbg_message, dbg_message)
+ ),
+ TP_fast_assign(
+ __assign_str(dbg_message, dbg_message);
+ ),
+ TP_printk(
+ "%s: ",
+ __get_str(dbg_message)
+ )
+);
+
TRACE_EVENT(cam_buf_done,
TP_PROTO(const char *ctx_type, struct cam_context *ctx,
struct cam_ctx_request *req),
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 39793b6..0a01b6f 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -877,6 +877,7 @@ int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func)
mpq_demux->sdmx_eos = 0;
mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
mpq_demux->ts_packet_timestamp_source = 0;
+ mpq_demux->disable_cache_ops = 1;
if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
MPQ_DVB_ERR_PRINT(
@@ -6349,7 +6350,8 @@ static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
continue;
/* Invalidate output buffer before processing the results */
- mpq_sdmx_invalidate_buffer(mpq_feed);
+ if (!mpq_demux->disable_cache_ops)
+ mpq_sdmx_invalidate_buffer(mpq_feed);
if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
MPQ_DVB_ERR_PRINT(
@@ -6571,13 +6573,15 @@ static int mpq_sdmx_write(struct mpq_demux *mpq_demux,
* We must flush the buffer before SDMX starts reading from it
* so that it gets a valid data in memory.
*/
- ret = msm_ion_do_cache_op(mpq_demux->ion_client,
- ion_handle, rbuf->data,
- rbuf->size, ION_IOC_CLEAN_CACHES);
- if (ret)
- MPQ_DVB_ERR_PRINT(
- "%s: msm_ion_do_cache_op failed, ret = %d\n",
- __func__, ret);
+ if (!mpq_demux->disable_cache_ops) {
+ ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+ ion_handle, rbuf->data,
+ rbuf->size, ION_IOC_CLEAN_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+ }
return mpq_sdmx_process(mpq_demux, &buf_desc, count,
read_offset, mpq_demux->demux.ts_packet_size);
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
index 0c20a89..a187707 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -543,6 +543,8 @@ struct mpq_demux {
ktime_t last_notification_time;
int ts_packet_timestamp_source;
+ /* Disable cache operations on qseecom heap since not supported */
+ int disable_cache_ops;
};
/**
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index ae01baf..dc041a7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -139,6 +139,43 @@ static bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
return clk_forced_on;
}
+void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ u32 reg_val;
+ bool forced_on;
+
+ if (!mdata || !params || !params->reg_off_mdp_clk_ctrl) {
+ SDEROT_ERR("null input parameter\n");
+ return;
+ }
+
+ if (params->xin_id > MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1) {
+ SDEROT_ERR("xin_id:%d exceed max limit\n", params->xin_id);
+ return;
+ }
+
+ forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+ params->reg_off_mdp_clk_ctrl, true);
+
+ SDEROT_EVTLOG(forced_on, params->xin_id);
+
+ reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ reg_val | BIT(params->xin_id));
+
+ /* this is a polling operation */
+ sde_mdp_wait_for_xin_halt(params->xin_id);
+
+ reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+ reg_val & ~BIT(params->xin_id));
+
+ if (forced_on)
+ force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+ params->reg_off_mdp_clk_ctrl, false);
+}
+
u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 400f53b..c85d255 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -63,6 +63,18 @@ struct sde_mdp_set_ot_params {
u32 rotsts_busy_mask;
};
+/*
+ * struct sde_mdp_vbif_halt_params: parameters for issuing a halt request to vbif
+ * @xin_id: xin port number of vbif
+ * @reg_off_mdp_clk_ctrl: reg offset for vbif clock control
+ * @bit_off_mdp_clk_ctrl: bit offset for vbif clock control
+ */
+struct sde_mdp_vbif_halt_params {
+ u32 xin_id;
+ u32 reg_off_mdp_clk_ctrl;
+ u32 bit_off_mdp_clk_ctrl;
+};
+
enum sde_bus_vote_type {
VOTE_INDEX_DISABLE,
VOTE_INDEX_19_MHZ,
@@ -276,6 +288,8 @@ u32 sde_mdp_get_ot_limit(u32 width, u32 height, u32 pixfmt, u32 fps, u32 is_rd);
void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
+void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params);
+
int sde_mdp_init_vbif(void);
#define SDE_VBIF_WRITE(mdata, offset, value) \
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index ab3223e..c7d1074 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2513,6 +2513,45 @@ void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
req->finished = true;
}
+void sde_rotator_abort_inline_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req)
+{
+ struct kthread_work *commit_work;
+ struct kthread_work *done_work;
+ struct sde_rot_entry *entry;
+ struct sde_rot_hw_resource *hw;
+ int i;
+
+ if (!mgr || !private || !req || !req->entries)
+ return;
+
+ for (i = 0; i < req->count; i++) {
+ entry = &req->entries[i];
+ if (!entry)
+ continue;
+
+ commit_work = &entry->commit_work;
+ done_work = &entry->done_work;
+
+ hw = sde_rotator_get_hw_resource(entry->commitq, entry);
+ if (!hw) {
+ SDEROT_ERR("no hw for the queue\n");
+ SDEROT_EVTLOG(i, req->count, SDE_ROT_EVTLOG_ERROR);
+ continue;
+ }
+
+ SDEROT_EVTLOG(i, req->count);
+
+ mgr->ops_abort_hw(hw, entry);
+
+ sde_rot_mgr_unlock(mgr);
+ kthread_flush_work(commit_work);
+ kthread_flush_work(done_work);
+ sde_rot_mgr_lock(mgr);
+ }
+}
+
int sde_rotator_handle_request_common(struct sde_rot_mgr *mgr,
struct sde_rot_file_private *private,
struct sde_rot_entry_container *req)
@@ -3143,6 +3182,26 @@ void sde_rotator_core_destroy(struct sde_rot_mgr *mgr)
devm_kfree(dev, mgr);
}
+void sde_rotator_core_dump(struct sde_rot_mgr *mgr)
+{
+ if (!mgr) {
+ SDEROT_ERR("null parameters\n");
+ return;
+ }
+
+ sde_rotator_resource_ctrl(mgr, true);
+ /* dump first snapshot */
+ if (mgr->ops_hw_dump_status)
+ mgr->ops_hw_dump_status(mgr->hw_data);
+
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");
+
+ /* dump second snapshot for comparison */
+ if (mgr->ops_hw_dump_status)
+ mgr->ops_hw_dump_status(mgr->hw_data);
+ sde_rotator_resource_ctrl(mgr, false);
+}
+
static void sde_rotator_suspend_cancel_rot_work(struct sde_rot_mgr *mgr)
{
struct sde_rot_file_private *priv, *priv_next;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 57a68ed..e23ed7a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -449,6 +449,8 @@ struct sde_rot_mgr {
struct sde_rot_entry *entry);
int (*ops_cancel_hw)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
+ int (*ops_abort_hw)(struct sde_rot_hw_resource *hw,
+ struct sde_rot_entry *entry);
int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry);
int (*ops_wait_for_entry)(struct sde_rot_hw_resource *hw,
@@ -476,6 +478,7 @@ struct sde_rot_mgr {
int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
int len);
int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
+ void (*ops_hw_dump_status)(struct sde_rot_mgr *mgr);
void *hw_data;
};
@@ -568,6 +571,12 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
/*
+ * sde_rotator_core_dump - perform register dump
+ * @mgr: Pointer to rotator manager
+ */
+void sde_rotator_core_dump(struct sde_rot_mgr *mgr);
+
+/*
* sde_rotator_session_open - open a new rotator per file session
* @mgr: Pointer to rotator manager
* @pprivate: Pointer to pointer of the newly initialized per file session
@@ -670,6 +679,19 @@ void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
struct sde_rot_entry_container *req);
/*
+ * sde_rotator_abort_inline_request - abort inline rotation request after start
+ * This function allows inline rotation requests to be aborted after
+ * sde_rotator_req_set_start has already been issued.
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
+void sde_rotator_abort_inline_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req);
+
+/*
* sde_rotator_handle_request_common - add the given request to rotator
* manager and clean up completed requests
* @rot_dev: Pointer to rotator device
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 46f64d2..b9158e1 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -638,18 +638,6 @@ static void sde_rot_evtlog_debug_work(struct work_struct *work)
}
/*
- * sde_rot_dump_panic - Issue evtlog dump and generic panic
- */
-void sde_rot_dump_panic(bool do_panic)
-{
- sde_rot_evtlog_dump_all();
- sde_rot_dump_reg_all();
-
- if (do_panic)
- panic("sde_rotator");
-}
-
-/*
* sde_rot_evtlog_tout_handler - log dump timeout handler
* @queue: boolean indicate putting log dump into queue
* @name: function name having timeout
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index 2fc8e3f..fa53083 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -42,7 +42,6 @@ enum sde_rot_dbg_evtlog_flag {
SDE_ROT_EVTLOG_TOUT_DATA_LIMITER)
void sde_rot_evtlog(const char *name, int line, int flag, ...);
-void sde_rot_dump_panic(bool do_panic);
void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...);
struct sde_rotator_device;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 976155e..523ff5b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1440,6 +1440,61 @@ int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
/*
+ * _sde_rotator_inline_cleanup - perform inline related request cleanup
+ * This function assumes rot_dev->mgr lock has been taken when called.
+ * @handle: Pointer to rotator context
+ * @request: Pointer to rotation request
+ * return: 0 if success; -EAGAIN if cleanup should be retried
+ */
+static int _sde_rotator_inline_cleanup(void *handle,
+ struct sde_rotator_request *request)
+{
+ struct sde_rotator_ctx *ctx;
+ struct sde_rotator_device *rot_dev;
+ int ret;
+
+ if (!handle || !request) {
+ SDEROT_ERR("invalid rotator handle/request\n");
+ return -EINVAL;
+ }
+
+ ctx = handle;
+ rot_dev = ctx->rot_dev;
+
+ if (!rot_dev || !rot_dev->mgr) {
+ SDEROT_ERR("invalid rotator device\n");
+ return -EINVAL;
+ }
+
+ if (request->committed) {
+ /* wait until request is finished */
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
+ ret = wait_event_timeout(ctx->wait_queue,
+ sde_rotator_is_request_retired(request),
+ msecs_to_jiffies(rot_dev->streamoff_timeout));
+ mutex_lock(&rot_dev->lock);
+ sde_rot_mgr_lock(rot_dev->mgr);
+
+ if (!ret) {
+ SDEROT_ERR("timeout w/o retire s:%d\n",
+ ctx->session_id);
+ SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
+ sde_rotator_abort_inline_request(rot_dev->mgr,
+ ctx->private, request->req);
+ return -EAGAIN;
+ } else if (ret == 1) {
+ SDEROT_ERR("timeout w/ retire s:%d\n", ctx->session_id);
+ SDEROT_EVTLOG(ctx->session_id, SDE_ROT_EVTLOG_ERROR);
+ }
+ }
+
+ sde_rotator_req_finish(rot_dev->mgr, ctx->private, request->req);
+ sde_rotator_retire_request(request);
+ return 0;
+}
+
+/*
* sde_rotator_inline_commit - commit given rotator command
* @handle: Pointer to rotator context
* @cmd: Pointer to rotator command
@@ -1466,7 +1521,7 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
ctx = handle;
rot_dev = ctx->rot_dev;
- if (!rot_dev) {
+ if (!rot_dev || !rot_dev->mgr) {
SDEROT_ERR("invalid rotator device\n");
return -EINVAL;
}
@@ -1498,6 +1553,7 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
(cmd->video_mode << 5) |
(cmd_type << 24));
+ mutex_lock(&rot_dev->lock);
sde_rot_mgr_lock(rot_dev->mgr);
if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
@@ -1707,33 +1763,27 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
}
request = cmd->priv_handle;
- req = request->req;
- if (request->committed) {
- /* wait until request is finished */
- sde_rot_mgr_unlock(rot_dev->mgr);
- ret = wait_event_timeout(ctx->wait_queue,
- sde_rotator_is_request_retired(request),
- msecs_to_jiffies(rot_dev->streamoff_timeout));
- if (!ret) {
- SDEROT_ERR("timeout w/o retire s:%d\n",
- ctx->session_id);
- SDEROT_EVTLOG(ctx->session_id,
- SDE_ROT_EVTLOG_ERROR);
- } else if (ret == 1) {
- SDEROT_ERR("timeout w/ retire s:%d\n",
- ctx->session_id);
- SDEROT_EVTLOG(ctx->session_id,
- SDE_ROT_EVTLOG_ERROR);
- }
- sde_rot_mgr_lock(rot_dev->mgr);
+ /* attempt single retry if first cleanup attempt failed */
+ if (_sde_rotator_inline_cleanup(handle, request) == -EAGAIN)
+ _sde_rotator_inline_cleanup(handle, request);
+
+ cmd->priv_handle = NULL;
+ } else if (cmd_type == SDE_ROTATOR_INLINE_CMD_ABORT) {
+ if (!cmd->priv_handle) {
+ ret = -EINVAL;
+ SDEROT_ERR("invalid private handle\n");
+ goto error_invalid_handle;
}
- sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
- sde_rotator_retire_request(request);
+ request = cmd->priv_handle;
+ if (!sde_rotator_is_request_retired(request))
+ sde_rotator_abort_inline_request(rot_dev->mgr,
+ ctx->private, request->req);
}
sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
return 0;
error_handle_request:
@@ -1746,13 +1796,29 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
error_invalid_handle:
error_init_request:
sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
return ret;
}
EXPORT_SYMBOL(sde_rotator_inline_commit);
void sde_rotator_inline_reg_dump(struct platform_device *pdev)
{
- sde_rot_dump_panic(false);
+ struct sde_rotator_device *rot_dev;
+
+ if (!pdev) {
+ SDEROT_ERR("invalid platform device\n");
+ return;
+ }
+
+ rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+ if (!rot_dev || !rot_dev->mgr) {
+ SDEROT_ERR("invalid rotator device\n");
+ return;
+ }
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ sde_rotator_core_dump(rot_dev->mgr);
+ sde_rot_mgr_unlock(rot_dev->mgr);
}
EXPORT_SYMBOL(sde_rotator_inline_reg_dump);
@@ -2755,6 +2821,18 @@ static long sde_rotator_compat_ioctl32(struct file *file,
}
#endif
+static int sde_rotator_ctrl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return -EINVAL;
+}
+
+static int sde_rotator_event_unsubscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return -EINVAL;
+}
+
/* V4l2 ioctl handlers */
static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = {
.vidioc_querycap = sde_rotator_querycap,
@@ -2779,8 +2857,8 @@ static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = {
.vidioc_s_parm = sde_rotator_s_parm,
.vidioc_default = sde_rotator_private_ioctl,
.vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_subscribe_event = sde_rotator_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = sde_rotator_event_unsubscribe,
};
/*
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index 474662e..ba70489 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -27,12 +27,14 @@
* @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
* @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
* @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
+ * @SDE_ROTATOR_INLINE_CMD_ABORT: abort current commit and reset
*/
enum sde_rotator_inline_cmd_type {
SDE_ROTATOR_INLINE_CMD_VALIDATE,
SDE_ROTATOR_INLINE_CMD_COMMIT,
SDE_ROTATOR_INLINE_CMD_START,
SDE_ROTATOR_INLINE_CMD_CLEANUP,
+ SDE_ROTATOR_INLINE_CMD_ABORT,
};
/**
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
index 89ad438..40db488 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
@@ -349,6 +349,12 @@ static int sde_rotator_cancel_hw(struct sde_rot_hw_resource *hw,
return 0;
}
+static int sde_rotator_abort_hw(struct sde_rot_hw_resource *hw,
+ struct sde_rot_entry *entry)
+{
+ return 0;
+}
+
static int sde_rotator_kickoff_entry(struct sde_rot_hw_resource *hw,
struct sde_rot_entry *entry)
{
@@ -691,6 +697,7 @@ int sde_rotator_r1_init(struct sde_rot_mgr *mgr)
mgr->hw_data = hw_data;
mgr->ops_config_hw = sde_rotator_config_hw;
mgr->ops_cancel_hw = sde_rotator_cancel_hw;
+ mgr->ops_abort_hw = sde_rotator_abort_hw;
mgr->ops_kickoff_entry = sde_rotator_kickoff_entry;
mgr->ops_wait_for_entry = sde_rotator_wait_for_entry;
mgr->ops_hw_alloc = sde_rotator_hw_alloc_ext;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 3206844..6ecec03 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -51,6 +51,12 @@
/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT (42 * 8)
+/*
+ * When in sbuf mode, select a much longer wait, to allow the other driver
+ * to detect timeouts and abort if necessary.
+ */
+#define KOFF_TIMEOUT_SBUF (10000)
+
/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM 20
#define DEFAULT_UBWC_MALSIZE 0
@@ -127,6 +133,9 @@
#define SDE_ROTREG_READ(base, off) \
readl_relaxed(base + (off))
+#define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
+ (((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
+
static const u32 sde_hw_rotator_v3_inpixfmts[] = {
SDE_PIX_FMT_XRGB_8888,
SDE_PIX_FMT_ARGB_8888,
@@ -531,6 +540,8 @@ static struct sde_rot_regdump sde_rot_r3_regdump[] = {
SDE_ROT_REGDUMP_READ },
{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
SDE_ROT_REGDUMP_VBIF },
+ { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 0,
+ SDE_ROT_REGDUMP_WRITE },
};
struct sde_rot_cdp_params {
@@ -666,10 +677,29 @@ static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
}
}
+static void sde_hw_rotator_halt_vbif_xin_client(void)
+{
+ struct sde_mdp_vbif_halt_params halt_params;
+
+ memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
+ halt_params.xin_id = XIN_SSPP;
+ halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
+ halt_params.bit_off_mdp_clk_ctrl =
+ MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
+ sde_mdp_halt_vbif_xin(&halt_params);
+
+ memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
+ halt_params.xin_id = XIN_WRITEBACK;
+ halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
+ halt_params.bit_off_mdp_clk_ctrl =
+ MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
+ sde_mdp_halt_vbif_xin(&halt_params);
+}
+
/**
* sde_hw_rotator_reset - Reset rotator hardware
* @rot: pointer to hw rotator
- * @ctx: pointer to current rotator context during the hw hang
+ * @ctx: pointer to current rotator context during the hw hang (optional)
*/
static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
struct sde_hw_rotator_context *ctx)
@@ -683,13 +713,8 @@ static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
int i, j;
unsigned long flags;
- if (!rot || !ctx) {
- SDEROT_ERR("NULL rotator context\n");
- return -EINVAL;
- }
-
- if (ctx->q_id >= ROT_QUEUE_MAX) {
- SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
+ if (!rot) {
+ SDEROT_ERR("NULL rotator\n");
return -EINVAL;
}
@@ -698,6 +723,18 @@ static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
usleep_range(MS_TO_US(10), MS_TO_US(20));
SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
+ /* halt vbif xin client to ensure no pending transaction */
+ sde_hw_rotator_halt_vbif_xin_client();
+
+ /* if no ctx is specified, skip ctx wake up */
+ if (!ctx)
+ return 0;
+
+ if (ctx->q_id >= ROT_QUEUE_MAX) {
+ SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&rot->rotisr_lock, flags);
/* update timestamp register with current context */
@@ -753,10 +790,11 @@ static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
}
/**
- * sde_hw_rotator_dump_status - Dump hw rotator status on error
+ * _sde_hw_rotator_dump_status - Dump hw rotator status on error
* @rot: Pointer to hw rotator
*/
-static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
+static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
+ u32 *ubwcerr)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
u32 reg = 0;
@@ -788,6 +826,11 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
SDE_ROTREG_READ(rot->mdss_base,
REGDMA_CSR_REGDMA_FSM_STATE));
+ SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
+ SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
+ SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));
+
reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
if (ubwcerr)
*ubwcerr = reg;
@@ -799,12 +842,35 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
- SDEROT_ERR(
- "sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
- SDE_ROTREG_READ(rot->mdss_base,
- ROT_WB_SBUF_STATUS_PLANE0),
- SDE_ROTREG_READ(rot->mdss_base,
- ROT_WB_SBUF_STATUS_PLANE1));
+ SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_FETCH_SMP_WR_PLANE0),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_FETCH_SMP_WR_PLANE1),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_FETCH_SMP_WR_PLANE2));
+ SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_SMP_UNPACK_RD_PLANE0),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_SMP_UNPACK_RD_PLANE1),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_SMP_UNPACK_RD_PLANE2));
+ SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_UNPACK_LINE_COUNT),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_UNPACK_BLK_COUNT),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_SSPP_FILL_LEVELS));
+
+ SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_WB_SBUF_STATUS_PLANE0),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_WB_SBUF_STATUS_PLANE1),
+ SDE_ROTREG_READ(rot->mdss_base,
+ ROT_WB_SYS_CACHE_MODE));
}
/**
@@ -1568,7 +1634,7 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
/* use prefill bandwidth instead if specified */
if (cfg->prefill_bw)
- bw = DIV_ROUND_UP(cfg->prefill_bw,
+ bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
TRAFFIC_SHAPE_VSYNC_CLK);
if (bw > 0xFF)
@@ -1766,6 +1832,8 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
}
+ SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);
+
/* timestamp update can only be used in offline multi-context mode */
if (!ctx->sbuf_mode) {
/* Write timestamp after previous rotator job finished */
@@ -1778,6 +1846,8 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
/* ensure command packet is issue before the submit command */
wmb();
+ SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);
+
if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
SDE_ROTREG_WRITE(rot->mdss_base,
REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
@@ -1814,6 +1884,8 @@ static u32 sde_hw_rotator_wait_done_no_regdma(
if (rot->irq_num >= 0) {
SDEROT_DBG("Wait for Rotator completion\n");
rc = wait_for_completion_timeout(&ctx->rot_comp,
+ ctx->sbuf_mode ?
+ msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
msecs_to_jiffies(rot->koff_timeout));
spin_lock_irqsave(&rot->rotisr_lock, flags);
@@ -1872,6 +1944,7 @@ static u32 sde_hw_rotator_wait_done_regdma(
{
struct sde_hw_rotator *rot = ctx->rot;
int rc = 0;
+ bool abort;
u32 status;
u32 last_isr;
u32 last_ts;
@@ -1886,6 +1959,8 @@ static u32 sde_hw_rotator_wait_done_regdma(
ctx, ctx->timestamp);
rc = wait_event_timeout(ctx->regdma_waitq,
!sde_hw_rotator_pending_swts(rot, ctx, &swts),
+ ctx->sbuf_mode ?
+ msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
msecs_to_jiffies(rot->koff_timeout));
ATRACE_INT("sde_rot_done", 0);
@@ -1893,18 +1968,19 @@ static u32 sde_hw_rotator_wait_done_regdma(
last_isr = ctx->last_regdma_isr_status;
last_ts = ctx->last_regdma_timestamp;
+ abort = ctx->abort;
status = last_isr & REGDMA_INT_MASK;
int_id = last_ts & 1;
SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
status, int_id, last_ts);
- if (rc == 0 || (status & REGDMA_INT_ERR_MASK)) {
+ if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
bool pending;
pending = sde_hw_rotator_pending_swts(rot, ctx, &swts);
SDEROT_ERR(
- "Timeout wait for regdma interrupt status, ts:0x%X/0x%X pending:%d\n",
- ctx->timestamp, swts, pending);
+ "Timeout wait for regdma interrupt status, ts:0x%X/0x%X, pending:%d, abort:%d\n",
+ ctx->timestamp, swts, pending, abort);
if (status & REGDMA_WATCHDOG_INT)
SDEROT_ERR("REGDMA watchdog interrupt\n");
@@ -1915,9 +1991,9 @@ static u32 sde_hw_rotator_wait_done_regdma(
else if (status & REGDMA_INVALID_CMD)
SDEROT_ERR("REGDMA invalid command\n");
- sde_hw_rotator_dump_status(rot, &ubwcerr);
+ _sde_hw_rotator_dump_status(rot, &ubwcerr);
- if (ubwcerr) {
+ if (ubwcerr || abort) {
/*
* Perform recovery for ROT SSPP UBWC decode
* error.
@@ -1960,12 +2036,12 @@ static u32 sde_hw_rotator_wait_done_regdma(
if (last_isr & REGDMA_INT_ERR_MASK) {
SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
ctx->timestamp, swts, last_isr);
- sde_hw_rotator_dump_status(rot, NULL);
+ _sde_hw_rotator_dump_status(rot, NULL);
status = ROT_ERROR_BIT;
} else if (pending) {
SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
ctx->timestamp, swts, last_isr);
- sde_hw_rotator_dump_status(rot, NULL);
+ _sde_hw_rotator_dump_status(rot, NULL);
status = ROT_ERROR_BIT;
} else {
status = 0;
@@ -2127,7 +2203,7 @@ void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
struct sde_hw_rotator *rot;
u32 l_ts, h_ts, swts, hwts;
- u32 rotsts, regdmasts;
+ u32 rotsts, regdmasts, rotopmode;
/*
* Check last HW timestamp with SW timestamp before power off event.
@@ -2152,19 +2228,37 @@ void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
regdmasts = SDE_ROTREG_READ(rot->mdss_base,
REGDMA_CSR_REGDMA_BLOCK_STATUS);
rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
+ rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
SDEROT_DBG(
- "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
- swts, hwts, regdmasts, rotsts);
- SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts);
+ "swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
+ swts, hwts, regdmasts, rotsts, rotopmode);
+ SDEROT_EVTLOG(swts, hwts, regdmasts, rotsts, rotopmode);
if ((swts != hwts) && ((regdmasts & REGDMA_BUSY) ||
(rotsts & ROT_STATUS_MASK))) {
SDEROT_ERR(
"Mismatch SWTS with HWTS: swts:0x%x, hwts:0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
swts, hwts, regdmasts, rotsts);
+ _sde_hw_rotator_dump_status(rot, NULL);
SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
"vbif_dbg_bus", "panic");
+ } else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
+ ((regdmasts & REGDMA_BUSY) ||
+ (rotsts & ROT_BUSY_BIT))) {
+ /*
+			 * rotator can get stuck in inline mode while mdp is detached
+ */
+ SDEROT_WARN(
+ "Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
+ regdmasts, rotsts, rotopmode);
+ sde_hw_rotator_reset(rot, NULL);
+ } else if ((regdmasts & REGDMA_BUSY) ||
+ (rotsts & ROT_BUSY_BIT)) {
+ _sde_hw_rotator_dump_status(rot, NULL);
+ SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
+ "vbif_dbg_bus", "panic");
+ sde_hw_rotator_reset(rot, NULL);
}
/* Turn off rotator clock after checking rotator registers */
@@ -2468,7 +2562,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
if (status & BIT(0)) {
SDEROT_ERR("rotator busy 0x%x\n",
status);
- sde_hw_rotator_dump_status(rot, NULL);
+ _sde_hw_rotator_dump_status(rot, NULL);
SDEROT_EVTLOG_TOUT_HANDLER("rot",
"vbif_dbg_bus",
"panic");
@@ -2572,6 +2666,12 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
wb_cfg.fps = entry->perf->config.frame_rate;
wb_cfg.bw = entry->perf->bw;
wb_cfg.fmt = sde_get_format_params(item->output.format);
+ if (!wb_cfg.fmt) {
+ SDEROT_ERR("null format\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
wb_cfg.dst_rect = &item->dst_rect;
wb_cfg.data = &entry->dst_buf;
sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
@@ -2750,6 +2850,42 @@ static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
return 0;
}
+static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
+ struct sde_rot_entry *entry)
+{
+ struct sde_hw_rotator *rot;
+ struct sde_hw_rotator_resource_info *resinfo;
+ struct sde_hw_rotator_context *ctx;
+ unsigned long flags;
+
+ if (!hw || !entry) {
+ SDEROT_ERR("null hw resource/entry\n");
+ return -EINVAL;
+ }
+
+ resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
+ rot = resinfo->rot;
+
+ /* Lookup rotator context from session-id */
+ ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+ entry->item.sequence_id, hw->wb_id);
+ if (!ctx) {
+ SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
+ entry->item.session_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&rot->rotisr_lock, flags);
+ sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
+ ctx->abort = true;
+ wake_up_all(&ctx->regdma_waitq);
+ spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+
+ SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
+
+ return 0;
+}
+
/*
* sde_hw_rotator_wait4done - wait for completion notification
* @hw: Pointer to rotator resource
@@ -3414,6 +3550,21 @@ static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
}
/*
+ * sde_hw_rotator_dump_status - dump status to debug output
+ * @mgr: Pointer to rotator manager
+ * return: none
+ */
+static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
+{
+ if (!mgr || !mgr->hw_data) {
+ SDEROT_ERR("null parameters\n");
+ return;
+ }
+
+ _sde_hw_rotator_dump_status(mgr->hw_data, NULL);
+}
+
+/*
* sde_hw_rotator_parse_dt - parse r3 specific device tree settings
* @hw_data: Pointer to rotator hw
* @dev: Pointer to platform device
@@ -3529,6 +3680,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
mgr->ops_hw_free = sde_hw_rotator_free_ext;
mgr->ops_config_hw = sde_hw_rotator_config;
mgr->ops_cancel_hw = sde_hw_rotator_cancel;
+ mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
@@ -3541,6 +3693,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
+ mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index 2afd032..aaaa28c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -50,6 +50,8 @@
#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA 2
#define ROTTOP_START_CTRL_TRIG_SEL_MDP 3
+#define ROTTOP_OP_MODE_ROT_OUT_MASK (0x3 << 4)
+
/* SDE_ROT_SSPP:
* OFFSET=0x0A8900
*/
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 8b391ea..1ff43d6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -231,6 +231,7 @@ struct sde_hw_rotator_context {
bool is_secure;
bool is_traffic_shaping;
bool sbuf_mode;
+ bool abort;
u32 start_ctrl;
u32 sys_cache_mode;
u32 op_mode;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index e75f36e..b817ff0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -36,6 +36,14 @@
#define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec"
+#ifndef SZ_4G
+#define SZ_4G (((size_t) SZ_1G) * 4)
+#endif
+
+#ifndef SZ_2G
+#define SZ_2G (((size_t) SZ_1G) * 2)
+#endif
+
struct sde_smmu_domain {
char *ctx_name;
int domain;
@@ -487,9 +495,9 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain,
}
static struct sde_smmu_domain sde_rot_unsec = {
- "rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_1G - SZ_128K)};
+ "rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_2G, (SZ_4G - SZ_2G)};
static struct sde_smmu_domain sde_rot_sec = {
- "rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_1G, SZ_2G};
+ "rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_2G, (SZ_4G - SZ_2G)};
static const struct of_device_id sde_smmu_dt_match[] = {
{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
index 45e8771..cdcfa96 100644
--- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c
@@ -312,6 +312,7 @@ static int __bpp(enum hal_uncompressed_format f)
case HAL_COLOR_FORMAT_NV12_UBWC:
return 8;
case HAL_COLOR_FORMAT_NV12_TP10_UBWC:
+ case HAL_COLOR_FORMAT_P010:
return 10;
default:
dprintk(VIDC_ERR,
@@ -462,7 +463,7 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d,
bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
- fp_div(FP_INT(256 * 30), FP_INT(1000 * 1000)));
+ fp_div(FP_INT(((int)(256 * fps))), FP_INT(1000 * 1000)));
dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
@@ -715,7 +716,7 @@ static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d,
bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8));
bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc,
- fp_div(FP_INT(256 * 30), FP_INT(1000 * 1000)));
+ fp_div(FP_INT(((int)(256 * fps))), FP_INT(1000 * 1000)));
dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc :
fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor,
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index cf0413e..b6f206e 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1632,7 +1632,11 @@ int create_pkt_cmd_session_set_property(
&pkt->rg_property_data[1];
hfi->input_color_primaries = hal->input_color_primaries;
- hfi->custom_matrix_enabled = hal->custom_matrix_enabled;
+ if (hal->custom_matrix_enabled)
+ /* Bit Mask to enable all custom values */
+ hfi->custom_matrix_enabled = 0x7;
+ else
+ hfi->custom_matrix_enabled = 0x0;
memcpy(hfi->csc_matrix, hal->csc_matrix,
sizeof(hfi->csc_matrix));
memcpy(hfi->csc_bias, hal->csc_bias, sizeof(hfi->csc_bias));
@@ -1877,6 +1881,22 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(*work_mode);
break;
}
+ case HAL_PARAM_VENC_HDR10_PQ_SEI:
+ {
+ struct hfi_hdr10_pq_sei *hfi;
+ struct hal_hdr10_pq_sei *prop =
+ (struct hal_hdr10_pq_sei *) pdata;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI;
+ hfi = (struct hfi_hdr10_pq_sei *)
+ &pkt->rg_property_data[1];
+
+ memcpy(hfi, prop, sizeof(*hfi));
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_hdr10_pq_sei);
+ break;
+ }
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 52b9b32..03dfde6 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -348,11 +348,10 @@ static int hfi_process_session_error(u32 device_id,
info->response_type = HAL_RESPONSE_UNUSED;
break;
default:
- /* All other errors are not expected and treated as sys error */
dprintk(VIDC_ERR,
- "%s: data1 %#x, data2 %#x, treat as sys error\n",
- __func__, pkt->event_data1, pkt->event_data2);
- info->response_type = HAL_SYS_ERROR;
+ "%s: session %x data1 %#x, data2 %#x\n", __func__,
+ pkt->session_id, pkt->event_data1, pkt->event_data2);
+ info->response_type = HAL_SESSION_ERROR;
break;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 286a67e..9238176 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -808,7 +808,7 @@ static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
{
- int rc = 0;
+ int rc = 0, temp;
struct hal_nal_stream_format_supported stream_format;
struct hal_enable_picture enable_picture;
struct hal_enable hal_property;
@@ -1033,6 +1033,31 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
rc);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY:
+ temp_ctrl = TRY_GET_CTRL(
+ V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT);
+ switch (temp_ctrl->val) {
+ case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC:
+ temp = V4L2_PIX_FMT_NV12_UBWC;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC:
+ temp = V4L2_PIX_FMT_NV12_TP10_UBWC;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_NONE:
+ default:
+ dprintk(VIDC_DBG,
+ "set default dpb color format as NV12_UBWC\n");
+ temp = V4L2_PIX_FMT_NV12_UBWC;
+ break;
+ }
+ rc = msm_comm_set_color_format(inst,
+ HAL_BUFFER_OUTPUT, temp);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s Failed setting output color format: %#x\n",
+ __func__, rc);
+ break;
+ }
+
multi_stream.buffer_type = HAL_BUFFER_OUTPUT2;
multi_stream.enable = true;
pdata = &multi_stream;
@@ -1258,6 +1283,14 @@ int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst,
}
rc = msm_vidc_update_host_buff_counts(inst);
inst->clk_data.dpb_fourcc = fourcc;
+ control.id =
+ V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT;
+ control.value = ext_control[i].value;
+ rc = msm_comm_s_ctrl(inst, &control);
+ if (rc)
+ dprintk(VIDC_ERR,
+ "%s: set control dpb color format %d failed\n",
+ __func__, control.value);
break;
default:
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 266c50e..dd62fb7 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -675,7 +675,7 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_ROI_QP,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -695,8 +695,7 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
(1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
- (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
- (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -1068,6 +1067,135 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
),
.qmenu = mpeg_video_flip,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_HDR_INFO,
+ .name = "Enable/Disable HDR INFO",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_VIDC_VENC_HDR_INFO_DISABLED,
+ .maximum = V4L2_MPEG_VIDC_VENC_HDR_INFO_ENABLED,
+ .default_value = V4L2_MPEG_VIDC_VENC_HDR_INFO_DISABLED,
+ .step = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_00,
+ .name = "RGB PRIMARIES[0][0]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_01,
+ .name = "RGB PRIMARIES[0][1]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_10,
+ .name = "RGB PRIMARIES[1][0]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_11,
+ .name = "RGB PRIMARIES[1][1]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_20,
+ .name = "RGB PRIMARIES[2][0]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_21,
+ .name = "RGB PRIMARIES[2][1]",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_X,
+ .name = "WHITE POINT X",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_Y,
+ .name = "WHITE POINT Y",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_MAX_DISP_LUM,
+ .name = "MAX DISPLAY LUMINANCE",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_MIN_DISP_LUM,
+ .name = "MIN DISPLAY LUMINANCE",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_MAX_CLL,
+ .name = "MAX CLL",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_MAX_FLL,
+ .name = "MAX FLL",
+ .type = V4L2_CTRL_TYPE_U32,
+ .minimum = 0,
+ .maximum = UINT_MAX,
+ .default_value = 0,
+ .step = 1,
+ .qmenu = NULL,
+ },
};
@@ -2095,6 +2223,19 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
+ case V4L2_CID_MPEG_VIDC_VENC_HDR_INFO:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_00:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_01:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_10:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_11:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_20:
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_21:
+ case V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_X:
+ case V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_Y:
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_DISP_LUM:
+ case V4L2_CID_MPEG_VIDC_VENC_MIN_DISP_LUM:
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_CLL:
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_FLL:
dprintk(VIDC_DBG, "Set the control : %#x using ext ctrl\n",
ctrl->id);
break;
@@ -2133,6 +2274,11 @@ int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
struct hal_frame_size blur_res;
struct hal_quantization_range qp_range;
struct hal_quantization qp;
+ struct hal_hdr10_pq_sei hdr10_sei_params;
+ struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei
+ = &(hdr10_sei_params.disp_color_sei);
+ struct msm_vidc_content_light_level_sei_payload *cll_sei
+ = &(hdr10_sei_params.cll_sei);
if (!inst || !inst->core || !inst->core->device || !ctrl) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -2281,6 +2427,75 @@ int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
i++;
}
break;
+ case V4L2_CID_MPEG_VIDC_VENC_HDR_INFO:
+ if (control[i].value ==
+ V4L2_MPEG_VIDC_VENC_HDR_INFO_DISABLED)
+ break;
+ memset(&hdr10_sei_params, 0, sizeof(hdr10_sei_params));
+ i++;
+ while (i < ctrl->count) {
+ switch (control[i].id) {
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_00:
+ mdisp_sei->nDisplayPrimariesX[0] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_01:
+ mdisp_sei->nDisplayPrimariesY[0] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_10:
+ mdisp_sei->nDisplayPrimariesX[1] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_11:
+ mdisp_sei->nDisplayPrimariesY[1] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_20:
+ mdisp_sei->nDisplayPrimariesX[2] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_21:
+ mdisp_sei->nDisplayPrimariesY[2] =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_X:
+ mdisp_sei->nWhitePointX =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_Y:
+ mdisp_sei->nWhitePointY =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_DISP_LUM:
+ mdisp_sei->
+ nMaxDisplayMasteringLuminance =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_MIN_DISP_LUM:
+ mdisp_sei->
+ nMinDisplayMasteringLuminance =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_CLL:
+ cll_sei->nMaxContentLight =
+ control[i].value;
+ break;
+ case V4L2_CID_MPEG_VIDC_VENC_MAX_FLL:
+ cll_sei->nMaxPicAverageLight =
+ control[i].value;
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "Unknown Ctrl:%d, not part of HDR Info",
+ control[i].id);
+ }
+ i++;
+ }
+ property_id =
+ HAL_PARAM_VENC_HDR10_PQ_SEI;
+ pdata = &hdr10_sei_params;
+ break;
default:
dprintk(VIDC_ERR, "Invalid id set: %d\n",
control[i].id);
@@ -2543,6 +2758,19 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
inst->bufq[fmt->type].plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
+ /*
+ * Input extradata buffer size may change upon updating
+ * CAPTURE plane buffer size.
+ */
+
+ extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+ buff_req_buffer = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_INPUT);
+ inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+ buff_req_buffer ?
+ buff_req_buffer->buffer_size : 0;
+ }
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
struct hal_frame_size frame_sz;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 907e01f..dabe667 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -605,8 +605,10 @@ int msm_vidc_streamon(void *instance, enum v4l2_buf_type i)
mutex_lock(&q->lock);
rc = vb2_streamon(&q->vb2_bufq, i);
mutex_unlock(&q->lock);
- if (rc)
+ if (rc) {
dprintk(VIDC_ERR, "streamon failed on port: %d\n", i);
+ msm_comm_kill_session(inst);
+ }
return rc;
}
EXPORT_SYMBOL(msm_vidc_streamon);
@@ -1011,10 +1013,9 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
}
fail_start:
- if (rc) {
- dprintk(VIDC_ERR, "%s: kill session %pK\n", __func__, inst);
- msm_comm_kill_session(inst);
- }
+ if (rc)
+ dprintk(VIDC_ERR, "%s: inst %pK session %x failed to start\n",
+ __func__, inst, hash32_ptr(inst->session));
return rc;
}
@@ -1781,12 +1782,6 @@ static void msm_vidc_cleanup_instance(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR,
"Failed to release mark_data buffers\n");
- /*
- * At this point all buffes should be with driver
- * irrespective of scenario
- */
- msm_comm_validate_output_buffers(inst);
-
msm_comm_release_eos_buffers(inst);
if (msm_comm_release_output_buffers(inst, true))
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 5183ddd..1d22077 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -146,7 +146,7 @@ static int fill_dynamic_stats(struct msm_vidc_inst *inst,
vote_data->use_dpb_read = false;
/* Check if driver can vote for lower bus BW */
- if (inst->clk_data.load <= inst->clk_data.load_norm) {
+ if (inst->clk_data.load < inst->clk_data.load_norm) {
vote_data->compression_ratio = max_cr;
vote_data->complexity_factor = min_cf;
vote_data->input_cr = max_input_cr;
@@ -283,9 +283,11 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
return rc;
}
-static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
+static inline int get_bufs_outside_fw(struct msm_vidc_inst *inst)
{
- int fw_out_qsize = 0;
+ u32 fw_out_qsize = 0, i = 0;
+ struct vb2_queue *q = NULL;
+ struct vb2_buffer *vb = NULL;
/*
* DCVS always operates on Uncompressed buffers.
@@ -294,10 +296,36 @@ static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
if (inst->state >= MSM_VIDC_OPEN_DONE &&
inst->state < MSM_VIDC_STOP_DONE) {
- if (inst->session_type == MSM_VIDC_DECODER)
- fw_out_qsize = inst->count.ftb - inst->count.fbd;
- else
- fw_out_qsize = inst->count.etb - inst->count.ebd;
+
+ /*
+		 * For decoder, some frames held by the client are
+		 * not to be displayed. Ex : VP9 DECODE_ONLY frames.
+ * Hence don't count them.
+ */
+
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ struct vb2_v4l2_buffer *vbuf = NULL;
+
+ q = &inst->bufq[CAPTURE_PORT].vb2_bufq;
+ for (i = 0; i < q->num_buffers; i++) {
+ vb = q->bufs[i];
+ if (!vb)
+ continue;
+ vbuf = to_vb2_v4l2_buffer(vb);
+ if (vbuf &&
+ vb->state != VB2_BUF_STATE_ACTIVE &&
+ !(vbuf->flags &
+ V4L2_QCOM_BUF_FLAG_DECODEONLY))
+ fw_out_qsize++;
+ }
+ } else {
+ q = &inst->bufq[OUTPUT_PORT].vb2_bufq;
+ for (i = 0; i < q->num_buffers; i++) {
+ vb = q->bufs[i];
+ if (vb && vb->state != VB2_BUF_STATE_ACTIVE)
+ fw_out_qsize++;
+ }
+ }
}
return fw_out_qsize;
@@ -328,7 +356,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
core = inst->core;
mutex_lock(&inst->lock);
- fw_pending_bufs = get_pending_bufs_fw(inst);
+ buffers_outside_fw = get_bufs_outside_fw(inst);
output_buf_req = get_buff_req_buffer(inst,
dcvs->buffer_type);
@@ -345,8 +373,8 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
min_output_buf = output_buf_req->buffer_count_min;
- /* Buffers outside FW are with display */
- buffers_outside_fw = total_output_buf - fw_pending_bufs;
+ /* Buffers outside Display are with FW. */
+ fw_pending_bufs = total_output_buf - buffers_outside_fw;
dprintk(VIDC_PROF,
"Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
total_output_buf, min_output_buf, fw_pending_bufs,
@@ -372,7 +400,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
if (buffers_outside_fw <= dcvs->max_threshold)
dcvs->load = dcvs->load_high;
- else if (fw_pending_bufs <= min_output_buf)
+ else if (fw_pending_bufs < min_output_buf)
dcvs->load = dcvs->load_low;
else
dcvs->load = dcvs->load_norm;
@@ -877,10 +905,7 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst)
return;
}
dcvs->max_threshold = output_buf_req->buffer_count_actual -
- output_buf_req->buffer_count_min_host + 1;
- /* Compensate for decode only frames */
- if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9)
- dcvs->max_threshold += 2;
+ output_buf_req->buffer_count_min_host + 2;
dcvs->min_threshold =
msm_vidc_get_extra_buff_count(inst, dcvs->buffer_type);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ed3cfa3..9dce3f9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2108,7 +2108,8 @@ static void handle_session_error(enum hal_command_response cmd, void *data)
}
hdev = inst->core->device;
- dprintk(VIDC_WARN, "Session error received for session %pK\n", inst);
+ dprintk(VIDC_ERR, "Session error received for inst %pK session %x\n",
+ inst, hash32_ptr(inst->session));
if (response->status == VIDC_ERR_MAX_CLIENTS) {
dprintk(VIDC_WARN, "Too many clients, rejecting %pK", inst);
@@ -2131,6 +2132,8 @@ static void handle_session_error(enum hal_command_response cmd, void *data)
event = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
}
+ /* change state before sending error to client */
+ change_inst_state(inst, MSM_VIDC_CORE_INVALID);
msm_vidc_queue_v4l2_event(inst, event);
put_inst(inst);
}
@@ -2202,6 +2205,8 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
if (!core->trigger_ssr)
msm_comm_print_inst_info(inst);
}
+	/* handle the hw error before core release to get full debug info */
+ msm_vidc_handle_hw_error(core);
dprintk(VIDC_DBG, "Calling core_release\n");
rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
if (rc) {
@@ -2212,10 +2217,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
core->state = VIDC_CORE_UNINIT;
mutex_unlock(&core->lock);
- dprintk(VIDC_ERR,
- "SYS_ERROR can potentially crash the system\n");
-
- msm_vidc_handle_hw_error(core);
+ dprintk(VIDC_WARN, "SYS_ERROR handled.\n");
}
void msm_comm_session_clean(struct msm_vidc_inst *inst)
@@ -2328,9 +2330,6 @@ int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst,
__func__, vb->type);
return -EINVAL;
}
- msm_vidc_debugfs_update(inst, port == CAPTURE_PORT ?
- MSM_VIDC_DEBUGFS_EVENT_FBD :
- MSM_VIDC_DEBUGFS_EVENT_EBD);
mutex_lock(&inst->bufq[port].lock);
vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
@@ -2385,8 +2384,8 @@ static void handle_ebd(enum hal_command_response cmd, void *data)
empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
/* If this is internal EOS buffer, handle it in driver */
if (is_eos_buffer(inst, empty_buf_done->packet_buffer)) {
- dprintk(VIDC_DBG, "Received EOS buffer %pK\n",
- (void *)(u64)empty_buf_done->packet_buffer);
+ dprintk(VIDC_DBG, "Received EOS buffer 0x%x\n",
+ empty_buf_done->packet_buffer);
goto exit;
}
@@ -2456,6 +2455,7 @@ static void handle_ebd(enum hal_command_response cmd, void *data)
*/
msm_comm_put_vidc_buffer(inst, mbuf);
msm_comm_vb2_buffer_done(inst, vb2);
+ msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD);
kref_put_mbuf(mbuf);
exit:
put_inst(inst);
@@ -2657,6 +2657,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
*/
msm_comm_put_vidc_buffer(inst, mbuf);
msm_comm_vb2_buffer_done(inst, vb2);
+ msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD);
kref_put_mbuf(mbuf);
exit:
@@ -2796,7 +2797,8 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst)
hdev = inst->core->device;
abort_completion = SESSION_MSG_INDEX(HAL_SESSION_ABORT_DONE);
- dprintk(VIDC_WARN, "%s: inst %pK\n", __func__, inst);
+ dprintk(VIDC_WARN, "%s: inst %pK session %x\n", __func__,
+ inst, hash32_ptr(inst->session));
rc = call_hfi_op(hdev, session_abort, (void *)inst->session);
if (rc) {
dprintk(VIDC_ERR,
@@ -2808,8 +2810,8 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst)
msecs_to_jiffies(
inst->core->resources.msm_vidc_hw_rsp_timeout));
if (!rc) {
- dprintk(VIDC_ERR, "%s: inst %pK abort timed out\n",
- __func__, inst);
+ dprintk(VIDC_ERR, "%s: inst %pK session %x abort timed out\n",
+ __func__, inst, hash32_ptr(inst->session));
msm_comm_generate_sys_error(inst);
rc = -EBUSY;
} else {
@@ -3701,8 +3703,8 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
if (inst->state == MSM_VIDC_CORE_INVALID) {
dprintk(VIDC_ERR, "%s: inst %pK is in invalid\n",
__func__, inst);
- mutex_unlock(&inst->sync_lock);
- return -EINVAL;
+ rc = -EINVAL;
+ goto exit;
}
flipped_state = get_flipped_state(inst->state, state);
@@ -3783,6 +3785,8 @@ int msm_comm_try_state(struct msm_vidc_inst *inst, int state)
rc = -EINVAL;
break;
}
+
+exit:
mutex_unlock(&inst->sync_lock);
if (rc) {
@@ -3821,8 +3825,8 @@ int msm_vidc_send_pending_eos_buffers(struct msm_vidc_inst *inst)
data.timestamp = LLONG_MAX;
data.extradata_addr = data.device_addr;
data.extradata_size = 0;
- dprintk(VIDC_DBG, "Queueing EOS buffer %pK\n",
- (void *)(u64)data.device_addr);
+ dprintk(VIDC_DBG, "Queueing EOS buffer 0x%x\n",
+ data.device_addr);
hdev = inst->core->device;
rc = call_hfi_op(hdev, session_etb, inst->session,
@@ -4176,6 +4180,19 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
output_count = (batch_mode ? &count_single_batch : &count_buffers)
(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!batch_mode && mbuf) {
+ /*
+ * don't queue output_mplane buffers if buffer queued
+ * by client is capture_mplane type and vice versa.
+ */
+ if (mbuf->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ output_count = 0;
+ else if (mbuf->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ capture_count = 0;
+ }
+
/*
* Somewhat complicated logic to prevent queuing the buffer to hardware.
* Don't queue if:
@@ -5028,6 +5045,9 @@ static void msm_comm_flush_in_invalid_state(struct msm_vidc_inst *inst)
enum vidc_ports ports[] = {OUTPUT_PORT, CAPTURE_PORT};
int c = 0;
+	/* before flush, ensure venus has released all buffers */
+ msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+
for (c = 0; c < ARRAY_SIZE(ports); ++c) {
enum vidc_ports port = ports[c];
@@ -5090,6 +5110,9 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
return 0;
}
+ /* enable in flush */
+ inst->in_flush = true;
+
mutex_lock(&inst->registeredbufs.lock);
list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) {
/* don't flush input buffers if input flush is not requested */
@@ -5130,9 +5153,6 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
}
mutex_unlock(&inst->registeredbufs.lock);
- /* enable in flush */
- inst->in_flush = true;
-
hdev = inst->core->device;
if (ip_flush) {
dprintk(VIDC_DBG, "Send flush on all ports to firmware\n");
@@ -5440,7 +5460,7 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
int rc = 0;
struct hfi_device *hdev;
struct msm_vidc_core *core;
- u32 output_height, output_width;
+ u32 output_height, output_width, input_height, input_width;
u32 rotation;
if (!inst || !inst->core || !inst->core->device) {
@@ -5463,6 +5483,22 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
return -ENOTSUPP;
}
+ output_height = inst->prop.height[CAPTURE_PORT];
+ output_width = inst->prop.width[CAPTURE_PORT];
+ input_height = inst->prop.height[OUTPUT_PORT];
+ input_width = inst->prop.width[OUTPUT_PORT];
+
+ if (input_width % 2 != 0 || input_height % 2 != 0 ||
+ output_width % 2 != 0 || output_height % 2 != 0) {
+ dprintk(VIDC_ERR,
+ "Height and Width should be even numbers for NV12\n");
+ dprintk(VIDC_ERR,
+ "Input WxH = (%u)x(%u), Output WxH = (%u)x(%u)\n",
+ input_width, input_height,
+ output_width, output_height);
+ rc = -ENOTSUPP;
+ }
+
rotation = msm_comm_g_ctrl_for_id(inst,
V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
@@ -5560,8 +5596,8 @@ int msm_comm_kill_session(struct msm_vidc_inst *inst)
return 0;
}
- dprintk(VIDC_WARN, "%s: inst %pK, state %d\n", __func__,
- inst, inst->state);
+ dprintk(VIDC_WARN, "%s: inst %pK, session %x state %d\n", __func__,
+ inst, hash32_ptr(inst->session), inst->state);
/*
* We're internally forcibly killing the session, if fw is aware of
* the session send session_abort to firmware to clean up and release
@@ -5572,8 +5608,9 @@ int msm_comm_kill_session(struct msm_vidc_inst *inst)
inst->state == MSM_VIDC_CORE_INVALID) {
rc = msm_comm_session_abort(inst);
if (rc) {
- dprintk(VIDC_WARN, "%s: inst %pK abort failed\n",
- __func__, inst);
+ dprintk(VIDC_ERR,
+ "%s: inst %pK session %x abort failed\n",
+ __func__, inst, hash32_ptr(inst->session));
change_inst_state(inst, MSM_VIDC_CORE_INVALID);
}
}
@@ -5581,7 +5618,8 @@ int msm_comm_kill_session(struct msm_vidc_inst *inst)
change_inst_state(inst, MSM_VIDC_CLOSE_DONE);
msm_comm_session_clean(inst);
- dprintk(VIDC_WARN, "%s: inst %pK handled\n", __func__, inst);
+ dprintk(VIDC_WARN, "%s: inst %pK session %x handled\n", __func__,
+ inst, hash32_ptr(inst->session));
return rc;
}
@@ -5869,8 +5907,8 @@ int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
goto exit;
}
- fps = USEC_PER_SEC;
- do_div(fps, us_per_frame);
+ fps = us_per_frame > USEC_PER_SEC ?
+ 0 : USEC_PER_SEC / (u32)us_per_frame;
if (fps % 15 == 14 || fps % 24 == 23)
fps = fps + 1;
@@ -6485,7 +6523,10 @@ void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst,
if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
print_vidc_buffer(VIDC_ERR,
"dqbuf: unmap failed..", inst, mbuf);
- } /* else RBR event expected */
+ } else {
+ /* RBR event expected */
+ mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
+ }
}
/*
* remove the entry if plane[0].refcount is zero else
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 2e2dd13..0b6331c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -423,9 +423,9 @@ struct msm_vidc_ctrl {
u32 id;
char name[MAX_NAME_LENGTH];
enum v4l2_ctrl_type type;
- s32 minimum;
- s32 maximum;
- s32 default_value;
+ s64 minimum;
+ s64 maximum;
+ s64 default_value;
u32 step;
u32 menu_skip_mask;
u32 flags;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 1818788..d7641c3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -115,6 +115,10 @@ static struct msm_vidc_common_data sdm845_common_data[] = {
.value = 1,
},
{
+ .key = "qcom,domain-attr-cache-pagetables",
+ .value = 1,
+ },
+ {
.key = "qcom,max-secure-instances",
.value = 5,
},
@@ -140,11 +144,11 @@ static struct msm_vidc_common_data sdm845_common_data[] = {
},
{
.key = "qcom,power-collapse-delay",
- .value = 500,
+ .value = 1500,
},
{
.key = "qcom,hw-resp-timeout",
- .value = 250,
+ .value = 1000,
},
{
.key = "qcom,debug-timeout",
@@ -162,6 +166,14 @@ static struct msm_vidc_common_data sdm670_common_data_v0[] = {
.value = 1,
},
{
+ .key = "qcom,domain-attr-non-fatal-faults",
+ .value = 1,
+ },
+ {
+ .key = "qcom,domain-attr-cache-pagetables",
+ .value = 1,
+ },
+ {
.key = "qcom,max-secure-instances",
.value = 5,
},
@@ -205,6 +217,14 @@ static struct msm_vidc_common_data sdm670_common_data_v1[] = {
.value = 1,
},
{
+ .key = "qcom,domain-attr-non-fatal-faults",
+ .value = 1,
+ },
+ {
+ .key = "qcom,domain-attr-cache-pagetables",
+ .value = 1,
+ },
+ {
.key = "qcom,max-secure-instances",
.value = 5,
},
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index a0214a2..b1a240d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -776,6 +776,8 @@ int read_platform_resources_from_drv_data(
"qcom,hw-resp-timeout");
res->non_fatal_pagefaults = find_key_value(platform_data,
"qcom,domain-attr-non-fatal-faults");
+ res->cache_pagetables = find_key_value(platform_data,
+ "qcom,domain-attr-cache-pagetables");
res->csc_coeff_data = &platform_data->csc_data;
@@ -901,14 +903,14 @@ static int get_secure_vmid(struct context_bank_info *cb)
return VMID_INVAL;
}
-static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
- struct device *dev)
+static int msm_vidc_setup_context_bank(struct msm_vidc_platform_resources *res,
+ struct context_bank_info *cb, struct device *dev)
{
int rc = 0;
int secure_vmid = VMID_INVAL;
struct bus_type *bus;
- if (!dev || !cb) {
+ if (!dev || !cb || !res) {
dprintk(VIDC_ERR,
"%s: Invalid Input params\n", __func__);
return -EINVAL;
@@ -942,6 +944,19 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
}
}
+ if (res->cache_pagetables) {
+ int cache_pagetables = 1;
+
+ rc = iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_USE_UPSTREAM_HINT, &cache_pagetables);
+ if (rc) {
+ WARN_ONCE(rc,
+ "%s: failed to set cache pagetables attribute, %d\n",
+ __func__, rc);
+ rc = 0;
+ }
+ }
+
rc = arm_iommu_attach_device(cb->dev, cb->mapping);
if (rc) {
dprintk(VIDC_ERR, "%s - Couldn't arm_iommu_attach_device\n",
@@ -1055,7 +1070,7 @@ static int msm_vidc_populate_context_bank(struct device *dev,
cb->name, cb->addr_range.start,
cb->addr_range.size, cb->buffer_type);
- rc = msm_vidc_setup_context_bank(cb, dev);
+ rc = msm_vidc_setup_context_bank(&core->resources, cb, dev);
if (rc) {
dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
goto err_setup_cb;
@@ -1167,7 +1182,7 @@ static int msm_vidc_populate_legacy_context_bank(
goto err_setup_cb;
}
- rc = msm_vidc_setup_context_bank(cb, cb->dev);
+ rc = msm_vidc_setup_context_bank(res, cb, cb->dev);
if (rc) {
dprintk(VIDC_ERR, "Cannot setup context bank %d\n", rc);
goto err_setup_cb;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 99b4e30..23e33fe 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -185,6 +185,7 @@ struct msm_vidc_platform_resources {
int msm_vidc_firmware_unload_delay;
uint32_t msm_vidc_pwr_collapse_delay;
bool non_fatal_pagefaults;
+ bool cache_pagetables;
struct msm_vidc_codec_data *codec_data;
int codec_data_count;
struct msm_vidc_csc_coeff *csc_coeff_data;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 60169e9..7e7ed47 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -503,6 +503,11 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
queue->qhdr_rx_req = receive_request;
+ /*
+ * mb() to ensure qhdr is updated in main memory
+ * so that venus reads the updated header values
+ */
+ mb();
*pb_tx_req_is_set = 0;
dprintk(VIDC_DBG,
"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
@@ -550,6 +555,11 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
queue->qhdr_rx_req = 0;
else
queue->qhdr_rx_req = receive_request;
+ /*
+ * mb() to ensure qhdr is updated in main memory
+ * so that venus reads the updated header values
+ */
+ mb();
*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 53df90f5..2260b55 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -226,6 +226,7 @@ enum hal_property {
HAL_PARAM_VIDEO_CORES_USAGE,
HAL_PARAM_VIDEO_WORK_MODE,
HAL_PARAM_SECURE,
+ HAL_PARAM_VENC_HDR10_PQ_SEI,
};
enum hal_domain {
@@ -1398,6 +1399,11 @@ struct hal_cmd_sys_get_property_packet {
u32 rg_property_data[1];
};
+struct hal_hdr10_pq_sei {
+ struct msm_vidc_mastering_display_colour_sei_payload disp_color_sei;
+ struct msm_vidc_content_light_level_sei_payload cll_sei;
+};
+
#define call_hfi_op(q, op, args...) \
(((q) && (q)->op) ? ((q)->op(args)) : 0)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 001ca39..ca6d803 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -321,6 +321,10 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x033)
#define HFI_PROPERTY_PARAM_VENC_IFRAMESIZE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034)
+#define HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x035)
+#define HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x036)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
@@ -1058,4 +1062,24 @@ struct hfi_cmd_sys_test_ssr_packet {
u32 packet_type;
u32 trigger_type;
};
+
+struct hfi_mastering_display_colour_sei_payload {
+ u32 display_primariesX[3];
+ u32 display_primariesY[3];
+ u32 white_pointX;
+ u32 white_pointY;
+ u32 max_display_mastering_luminance;
+ u32 min_display_mastering_luminance;
+};
+
+struct hfi_content_light_level_sei_payload {
+ u32 max_content_light;
+ u32 max_pic_average_light;
+};
+
+struct hfi_hdr10_pq_sei {
+ struct hfi_mastering_display_colour_sei_payload mdisp_info;
+ struct hfi_content_light_level_sei_payload cll_info;
+};
+
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c9bf58c..04b8b87 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -23,6 +23,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/sched.h>
#include <linux/sizes.h>
+#include <linux/dma-mapping.h>
#include "mtk_vpu.h"
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8b099fe..71b65ab 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
*/
if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
(ven_req->bRequest == 0x5) ||
- (ven_req->bRequest == 0x6))) {
+ (ven_req->bRequest == 0x6) ||
+
+ /* Internal Master 3 Bus can send
+ * and receive only 4 bytes per time
+ */
+ (ven_req->bRequest == 0x2))) {
unsend_size = 0;
pdata = ven_req->pBuff;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 252ab99..9f2a64c 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2005,6 +2005,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}
+ /* Validate the user-provided bit-size and offset */
+ if (mapping->size > 32 ||
+ mapping->offset + mapping->size > ctrl->info.size * 8) {
+ ret = -EINVAL;
+ goto done;
+ }
+
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 207cc49..8062d37 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -98,7 +98,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
if (!valid_bank(bank))
@@ -114,11 +114,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
u8 bank;
if (sysctrl_dev == NULL)
- return -EINVAL;
+ return -EPROBE_DEFER;
bank = (reg >> 8);
- if (!valid_bank(bank))
+ if (!valid_bank(bank)) {
+ pr_err("invalid bank\n");
return -EINVAL;
+ }
return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
(u8)(reg & 0xFF), mask, value);
@@ -145,9 +147,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = ab8500_sysctrl_remove,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index ba130be..9617fc3 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -205,14 +205,14 @@ static struct resource axp22x_pek_resources[] = {
static struct resource axp288_power_button_resources[] = {
{
.name = "PEK_DBR",
- .start = AXP288_IRQ_POKN,
- .end = AXP288_IRQ_POKN,
+ .start = AXP288_IRQ_POKP,
+ .end = AXP288_IRQ_POKP,
.flags = IORESOURCE_IRQ,
},
{
.name = "PEK_DBF",
- .start = AXP288_IRQ_POKP,
- .end = AXP288_IRQ_POKP,
+ .start = AXP288_IRQ_POKN,
+ .end = AXP288_IRQ_POKN,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b..ae85616 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -244,6 +244,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
ctx->real_mode = false;
}
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index afa2113..d3e0094 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -91,7 +91,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
- cxl_ctx_get();
/* indicate success */
rc = 0;
@@ -213,6 +212,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
+ cxl_ctx_get();
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
@@ -222,6 +227,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
put_pid(ctx->glpid);
put_pid(ctx->pid);
ctx->glpid = ctx->pid = NULL;
+ cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index fa4fe02..eef202d 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1620,6 +1620,9 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
cxl_sysfs_adapter_remove(adapter);
cxl_debugfs_adapter_remove(adapter);
+ /* Flush adapter datacache as its about to be removed */
+ cxl_data_cache_flush(adapter);
+
cxl_deconfigure_adapter(adapter);
device_unregister(&adapter->dev);
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 7eeb71a..4d44084 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -535,7 +535,9 @@ static void __exit lkdtm_module_exit(void)
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_jprobe != NULL)
+ unregister_jprobe(lkdtm_jprobe);
+
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index e2af61f..451d417 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1320,6 +1320,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
return -EOPNOTSUPP;
}
+ if (!mei_cl_is_connected(cl))
+ return -ENODEV;
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index fe5dad7..4c4835d 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1900,20 +1900,22 @@ static int __qseecom_process_blocked_on_listener_legacy(
ptr_app->blocked_on_listener_id = resp->data;
/* sleep until listener is available */
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ do {
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, app_id %d\n",
+ pr_err("Interrupted: listener_id %d, app_id %d\n",
resp->data, ptr_app->app_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
- mutex_lock(&app_access_lock);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
@@ -1964,18 +1966,20 @@ static int __qseecom_process_blocked_on_listener_smcinvoke(
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
/* sleep until listener is available */
- qseecom.app_block_ref_cnt++;
- mutex_unlock(&app_access_lock);
- if (wait_event_freezable(
+ do {
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use)) {
- pr_err("Interrupted: listener_id %d, session_id %d\n",
+ pr_err("Interrupted: listener_id %d, session_id %d\n",
resp->data, session_id);
- ret = -ERESTARTSYS;
- goto exit;
- }
- mutex_lock(&app_access_lock);
- qseecom.app_block_ref_cnt--;
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+ } while (list_ptr->listener_in_use);
/* notify TZ that listener is available */
pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
@@ -2616,6 +2620,8 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
if (!strcmp((void *)ptr_app->app_name,
(void *)data->client.app_name)) {
found_app = true;
+ if (ptr_app->app_blocked)
+ app_crash = false;
if (app_crash || ptr_app->ref_cnt == 1)
unload = true;
break;
@@ -4785,8 +4791,12 @@ int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
resp.data = desc->ret[2]; /*listener_id*/
mutex_lock(&app_access_lock);
- ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+ if (qseecom.qsee_reentrancy_support)
+ ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
+ else
+ ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
+ &resp);
mutex_unlock(&app_access_lock);
if (ret)
pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a209aa6..538a8d9 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1221,16 +1221,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
- mmc_put_card(card);
-
- err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-
if (mmc_card_cmdq(card)) {
if (mmc_cmdq_halt(card->host, false))
pr_err("%s: %s: cmdq unhalt failed\n",
mmc_hostname(card->host), __func__);
}
+ mmc_put_card(card);
+
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index edbf682..30180af 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,10 +456,11 @@ int mmc_clk_update_freq(struct mmc_host *host,
}
EXPORT_SYMBOL(mmc_clk_update_freq);
-void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+int mmc_recovery_fallback_lower_speed(struct mmc_host *host)
{
+ int err = 0;
if (!host->card)
- return;
+ return -EINVAL;
if (host->sdr104_wa && mmc_card_sd(host->card) &&
(host->ios.timing == MMC_TIMING_UHS_SDR104) &&
@@ -467,9 +468,14 @@ void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
mmc_hostname(host), __func__);
mmc_host_clear_sdr104(host);
- mmc_hw_reset(host);
+ err = mmc_hw_reset(host);
host->card->sdr104_blocked = true;
}
+ if (err)
+ pr_err("%s: %s: Fallback to lower speed mode failed with err=%d\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
}
static int mmc_devfreq_set_target(struct device *dev,
@@ -537,7 +543,7 @@ static int mmc_devfreq_set_target(struct device *dev,
if (err && err != -EAGAIN) {
pr_err("%s: clock scale to %lu failed with error %d\n",
mmc_hostname(host), *freq, err);
- mmc_recovery_fallback_lower_speed(host);
+ err = mmc_recovery_fallback_lower_speed(host);
} else {
pr_debug("%s: clock change to %lu finished successfully (%s)\n",
mmc_hostname(host), *freq, current->comm);
@@ -1203,6 +1209,46 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+static int mmc_cmdq_check_retune(struct mmc_host *host)
+{
+ bool cmdq_mode;
+ int err = 0;
+
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card) ||
+ (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR))
+ return 0;
+
+ cmdq_mode = mmc_card_cmdq(host->card);
+ if (cmdq_mode) {
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: %s: failed halting queue (%d)\n",
+ mmc_hostname(host), __func__, err);
+ host->cmdq_ops->dumpstate(host);
+ goto halt_failed;
+ }
+ }
+
+ mmc_retune_hold(host);
+ err = mmc_retune(host);
+ mmc_retune_release(host);
+
+ if (cmdq_mode) {
+ if (mmc_cmdq_halt(host, false)) {
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(host), __func__);
+ host->cmdq_ops->dumpstate(host);
+ }
+ }
+
+halt_failed:
+ pr_debug("%s: %s: Retuning done err: %d\n",
+ mmc_hostname(host), __func__, err);
+
+ return err;
+}
+
static void mmc_start_cmdq_request(struct mmc_host *host,
struct mmc_request *mrq)
{
@@ -1227,6 +1273,7 @@ static void mmc_start_cmdq_request(struct mmc_host *host,
}
mmc_host_clk_hold(host);
+ mmc_cmdq_check_retune(host);
if (likely(host->cmdq_ops->request))
host->cmdq_ops->request(host, mrq);
else
@@ -4306,8 +4353,7 @@ int _mmc_detect_card_removed(struct mmc_host *host)
if (ret) {
if (host->ops->get_cd && host->ops->get_cd(host)) {
- mmc_recovery_fallback_lower_speed(host);
- ret = 0;
+ ret = mmc_recovery_fallback_lower_speed(host);
} else {
mmc_card_set_removed(host->card);
if (host->card->sdr104_blocked) {
@@ -4360,6 +4406,18 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
+/*
+ * This should be called to make sure that detect work(mmc_rescan)
+ * is completed.Drivers may use this function from async schedule/probe
+ * contexts to make sure that the bootdevice detection is completed on
+ * completion of async_schedule.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+ flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
void mmc_rescan(struct work_struct *work)
{
unsigned long flags;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 7163e34..15c3e9e 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -669,9 +669,10 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
{
struct mmc_card *card = filp->private_data;
struct mmc_wr_pack_stats *pack_stats;
- int i;
+ int i, ret = 0;
int max_num_of_packed_reqs = 0;
- char *temp_buf;
+ char *temp_buf, *temp_ubuf;
+ size_t tubuf_cnt = 0;
if (!card)
return cnt;
@@ -697,15 +698,24 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
- temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+ if (cnt <= (strlen_user(ubuf) + 1))
+ goto exit;
+
+ temp_buf = kzalloc(TEMP_BUF_SIZE, GFP_KERNEL);
if (!temp_buf)
goto exit;
+ tubuf_cnt = cnt - strlen_user(ubuf) - 1;
+
+ temp_ubuf = kzalloc(tubuf_cnt, GFP_KERNEL);
+ if (!temp_ubuf)
+ goto cleanup;
+
spin_lock(&pack_stats->lock);
snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
if (pack_stats->packing_events[i]) {
@@ -713,63 +723,63 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
"%s: Packed %d reqs - %d times\n",
mmc_hostname(card->host), i,
pack_stats->packing_events[i]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
}
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: stopped packing due to the following reasons:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of segments\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of sectors\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: wrong data direction\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: flush or discard\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: empty queue\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EMPTY_QUEUE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[REL_WRITE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: rel write\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[REL_WRITE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[THRESHOLD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: Threshold\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[THRESHOLD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
@@ -777,25 +787,36 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
"%s: %d times: Large sector alignment\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[RANDOM]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: random request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[RANDOM]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FUA]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: fua request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FUA]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
+ if (strlen_user(ubuf) < cnt - strlen(temp_ubuf))
+ ret = copy_to_user((ubuf + strlen_user(ubuf)),
+ temp_ubuf, tubuf_cnt);
+ else
+ ret = -EFAULT;
+ if (ret)
+ pr_err("%s: %s: Copy to userspace failed: %s\n",
+ mmc_hostname(card->host), __func__, ubuf);
spin_unlock(&pack_stats->lock);
+ kfree(temp_ubuf);
+
+cleanup:
kfree(temp_buf);
pr_info("%s", ubuf);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 64c8743..8a503b2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -434,7 +434,8 @@ int mmc_retune(struct mmc_host *host)
else
return 0;
- if (!host->need_retune || host->doing_retune || !host->card)
+ if (!host->need_retune || host->doing_retune || !host->card ||
+ mmc_card_hs400es(host->card))
return 0;
host->need_retune = 0;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index efb1b81..dd58288 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1414,6 +1414,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
return err;
}
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1462,6 +1479,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ mmc_select_driver_type(card);
+
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1495,23 +1514,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
return err;
}
-static void mmc_select_driver_type(struct mmc_card *card)
-{
- int card_drv_type, drive_strength, drv_type;
-
- card_drv_type = card->ext_csd.raw_driver_strength |
- mmc_driver_type_mask(0);
-
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
-
- card->drive_strength = drive_strength;
-
- if (drv_type)
- mmc_set_driver_type(card->host, drv_type);
-}
-
/*
* For device supporting HS200 mode, the following sequence
* should be done before executing the tuning process.
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e32ed3d..6098489 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -277,7 +277,7 @@ static void sdio_release_func(struct device *dev)
sdio_free_func_cis(func);
kfree(func->info);
-
+ kfree(func->tmpbuf);
kfree(func);
}
@@ -292,6 +292,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
if (!func)
return ERR_PTR(-ENOMEM);
+ /*
+ * allocate buffer separately to make sure it's properly aligned for
+ * DMA usage (incl. 64 bit DMA)
+ */
+ func->tmpbuf = kmalloc(4, GFP_KERNEL);
+ if (!func->tmpbuf) {
+ kfree(func);
+ return ERR_PTR(-ENOMEM);
+ }
+
func->card = card;
device_initialize(&func->dev);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c531dee..8f27fe3 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 6b018e1..e817a02 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1406,6 +1406,32 @@ static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
return ret;
}
+static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
+ unsigned int clock)
+{
+ int ret = 0;
+
+ if (clock > 150000000) {
+ if (pdata->pctrl_data->pins_drv_type_200MHz)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_drv_type_200MHz);
+ } else if (clock > 75000000) {
+ if (pdata->pctrl_data->pins_drv_type_100MHz)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_drv_type_100MHz);
+ } else if (clock > 400000) {
+ if (pdata->pctrl_data->pins_drv_type_50MHz)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_drv_type_50MHz);
+ } else {
+ if (pdata->pctrl_data->pins_drv_type_400KHz)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_drv_type_400KHz);
+ }
+
+ return ret;
+}
+
static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
bool enable)
{
@@ -1586,6 +1612,35 @@ static int sdhci_msm_parse_pinctrl_info(struct device *dev,
dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
goto out;
}
+
+ pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
+ pctrl_data->pctrl, "ds_400KHz");
+ if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
+ dev_dbg(dev, "Could not get 400K pinstates, err:%d\n", ret);
+ pctrl_data->pins_drv_type_400KHz = NULL;
+ }
+
+ pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
+ pctrl_data->pctrl, "ds_50MHz");
+ if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
+ dev_dbg(dev, "Could not get 50M pinstates, err:%d\n", ret);
+ pctrl_data->pins_drv_type_50MHz = NULL;
+ }
+
+ pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
+ pctrl_data->pctrl, "ds_100MHz");
+ if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
+ dev_dbg(dev, "Could not get 100M pinstates, err:%d\n", ret);
+ pctrl_data->pins_drv_type_100MHz = NULL;
+ }
+
+ pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
+ pctrl_data->pctrl, "ds_200MHz");
+ if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
+ dev_dbg(dev, "Could not get 200M pinstates, err:%d\n", ret);
+ pctrl_data->pins_drv_type_200MHz = NULL;
+ }
+
pdata->pctrl_data = pctrl_data;
out:
return ret;
@@ -3349,6 +3404,16 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
}
msm_host->clk_rate = sup_clock;
host->clock = clock;
+
+ /* Configure pinctrl drive type according to
+ * current clock rate
+ */
+ rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
+ if (rc)
+ pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
+ mmc_hostname(host->mmc), __func__,
+ clock, rc);
+
/*
* Update the bus vote in case of frequency change due to
* clock scaling.
@@ -4883,6 +4948,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
mmc_hostname(host->mmc), __func__, ret);
device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
}
+ if (sdhci_msm_is_bootdevice(&pdev->dev))
+ mmc_flush_detect_work(host->mmc);
+
/* Successful initialization */
goto out;
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 2c6c0d7..6e15a73 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -76,6 +76,10 @@ struct sdhci_pinctrl_data {
struct pinctrl *pctrl;
struct pinctrl_state *pins_active;
struct pinctrl_state *pins_sleep;
+ struct pinctrl_state *pins_drv_type_400KHz;
+ struct pinctrl_state *pins_drv_type_50MHz;
+ struct pinctrl_state *pins_drv_type_100MHz;
+ struct pinctrl_state *pins_drv_type_200MHz;
};
struct sdhci_msm_bus_voting_data {
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index 44b56b6..da24e2c 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -111,7 +111,7 @@ static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
return dma_map_page(dev, page, offset, size, dir);
}
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef CONFIG_QCOM_BUS_SCALING
static int msm_nand_bus_set_vote(struct msm_nand_info *info,
unsigned int vote)
{
@@ -130,9 +130,11 @@ static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
{
int ret = 0;
- if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
- ret = -EINVAL;
- goto out;
+ if (!info->clk_data.rpmh_clk) {
+ if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+ ret = -EINVAL;
+ goto out;
+ }
}
if (atomic_read(&info->clk_data.clk_enabled) == vote)
goto out;
@@ -142,15 +144,18 @@ static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
pr_err("Failed to vote for bus with %d\n", ret);
goto out;
}
- ret = clk_prepare_enable(info->clk_data.qpic_clk);
- if (ret) {
- pr_err("Failed to enable the bus-clock with error %d\n",
- ret);
- msm_nand_bus_set_vote(info, 0);
- goto out;
+ if (!info->clk_data.rpmh_clk) {
+ ret = clk_prepare_enable(info->clk_data.qpic_clk);
+ if (ret) {
+ pr_err("Failed to enable the bus-clock with error %d\n",
+ ret);
+ msm_nand_bus_set_vote(info, 0);
+ goto out;
+ }
}
} else if (atomic_read(&info->clk_data.clk_enabled) && !vote) {
- clk_disable_unprepare(info->clk_data.qpic_clk);
+ if (!info->clk_data.rpmh_clk)
+ clk_disable_unprepare(info->clk_data.qpic_clk);
msm_nand_bus_set_vote(info, 0);
}
atomic_set(&info->clk_data.clk_enabled, vote);
@@ -283,7 +288,7 @@ static int msm_nand_put_device(struct device *dev)
}
#endif
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef CONFIG_QCOM_BUS_SCALING
static int msm_nand_bus_register(struct platform_device *pdev,
struct msm_nand_info *info)
{
@@ -314,6 +319,7 @@ static void msm_nand_bus_unregister(struct msm_nand_info *info)
static int msm_nand_bus_register(struct platform_device *pdev,
struct msm_nand_info *info)
{
+ pr_info("couldn't register due to missing config option\n");
return 0;
}
@@ -3273,7 +3279,7 @@ static int msm_nand_parse_smem_ptable(int *nr_parts)
temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
SMEM_ANY_HOST_FLAG);
- if (!temp_ptable) {
+ if (IS_ERR_OR_NULL(temp_ptable)) {
pr_err("Error reading partition table header\n");
goto out;
}
@@ -3313,7 +3319,7 @@ static int msm_nand_parse_smem_ptable(int *nr_parts)
*/
temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
SMEM_ANY_HOST_FLAG);
- if (!temp_ptable) {
+ if (IS_ERR_OR_NULL(temp_ptable)) {
pr_err("Error reading partition table\n");
goto out;
}
@@ -3327,10 +3333,12 @@ static int msm_nand_parse_smem_ptable(int *nr_parts)
continue;
/* Convert name to lower case and discard the initial chars */
mtd_part[i].name = pentry->name;
+ strsep(&(mtd_part[i].name), delimiter);
+ if (!mtd_part[i].name)
+ mtd_part[i].name = pentry->name;
for (j = 0; j < strlen(mtd_part[i].name); j++)
*(mtd_part[i].name + j) =
tolower(*(mtd_part[i].name + j));
- strsep(&(mtd_part[i].name), delimiter);
mtd_part[i].offset = pentry->offset;
mtd_part[i].mask_flags = pentry->attr;
mtd_part[i].size = pentry->length;
@@ -3465,16 +3473,22 @@ static int msm_nand_probe(struct platform_device *pdev)
err = msm_nand_bus_register(pdev, info);
if (err)
goto out;
- info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
- if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
- err = clk_set_rate(info->clk_data.qpic_clk,
- MSM_NAND_BUS_VOTE_MAX_RATE);
- } else {
- err = PTR_ERR(info->clk_data.qpic_clk);
- pr_err("Failed to get clock handle, err=%d\n", err);
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,qpic-clk-rpmh"))
+ info->clk_data.rpmh_clk = true;
+
+ if (!info->clk_data.rpmh_clk) {
+ info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
+ err = clk_set_rate(info->clk_data.qpic_clk,
+ MSM_NAND_BUS_VOTE_MAX_RATE);
+ } else {
+ err = PTR_ERR(info->clk_data.qpic_clk);
+ pr_err("Failed to get clock handle, err=%d\n", err);
+ }
+ if (err)
+ goto bus_unregister;
}
- if (err)
- goto bus_unregister;
err = msm_nand_setup_clocks_and_bus_bw(info, true);
if (err)
diff --git a/drivers/mtd/devices/msm_qpic_nand.h b/drivers/mtd/devices/msm_qpic_nand.h
index 9b6701c..043c215 100644
--- a/drivers/mtd/devices/msm_qpic_nand.h
+++ b/drivers/mtd/devices/msm_qpic_nand.h
@@ -295,6 +295,7 @@ struct msm_nand_clk_data {
uint32_t client_handle;
atomic_t clk_enabled;
atomic_t curr_vote;
+ bool rpmh_clk;
};
/* Structure that defines NANDc private data. */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2af9869..c821cca 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -52,6 +52,12 @@ struct nand_flash_dev nand_flash_ids[] = {
{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
NAND_ECC_INFO(40, SZ_1K), 4 },
+ {"MT29F8G08ABBCAH4 8G 3.3V 8-bit",
+ { .id = {0x2c, 0xa3, 0x90, 0x26, 0x00, 0x00, 0x00, 0x00} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 4, 224, NAND_ECC_INFO(8, SZ_512)},
+ {"TC58NYG2S0HBAI4 4G 1.8V 8-bit",
+ { .id = {0x98, 0xac, 0x90, 0x26, 0x76, 0x00, 0x00, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 5, 256, NAND_ECC_INFO(8, SZ_512) },
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 8b8470c..f9b2a77 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -320,6 +320,10 @@ static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
writel(0, nfc->regs + NFC_REG_INT);
} else {
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c189..d065c0e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e36d105..717530e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 481895b..c06ef43 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4..1ac2090 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
- if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode
+ * ignoring timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
@@ -811,7 +819,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
- CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce..9fdb0f0 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 05369dc..eea9aea 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
netif_wake_queue(netdev);
}
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
-
- atomic_dec(&dev->active_tx_urbs);
-
- if (!netif_device_present(netdev))
- return;
-
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d51e0c4..4224e06 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
+ break;
+
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3ec573c..c26debc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -326,6 +326,7 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
+ struct dsa_switch *ds = dev->ds;
u8 mgmt;
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -336,6 +337,15 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
mgmt &= ~SM_SW_FWD_EN;
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+ /* Include IMP port in dumb forwarding mode when no tagging protocol is
+ * set
+ */
+ if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, bool enable)
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index dac0af4..8104400 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -104,6 +104,10 @@
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
+/* Switch control (8 bit) */
+#define B53_SWITCH_CTRL 0x22
+#define B53_MII_DUMB_FWDG_EN BIT(6)
+
/* (16 bit) */
#define B53_UC_FLOOD_MASK 0x32
#define B53_MC_FLOOD_MASK 0x34
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 3066d9c..e2512ab 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -36,9 +36,9 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 69d7e9e..c5eaf76 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -100,7 +100,7 @@
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
/* Max timeout packets before device reset */
-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
@@ -116,9 +116,9 @@
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
/* ENA device should send keep alive msg every 1 sec.
- * We wait for 3 sec just to be on the safe side.
+ * We wait for 6 sec just to be on the safe side.
*/
-#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
+#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 20e569b..333df54 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -97,6 +97,8 @@ enum board_idx {
BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -131,6 +133,8 @@ static const struct {
{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
@@ -166,6 +170,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index aaf6fec..3660a3d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9f2184b..b8778e7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1253,6 +1253,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
+ kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
}
@@ -1265,6 +1266,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
+ kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9affd7c..6a62447 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7882,6 +7882,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ /* In case of PCI error, adapter lose its HW address
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 707bc46..6ea10a9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,6 +28,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 0a4e81a..ed6fae9 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4413,13 +4413,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index a5fc46b..d4d97ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
}
}
+#define MLX4_EN_WRAP_AROUND_SEC 10UL
+/* By scheduling the overflow check every 5 seconds, we have a reasonably
+ * good chance we wont miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
+ */
+#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
+
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ MLX4_EN_OVERFLOW_PERIOD);
unsigned long flags;
if (timeout) {
@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.enable = mlx4_en_phc_enable,
};
-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
/* This function calculates the max shift that enables the user range
* of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
@@ -261,7 +267,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns, zero = 0;
/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common for all ports, skip initialization if
@@ -285,13 +290,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ktime_to_ns(ktime_get_real()));
write_unlock_irqrestore(&mdev->clock_lock, flags);
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- mdev->overflow_period = ns;
-
/* Configure the PHC */
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ba652d8..727122d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -841,8 +841,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENOSYS;
}
- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
-
dev->caps.hca_core_clock = hca_param.hca_core_clock;
memset(&dev_cap, 0, sizeof(dev_cap));
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index a3528dd..df0f396 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -419,7 +419,6 @@ struct mlx4_en_dev {
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
- unsigned long overflow_period;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct notifier_block nb;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1806b1f..d50350c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -249,15 +249,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
- if (curr->used && curr->local_port == port->local_port)
+ if (curr->used && curr->local_port == local_port)
return curr;
}
return NULL;
@@ -268,7 +267,8 @@ static struct mlxsw_sp_span_entry
{
struct mlxsw_sp_span_entry *span_entry;
- span_entry = mlxsw_sp_span_entry_find(port);
+ span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
+ port->local_port);
if (span_entry) {
/* Already exists, just take a reference */
span_entry->ref_count++;
@@ -453,12 +453,13 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
}
static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
+ u8 destination_port,
enum mlxsw_sp_span_type type)
{
struct mlxsw_sp_span_entry *span_entry;
- span_entry = mlxsw_sp_span_entry_find(to);
+ span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
+ destination_port);
if (!span_entry) {
netdev_err(from->dev, "no span entry found\n");
return;
@@ -1255,10 +1256,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *cls)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
enum mlxsw_sp_span_type span_type;
- struct mlxsw_sp_port *to_port;
mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
cls->cookie);
@@ -1269,11 +1268,12 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
switch (mall_tc_entry->type) {
case MLXSW_SP_PORT_MALL_MIRROR:
- to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
span_type = mall_tc_entry->mirror.ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+ mlxsw_sp_span_mirror_remove(mlxsw_sp_port,
+ mall_tc_entry->mirror.to_local_port,
+ span_type);
break;
default:
WARN_ON(1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 653bb57..433f8be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -642,7 +642,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define OOO_LB_TC 9
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index a4789a93..9d59cb8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1222,7 +1222,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
- dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
if (!dcbx_info)
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index edae5fc..afe5e57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -877,7 +877,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
- n_cpus = num_active_cpus();
+ n_cpus = num_present_cpus();
rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
@@ -2732,7 +2732,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
}
/* API to configure WFQ from mcp link change */
-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+ struct qed_ptt *p_ptt, u32 min_pf_rate)
{
int i;
@@ -2746,8 +2747,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- __qed_configure_vp_wfq_on_link_change(p_hwfn,
- p_hwfn->p_dpc_ptt,
+ __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
min_pf_rate);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index bdc9ba9..8b7d2f9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -628,7 +628,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
/* Min bandwidth configuration */
__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
- qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+ p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status &
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f3a825a..d9dcb0d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -1766,13 +1766,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_resp;
- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
- p_resp_ramrod_res, resp_ramrod_res_phys);
-
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
if (!(qp->req_offloaded)) {
/* Don't send query qp for the requester */
out_params->sq_psn = qp->sq_psn;
@@ -1813,9 +1813,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
if (rc)
goto err_req;
- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
- p_req_ramrod_res, req_ramrod_res_phys);
-
out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
@@ -1823,6 +1820,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
out_params->draining = false;
if (rq_err_state)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 7567cc4..634e414 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1221,7 +1221,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
- int i, rc = 0;
+ int i, iter, rc = 0;
u8 *data_ptr;
for_each_queue(i) {
@@ -1240,7 +1240,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* enabled. This is because the queue 0 is configured as the default
* queue and that the loopback traffic is not IP.
*/
- for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
if (!qede_has_rx_work(rxq)) {
usleep_range(100, 200);
continue;
@@ -1287,7 +1287,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (i == QEDE_SELFTEST_POLL_COUNT) {
+ if (iter == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 0b4deb3..f683bfb 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -932,7 +932,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
curr_rxbuf->dma_addr =
dma_map_single(adpt->netdev->dev.parent, skb->data,
- curr_rxbuf->length, DMA_FROM_DEVICE);
+ adpt->rxbuf_size, DMA_FROM_DEVICE);
+
ret = dma_mapping_error(adpt->netdev->dev.parent,
curr_rxbuf->dma_addr);
if (ret) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d050f37..5024280 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -339,7 +339,7 @@ enum FELIC_MODE_BIT {
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
- ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
index a63ef82..dfae3c9 100644
--- a/drivers/net/ethernet/rocker/rocker_tlv.h
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
int rocker_tlv_put(struct rocker_desc_info *desc_info,
int attrtype, int attrlen, const void *data);
-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
- int attrtype, u8 value)
+static inline int
+rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+ u8 tmp = value; /* work around GCC PR81715 */
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
}
-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
- int attrtype, u16 value)
+static inline int
+rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
}
-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
- int attrtype, __be16 value)
+static inline int
+rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
}
-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
- int attrtype, u32 value)
+static inline int
+rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
}
-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
- int attrtype, __be32 value)
+static inline int
+rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
}
-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
- int attrtype, u64 value)
+static inline int
+rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+ u64 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
}
static inline struct rocker_tlv *
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index c4ada72..1d85109 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -197,11 +197,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
- if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
GET_CAPABILITIES_V2_OUT_FLAGS2);
- else
+ nic_data->piobuf_size = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
+ } else {
nic_data->datapath_caps2 = 0;
+ nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
+ }
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
@@ -825,8 +829,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
offset = ((efx->tx_channel_offset + efx->n_tx_channels -
tx_queue->channel->channel - 1) *
efx_piobuf_size);
- index = offset / ER_DZ_TX_PIOBUF_SIZE;
- offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+ index = offset / nic_data->piobuf_size;
+ offset = offset % nic_data->piobuf_size;
/* When the host page size is 4K, the first
* host page in the WC mapping may be within
@@ -1161,11 +1165,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* functions of the controller.
*/
if (efx_piobuf_size != 0 &&
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
efx->n_tx_channels) {
unsigned int n_piobufs =
DIV_ROUND_UP(efx->n_tx_channels,
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+ nic_data->piobuf_size / efx_piobuf_size);
rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
if (rc)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 73bee7e..73028f2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -500,6 +500,7 @@ enum {
* @pio_write_base: Base address for writing PIO buffers
* @pio_write_vi_base: Relative VI number for @pio_write_base
* @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
@@ -537,6 +538,7 @@ struct efx_ef10_nic_data {
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ u16 piobuf_size;
bool must_restore_piobufs;
u32 rx_rss_context;
bool rx_rss_context_exclusive;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 2337789..6f26acd 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -27,7 +27,6 @@
#ifdef EFX_USE_PIO
-#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 01cf094..8f84961 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -33,6 +33,7 @@
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
+#define DP83867_STRAP_STS1 0x006E
#define DP83867_RGMIIDCTL 0x0086
#define DP83867_SW_RESET BIT(15)
@@ -56,9 +57,13 @@
#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+/* STRAP_STS1 bits */
+#define DP83867_STRAP_STS1_RESERVED BIT(11)
+
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_PHYCR_RESERVED_MASK BIT(11)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -141,7 +146,7 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret, val;
+ int ret, val, bs;
u16 delay;
if (!phydev->priv) {
@@ -164,6 +169,22 @@ static int dp83867_config_init(struct phy_device *phydev)
return val;
val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+
+ /* The code below checks if "port mirroring" N/A MODE4 has been
+ * enabled during power on bootstrap.
+ *
+ * Such N/A mode enabled by mistake can put PHY IC in some
+ * internal testing mode and disable RGMII transmission.
+ *
+ * In this particular case one needs to check STRAP_STS1
+ * register's bit 11 (marked as RESERVED).
+ */
+
+ bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
+ DP83867_DEVADDR);
+ if (bs & DP83867_STRAP_STS1_RESERVED)
+ val &= ~DP83867_PHYCR_RESERVED_MASK;
+
ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index d15dd39..2e5150b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
priv->phy_drv->read_status(phydev);
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
- val &= XILINX_GMII2RGMII_SPEED_MASK;
+ val &= ~XILINX_GMII2RGMII_SPEED_MASK;
if (phydev->speed == SPEED_1000)
val |= BMCR_SPEED1000;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a380649..2668170 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2366,8 +2366,10 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
@@ -2639,8 +2641,10 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 35aa28b..7e5ae26 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1283,11 +1283,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index cdde590..3a72862 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -364,7 +364,7 @@
optionally with LEDs that indicate traffic
config USB_NET_PLUSB
- tristate "Prolific PL-2301/2302/25A1 based cables"
+ tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
depends on USB_USBNET
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index afbfc0f..dc6d3b0 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -769,8 +769,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
+ int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
+ u16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -875,6 +877,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
+ /*
+ * Some Huawei devices have been observed to come out of reset in NDP32 mode.
+ * Let's check if this is the case, and set the device to NDP16 mode again if
+ * needed.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &curr_ntb_format, 2);
+ if (err < 0) {
+ goto error2;
+ }
+
+ if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ dev_info(&intf->dev, "resetting NTB format to 16-bit");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+
+ if (err < 0)
+ goto error2;
+ }
+ }
+
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65..63f28908 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
* be at the end of the frame.
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
+
+ /* Additionally, it has been reported that some Huawei E3372H devices, with
+ * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
+ * needing to be set to the NTB16 one again.
+ */
+ drvflags |= CDC_NCM_FLAG_RESET_NTB16;
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 22e1a9a..6fe5937 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
}
static const struct driver_info prolific_info = {
- .description = "Prolific PL-2301/PL-2302/PL-25A1",
+ .description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id products [] = {
* Host-to-Host Cable
*/
.driver_info = (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+ USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom
+ * also: goobay Active USB 3.0
+ * Data Link,
+ * Unitek Y-3501
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 766c63b..45226db 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */
+
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -699,6 +702,25 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
return ret;
}
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
+
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ret = ath10k_pci_setup_resource(ar);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index be5b527..90c0c4a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -314,6 +314,7 @@ struct ath10k_peer {
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
+ bool removed;
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index f2e85eb..30e98af 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3738,6 +3738,9 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
if (!peer)
return NULL;
+ if (peer->removed)
+ return NULL;
+
if (peer->sta)
return peer->sta->txq[tid];
else if (peer->vif)
@@ -7422,6 +7425,20 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
return 0;
}
+static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar;
+ struct ath10k_peer *peer;
+
+ ar = hw->priv;
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (peer->sta == sta)
+ peer->removed = true;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -7462,6 +7479,7 @@ static const struct ieee80211_ops ath10k_ops = {
.assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
+ .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 410bcda..25b8d50 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -840,29 +840,33 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
+ return val;
+}
+
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
- u32 val = 0;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar->hw_rev) {
- case ATH10K_HW_QCA988X:
- case ATH10K_HW_QCA9887:
- case ATH10K_HW_QCA6174:
- case ATH10K_HW_QCA9377:
- val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CORE_CTRL_ADDRESS) &
- 0x7ff) << 21;
- break;
- case ATH10K_HW_QCA9888:
- case ATH10K_HW_QCA99X0:
- case ATH10K_HW_QCA9984:
- case ATH10K_HW_QCA4019:
- val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
- break;
- }
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -ENOTSUPP;
- val |= 0x100000 | (addr & 0xfffff);
- return val;
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
/*
@@ -3171,6 +3175,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
@@ -3178,12 +3183,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA9887_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -3191,30 +3198,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9984_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9984;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9888_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9888;
pci_ps = false;
pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@@ -3241,6 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar_pci->pci_soft_reset = pci_soft_reset;
ar_pci->pci_hard_reset = pci_hard_reset;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 9854ad5..577bb87 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -238,6 +238,11 @@ struct ath10k_pci {
/* Chip specific pci full reset function */
int (*pci_hard_reset)(struct ath10k *ar);
+ /* chip specific methods for converting target CPU virtual address
+ * space to CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e1d59da..ca8797c 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1165,11 +1165,12 @@ static int wcn36xx_remove(struct platform_device *pdev)
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
release_firmware(wcn->nv);
- mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
+
+ mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index e2a459e..51030c3 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -976,7 +976,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
u64 *cookie)
{
const u8 *buf = params->buf;
- size_t len = params->len;
+ size_t len = params->len, total;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
bool tx_status = false;
@@ -1001,7 +1001,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
- cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ total = sizeof(*cmd) + len;
+ if (total < len)
+ return -EINVAL;
+
+ cmd = kmalloc(total, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -1011,7 +1015,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cmd->len = cpu_to_le16(len);
memcpy(cmd->payload, buf, len);
- rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+ rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (rc == 0)
tx_status = !evt.evt.status;
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 7a33792..77d1902 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -26,14 +26,17 @@
prefix_type, rowsize, \
groupsize, buf, len, ascii)
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
- ioaddr = wmi_buffer(wil, val); \
- if (!ioaddr) { \
- wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
- le32_to_cpu(val)); \
- return -EINVAL; \
- } \
- } while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+ void __iomem **ioaddr, __le32 val,
+ u32 size, const char *msg)
+{
+ *ioaddr = wmi_buffer_block(wil, val, size);
+ if (!(*ioaddr)) {
+ wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+ return false;
+ }
+ return true;
+}
/**
* wil_fw_verify - verify firmware file validity
@@ -160,7 +163,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
s);
wil_memcpy_toio_32(dst, d->data, s);
@@ -192,7 +196,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
return -EINVAL;
}
- FW_ADDR_CHECK(dst, d->addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+ return -EINVAL;
v = le32_to_cpu(d->value);
wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -248,7 +253,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
u32 v = le32_to_cpu(block[i].value);
u32 x, y;
- FW_ADDR_CHECK(dst, block[i].addr, "address");
+ if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+ return -EINVAL;
x = readl(dst);
y = (x & m) | (v & ~m);
@@ -314,10 +320,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
- FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr") ||
+ !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+ "gateway_value_addr") ||
+ !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
" cmd 0x%08x ctl 0x%08x\n",
@@ -373,12 +384,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
n, gw_cmd);
- FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+ if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+ "gateway_addr_addr"))
+ return -EINVAL;
for (k = 0; k < ARRAY_SIZE(block->value); k++)
- FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
- "gateway_value_addr");
- FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
- FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+ if (!wil_fw_addr_check(wil, &gwa_val[k],
+ d->gateway_value_addr[k],
+ 0, "gateway_value_addr"))
+ return -EINVAL;
+ if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+ "gateway_cmd_addr") ||
+ !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+ "gateway_ctrl_address"))
+ return -EINVAL;
wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
le32_to_cpu(d->gateway_addr_addr),
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f..5cf3417 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -358,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -393,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_mbox_ready, wil->status);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index c4faa2c..ae5a1b6 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -777,11 +777,11 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
+ int features;
wil->keep_radio_on_during_sleep =
- wil->platform_ops.keep_radio_on_during_sleep &&
- wil->platform_ops.keep_radio_on_during_sleep(
- wil->platform_handle) &&
+ test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+ wil->platform_capa) &&
test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
@@ -791,6 +791,16 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
else
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+ if (wil->platform_ops.set_features) {
+ features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+ wil->fw_capabilities) &&
+ test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+ wil->platform_capa)) ?
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+ wil->platform_ops.set_features(wil->platform_handle, features);
+ }
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -986,6 +996,7 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
+ unsigned long status_flags = BIT(wil_status_resetting);
wil_dbg_misc(wil, "reset\n");
@@ -1006,9 +1017,18 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
- wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n");
- wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP |
- BIT_USER_SUPPORT_T_POWER_ON_0);
+ wil_dbg_misc(wil, "Prevent DS in BL\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP);
+
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+ }
+
+ if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+ wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+ wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+ }
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
@@ -1019,6 +1039,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device collects crash dump, cancel the reset.
+ * following crash dump collection, reset would take place.
+ */
+ wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+ rc = -EBUSY;
+ goto out;
+ }
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1033,7 +1061,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
- bitmap_zero(wil->status, wil_status_last);
+ if (test_bit(wil_status_suspending, wil->status))
+ status_flags |= BIT(wil_status_suspending);
+ bitmap_and(wil->status, wil->status, &status_flags,
+ wil_status_last);
+ wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
mutex_unlock(&wil->wmi_mutex);
wil_mask_irq(wil);
@@ -1051,14 +1083,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_rx_fini(wil);
if (rc) {
wil_bl_crash_info(wil, true);
- return rc;
+ goto out;
}
rc = wil_get_bl_info(wil);
if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
rc = 0;
if (rc)
- return rc;
+ goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
@@ -1070,10 +1102,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
- return rc;
+ goto out;
rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
if (rc)
- return rc;
+ goto out;
wil_pre_fw_config(wil);
wil_release_cpu(wil);
@@ -1085,6 +1117,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
+ clear_bit(wil_status_resetting, wil->status);
+
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
@@ -1109,6 +1143,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->tt_data_set)
wmi_set_tt_cfg(wil, &wil->tt_data);
+ if (wil->snr_thresh.enabled)
+ wmi_set_snr_thresh(wil, wil->snr_thresh.omni,
+ wil->snr_thresh.direct);
+
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
@@ -1121,6 +1159,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
return rc;
+
+out:
+ clear_bit(wil_status_resetting, wil->status);
+ return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1228,9 +1270,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
- wil_reset(wil, false);
-
- return 0;
+ return wil_reset(wil, false);
}
int wil_down(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 89e3fbf..370068a 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -23,9 +23,9 @@
#include <linux/rtnetlink.h>
#include <linux/pm_runtime.h>
-static bool use_msi;
+static bool use_msi = true;
module_param(use_msi, bool, 0444);
-MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - false");
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
static bool ftm_mode;
module_param(ftm_mode, bool, 0444);
@@ -45,9 +45,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
+ int platform_capa;
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
@@ -83,6 +85,14 @@ void wil_set_capabilities(struct wil6210_priv *wil)
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+ /* Get platform capabilities */
+ if (wil->platform_ops.get_capa) {
+ platform_capa =
+ wil->platform_ops.get_capa(wil->platform_handle);
+ memcpy(wil->platform_capa, &platform_capa,
+ min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+ }
+
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
wil_refresh_fw_capabilities(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ef2f34..153c1cf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -141,6 +141,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
/* Prevent handling of new tx and wmi commands */
set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device collects crash dump, cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
wil_update_net_queues_bh(wil, NULL, true);
if (!wil_is_tx_idle(wil)) {
@@ -251,6 +258,15 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
+ set_bit(wil_status_suspending, wil->status);
+ if (test_bit(wil_status_collecting_dumps, wil->status)) {
+ /* Device collects crash dump, cancel the suspend */
+ wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+ clear_bit(wil_status_suspending, wil->status);
+ wil->suspend_stats.rejected_by_host++;
+ return -EBUSY;
+ }
+
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
@@ -275,6 +291,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
set_bit(wil_status_suspended, wil->status);
out:
+ clear_bit(wil_status_suspending, wil->status);
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
index b91bf51..7c9a790 100644
--- a/drivers/net/wireless/ath/wil6210/sysfs.c
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -268,10 +268,49 @@ static DEVICE_ATTR(fst_link_loss, 0644,
wil_fst_link_loss_sysfs_show,
wil_fst_link_loss_sysfs_store);
+static ssize_t
+wil_snr_thresh_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ ssize_t len = 0;
+
+ if (wil->snr_thresh.enabled)
+ len = snprintf(buf, PAGE_SIZE, "omni=%d, direct=%d\n",
+ wil->snr_thresh.omni, wil->snr_thresh.direct);
+
+ return len;
+}
+
+static ssize_t
+wil_snr_thresh_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ int rc;
+ short omni, direct;
+
+ /* to disable snr threshold, set both omni and direct to 0 */
+ if (sscanf(buf, "%hd %hd", &omni, &direct) != 2)
+ return -EINVAL;
+
+ rc = wmi_set_snr_thresh(wil, omni, direct);
+ if (!rc)
+ rc = count;
+
+ return rc;
+}
+
+static DEVICE_ATTR(snr_thresh, 0644,
+ wil_snr_thresh_sysfs_show,
+ wil_snr_thresh_sysfs_store);
+
static struct attribute *wil6210_sysfs_entries[] = {
&dev_attr_ftm_txrx_offset.attr,
&dev_attr_thermal_throttling.attr,
&dev_attr_fst_link_loss.attr,
+ &dev_attr_snr_thresh.attr,
NULL
};
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8616f86..bb43f3f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -165,6 +165,7 @@ struct RGF_ICR {
#define RGF_USER_USAGE_8 (0x880020)
#define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
#define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
+ #define BIT_USER_EXT_CLK BIT(2)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
@@ -445,6 +446,7 @@ enum { /* for wil6210_priv.status */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
+ wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@@ -648,6 +650,7 @@ struct wil6210_priv {
const char *wil_fw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@@ -748,6 +751,11 @@ struct wil6210_priv {
struct wil_ftm_priv ftm;
bool tt_data_set;
struct wmi_tt_data tt_data;
+ struct {
+ bool enabled;
+ short omni;
+ short direct;
+ } snr_thresh;
int fw_calib_result;
@@ -886,6 +894,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
void wil_set_ethtoolops(struct net_device *ndev);
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -1066,4 +1075,5 @@ int wmi_link_maintain_cfg_write(struct wil6210_priv *wil,
const u8 *addr,
bool fst_link_loss);
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct);
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index e53cf0c..1ed3306 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
+ set_bit(wil_status_collecting_dumps, wil->status);
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status) ||
+ test_bit(wil_status_resetting, wil->status)) {
+ wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+ clear_bit(wil_status_collecting_dumps, wil->status);
+ return -EINVAL;
+ }
+
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
(const void __iomem * __force)data, len);
}
+ clear_bit(wil_status_collecting_dumps, wil->status);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 621005b..620a1b3 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -27,6 +27,18 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
+enum wil_platform_features {
+ WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+ WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+ WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+ WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+ WIL_PLATFORM_CAPA_EXT_CLK = 2,
+ WIL_PLATFORM_CAPA_MAX,
+};
+
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
- bool (*keep_radio_on_during_sleep)(void *handle);
+ int (*get_capa)(void *handle);
+ void (*set_features)(void *handle, int features);
};
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 43cdaef..9520c39 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -141,13 +141,15 @@ static u32 wmi_addr_remap(u32 x)
/**
* Check address validity for WMI buffer; remap if needed
* @ptr - internal (linker) fw/ucode address
+ * @size - if non zero, validate the block does not
+ * exceed the device memory (bar)
*
* Valid buffer should be DWORD aligned
*
* return address for accessing buffer from the host;
* if buffer is not valid, return NULL.
*/
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
{
u32 off;
u32 ptr = le32_to_cpu(ptr_);
@@ -162,10 +164,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
off = HOSTADDR(ptr);
if (off > wil->bar_size - 4)
return NULL;
+ if (size && ((off + size > wil->bar_size) || (off + size < off)))
+ return NULL;
return wil->csr + off;
}
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ return wmi_buffer_block(wil, ptr_, 0);
+}
+
/**
* Check address validity
*/
@@ -223,7 +232,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
uint retry;
int rc = 0;
- if (sizeof(cmd) + len > r->entry_size) {
+ if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
@@ -369,7 +378,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
s32 signal;
__le16 fc;
u32 d_len;
- u16 d_status;
+ s16 snr;
if (flen < 0) {
wil_err(wil, "MGMT Rx: short event, len %d\n", len);
@@ -391,13 +400,13 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
- d_status = le16_to_cpu(data->info.status);
+ snr = le16_to_cpu(data->info.snr); /* 1/4 dB units */
fc = rx_mgmt_frame->frame_control;
wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n",
data->info.channel, data->info.mcs, data->info.rssi,
data->info.sqi);
- wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+ wil_dbg_wmi(wil, "snr %ddB len %d fc 0x%04x\n", snr / 4, d_len,
le16_to_cpu(fc));
wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
data->info.qid, data->info.mid, data->info.cid);
@@ -425,6 +434,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+ if (wil->snr_thresh.enabled && snr < wil->snr_thresh.omni) {
+ wil_dbg_wmi(wil, "snr below threshold. dropping\n");
+ return;
+ }
+
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
@@ -1412,8 +1426,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
@@ -2150,3 +2170,32 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
+
+int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct)
+{
+ int rc;
+ struct wmi_set_connect_snr_thr_cmd cmd = {
+ .enable = true,
+ .omni_snr_thr = cpu_to_le16(omni),
+ .direct_snr_thr = cpu_to_le16(direct),
+ };
+
+ if (!test_bit(WMI_FW_CAPABILITY_CONNECT_SNR_THR, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ if (omni == 0 && direct == 0)
+ cmd.enable = false;
+
+ wil_dbg_wmi(wil, "%s snr thresh omni=%d, direct=%d (1/4 dB units)\n",
+ cmd.enable ? "enable" : "disable", omni, direct);
+
+ rc = wmi_send(wil, WMI_SET_CONNECT_SNR_THR_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ return rc;
+
+ wil->snr_thresh.enabled = cmd.enable;
+ wil->snr_thresh.omni = omni;
+ wil->snr_thresh.direct = direct;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee7..809e320 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -71,6 +71,8 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
+ WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16,
+ WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
};
@@ -1821,7 +1823,7 @@ struct wmi_rx_mgmt_info {
u8 range;
u8 sqi;
__le16 stype;
- __le16 status;
+ __le16 snr;
__le32 len;
/* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
u8 qid;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 261a0da..bc59aa2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
+ params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
- /* if request is null exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
n_ssids = request->n_ssids;
n_channels = request->n_channels;
+
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
ptr += sizeof(ssid_le);
}
} else {
- brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
- if ((request->ssids) && request->ssids->ssid_len) {
- brcmf_dbg(SCAN, "SSID %s len=%d\n",
- params_le->ssid_le.SSID,
- request->ssids->ssid_len);
- params_le->ssid_le.SSID_len =
- cpu_to_le32(request->ssids->ssid_len);
- memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
- request->ssids->ssid_len);
- }
+ brcmf_dbg(SCAN, "Performing passive scan\n");
+ params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =
@@ -3099,6 +3088,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
+ u32 escan_buflen;
struct brcmf_bss_info_le *bss_info_le;
struct brcmf_bss_info_le *bss = NULL;
u32 bi_length;
@@ -3115,11 +3105,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
+ if (e->datalen < sizeof(*escan_result_le)) {
+ brcmf_err("invalid event data length\n");
+ goto exit;
+ }
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
+ escan_buflen = le32_to_cpu(escan_result_le->buflen);
+ if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+ escan_buflen > e->datalen ||
+ escan_buflen < sizeof(*escan_result_le)) {
+ brcmf_err("Invalid escan buffer length: %d\n",
+ escan_buflen);
+ goto exit;
+ }
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -3136,9 +3138,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bi_length = le32_to_cpu(bss_info_le->length);
- if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
- WL_ESCAN_RESULTS_FIXED_SIZE)) {
- brcmf_err("Invalid bss_info length %d: ignoring\n",
+ if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+ brcmf_err("Ignoring invalid bss_info length: %d\n",
bi_length);
goto exit;
}
@@ -6580,8 +6581,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
- err = brcmf_setup_wiphybands(wiphy);
- return err;
+ return 0;
}
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6946,6 +6946,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto priv_out;
}
+ err = brcmf_setup_wiphybands(wiphy);
+ if (err) {
+ brcmf_err("Setting wiphy bands failed (%d)\n", err);
+ goto wiphy_unreg_out;
+ }
+
/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
* setup 40MHz in 2GHz band and enable OBSS scanning.
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
index e64557c..6f8a4b0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
@@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
{
void *dump;
size_t ramsize;
+ int err;
ramsize = brcmf_bus_get_ramsize(bus);
- if (ramsize) {
- dump = vzalloc(len + ramsize);
- if (!dump)
- return -ENOMEM;
- memcpy(dump, data, len);
- brcmf_bus_get_memdump(bus, dump + len, ramsize);
- dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+ if (!ramsize)
+ return -ENOTSUPP;
+
+ dump = vzalloc(len + ramsize);
+ if (!dump)
+ return -ENOMEM;
+
+ memcpy(dump, data, len);
+ err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
+ if (err) {
+ vfree(dump);
+ return err;
}
+
+ dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 79c081f..6afcf86 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (code != BRCMF_E_IF && !fweh->evt_handler[code])
return;
- if (datalen > BRCMF_DCMD_MAXLEN)
+ if (datalen > BRCMF_DCMD_MAXLEN ||
+ datalen + sizeof(*event_packet) > packet_len)
return;
if (in_interrupt())
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index a4118c0..5901357 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2f..ef68546 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
}
static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
- u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+ const u8 *dlys, u8 len)
{
u32 t1_offset, t2_offset;
u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
{
u16 currband;
- s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
- s8 *lna1_gain_db = NULL;
- s8 *lna1_gain_db_2 = NULL;
- s8 *lna2_gain_db = NULL;
- s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
- s8 *tia_gain_db;
- s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
- s8 *tia_gainbits;
- u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
- u16 *rfseq_init_gain;
+ static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+ const s8 *lna1_gain_db = NULL;
+ const s8 *lna1_gain_db_2 = NULL;
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+ const s8 *tia_gain_db;
+ static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+ const s8 *tia_gainbits;
+ static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+ const u16 *rfseq_init_gain;
u16 init_gaincode;
u16 clip1hi_gaincode;
u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
if ((freq <= 5080) || (freq == 5825)) {
- s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
- s8 lna1A_gain_db_2_rev7[] = {
- 11, 17, 22, 25};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x3e;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else if ((freq >= 5500) && (freq <= 5700)) {
- s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
crsminu_th = 0x45;
clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else {
- s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x41;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_RFSEQ_CMD_SET_HPF_BW
};
- u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
- s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
- s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
- s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
- s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
- s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
- s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
- s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
- s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
- s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
- s8 *lna1_gain_db = NULL;
- s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
- s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
- s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
- s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
- s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
- s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
- s8 *lna2_gain_db = NULL;
- s8 tiaG_gain_db[] = {
+ static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+ static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+ static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+ static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+ static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+ static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+ static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+ static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+ static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+ static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+ const s8 *lna1_gain_db = NULL;
+ static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+ static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+ static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+ static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaG_gain_db[] = {
0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
- s8 tiaA_gain_db[] = {
+ static const s8 tiaA_gain_db[] = {
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
- s8 tiaA_gain_db_rev4[] = {
+ static const s8 tiaA_gain_db_rev4[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev5[] = {
+ static const s8 tiaA_gain_db_rev5[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev6[] = {
+ static const s8 tiaA_gain_db_rev6[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 *tia_gain_db;
- s8 tiaG_gainbits[] = {
+ const s8 *tia_gain_db;
+ static const s8 tiaG_gainbits[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- s8 tiaA_gainbits[] = {
+ static const s8 tiaA_gainbits[] = {
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
- s8 tiaA_gainbits_rev4[] = {
+ static const s8 tiaA_gainbits_rev4[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev5[] = {
+ static const s8 tiaA_gainbits_rev5[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev6[] = {
+ static const s8 tiaA_gainbits_rev6[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 *tia_gainbits;
- s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
- s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
- u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
- u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev5_elna[] = {
+ const s8 *tia_gainbits;
+ static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+ static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+ static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+ static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev5_elna[] = {
0x013f, 0x013f, 0x013f, 0x013f };
- u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
- u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
- u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
- u16 rfseqA_init_gain_rev4_elna[] = {
+ static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+ static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+ static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+ static const u16 rfseqA_init_gain_rev4_elna[] = {
0x314f, 0x314f, 0x314f, 0x314f };
- u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
- u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
- u16 *rfseq_init_gain;
+ static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+ static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+ const u16 *rfseq_init_gain;
u16 initG_gaincode = 0x627e;
u16 initG_gaincode_rev4 = 0x527e;
u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
u16 clip1mdA_gaincode_rev6 = 0x2084;
u16 clip1md_gaincode = 0;
u16 clip1loG_gaincode = 0x0074;
- u16 clip1loG_gaincode_rev5[] = {
+ static const u16 clip1loG_gaincode_rev5[] = {
0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
};
- u16 clip1loG_gaincode_rev6[] = {
+ static const u16 clip1loG_gaincode_rev6[] = {
0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
};
u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
{
- u8 rfseq_rx2tx_events[] = {
+ static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_EXT_PA
};
u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
- u8 rfseq_tx2rx_events[] = {
+ static const u8 rfseq_tx2rx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_EXT_PA,
NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_CLR_HIQ_DIS
};
- u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
- u8 rfseq_tx2rx_events_rev3[] = {
+ static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_EXT_PA,
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
u8 rfseq_rx2tx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
};
u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
- u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
s16 alpha0, alpha1, alpha2;
s16 beta0, beta1, beta2;
u32 leg_data_weights, ht_data_weights, nss1_data_weights,
stbc_data_weights;
u8 chan_freq_range = 0;
- u16 dac_control = 0x0002;
+ static const u16 dac_control = 0x0002;
u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
u16 *aux_adc_gain;
- u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
- u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+ static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
s32 min_nvar_val = 0x18d;
s32 min_nvar_offset_6mbps = 20;
u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
- u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
- u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
- u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+ static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
u16 ipalvlshift_3p3_war_en = 0;
u16 rccal_bcap_val, rccal_scap_val;
u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
u16 bbmult;
u16 tblentry;
- struct nphy_txiqcal_ladder ladder_lo[] = {
+ static const struct nphy_txiqcal_ladder ladder_lo[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
};
- struct nphy_txiqcal_ladder ladder_iq[] = {
+ static const struct nphy_txiqcal_ladder ladder_iq[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u16 cal_gain[2];
struct nphy_iqcal_params cal_params[2];
u32 tbl_len;
- void *tbl_ptr;
+ const void *tbl_ptr;
bool ladder_updated[2];
u8 mphase_cal_lastphase = 0;
int bcmerror = 0;
bool phyhang_avoid_state = false;
- u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
0x1902,
0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
0x6407
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
0x3200,
0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
0x6407
};
- u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
0x1202,
0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
0x4707
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
0x2300,
0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
0x4707
};
- u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
0x9123, 0x9264, 0x9086, 0x9245, 0x9056
};
- u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
0x9101, 0x9253, 0x9053, 0x9234, 0x9034
};
- u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
};
- u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
};
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
index fafd9ce..29dd4c9 100644
--- a/drivers/net/wireless/cnss_genl/cnss_nl.c
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -64,6 +64,8 @@ static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = {
[CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
[CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
.len = CLD80211_MAX_NL_DATA },
+ [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
};
static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c
index d73846e..4955130 100644
--- a/drivers/net/wireless/cnss_utils/cnss_utils.c
+++ b/drivers/net/wireless/cnss_utils/cnss_utils.c
@@ -34,6 +34,11 @@ struct cnss_wlan_mac_addr {
u32 no_of_mac_addr_set;
};
+enum mac_type {
+ CNSS_MAC_PROVISIONED,
+ CNSS_MAC_DERIVED,
+};
+
static struct cnss_utils_priv {
struct cnss_unsafe_channel_list unsafe_channel_list;
struct cnss_dfs_nol_info dfs_nol_info;
@@ -42,8 +47,8 @@ static struct cnss_utils_priv {
/* generic spin-lock for dfs_nol info */
spinlock_t dfs_nol_info_lock;
int driver_load_cnt;
- bool is_wlan_mac_set;
struct cnss_wlan_mac_addr wlan_mac_addr;
+ struct cnss_wlan_mac_addr wlan_der_mac_addr;
enum cnss_utils_cc_src cc_source;
} *cnss_utils_priv;
@@ -189,7 +194,8 @@ int cnss_utils_get_driver_load_cnt(struct device *dev)
}
EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt);
-int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
+static int set_wlan_mac_address(const u8 *mac_list, const uint32_t len,
+ enum mac_type type)
{
struct cnss_utils_priv *priv = cnss_utils_priv;
u32 no_of_mac_addr;
@@ -200,11 +206,6 @@ int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
if (!priv)
return -EINVAL;
- if (priv->is_wlan_mac_set) {
- pr_debug("WLAN MAC address is already set\n");
- return 0;
- }
-
if (len == 0 || (len % ETH_ALEN) != 0) {
pr_err("Invalid length %d\n", len);
return -EINVAL;
@@ -217,24 +218,45 @@ int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len)
return -EINVAL;
}
- priv->is_wlan_mac_set = true;
- addr = &priv->wlan_mac_addr;
+ if (type == CNSS_MAC_PROVISIONED)
+ addr = &priv->wlan_mac_addr;
+ else
+ addr = &priv->wlan_der_mac_addr;
+
+ if (addr->no_of_mac_addr_set) {
+ pr_err("WLAN MAC address is already set, num %d type %d\n",
+ addr->no_of_mac_addr_set, type);
+ return 0;
+ }
+
addr->no_of_mac_addr_set = no_of_mac_addr;
temp = &addr->mac_addr[0][0];
for (iter = 0; iter < no_of_mac_addr;
- ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
- ether_addr_copy(temp, in);
+ ++iter, temp += ETH_ALEN, mac_list += ETH_ALEN) {
+ ether_addr_copy(temp, mac_list);
pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
temp[0], temp[1], temp[2],
temp[3], temp[4], temp[5]);
}
-
return 0;
}
+
+int cnss_utils_set_wlan_mac_address(const u8 *mac_list, const uint32_t len)
+{
+ return set_wlan_mac_address(mac_list, len, CNSS_MAC_PROVISIONED);
+}
EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address);
-u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+int cnss_utils_set_wlan_derived_mac_address(
+ const u8 *mac_list, const uint32_t len)
+{
+ return set_wlan_mac_address(mac_list, len, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_set_wlan_derived_mac_address);
+
+static u8 *get_wlan_mac_address(struct device *dev,
+ u32 *num, enum mac_type type)
{
struct cnss_utils_priv *priv = cnss_utils_priv;
struct cnss_wlan_mac_addr *addr = NULL;
@@ -242,20 +264,36 @@ u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
if (!priv)
goto out;
- if (!priv->is_wlan_mac_set) {
- pr_debug("WLAN MAC address is not set\n");
+ if (type == CNSS_MAC_PROVISIONED)
+ addr = &priv->wlan_mac_addr;
+ else
+ addr = &priv->wlan_der_mac_addr;
+
+ if (!addr->no_of_mac_addr_set) {
+ pr_err("WLAN MAC address is not set, type %d\n", type);
goto out;
}
-
- addr = &priv->wlan_mac_addr;
*num = addr->no_of_mac_addr_set;
return &addr->mac_addr[0][0];
+
out:
*num = 0;
return NULL;
}
+
+u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num)
+{
+ return get_wlan_mac_address(dev, num, CNSS_MAC_PROVISIONED);
+}
EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address);
+u8 *cnss_utils_get_wlan_derived_mac_address(
+ struct device *dev, uint32_t *num)
+{
+ return get_wlan_mac_address(dev, num, CNSS_MAC_DERIVED);
+}
+EXPORT_SYMBOL(cnss_utils_get_wlan_derived_mac_address);
+
void cnss_utils_set_cc_source(struct device *dev,
enum cnss_utils_cc_src cc_source)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3bd6fc1..33f4d7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -78,6 +78,7 @@
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+ SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
/* NVM SW-Section offset (in words) definitions */
@@ -262,13 +263,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
- bool lar_supported)
+ bool lar_supported, bool no_wide_in_5ghz)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
u16 ch_flags;
- bool is_5ghz;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
@@ -283,12 +283,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ bool is_5ghz = (ch_idx >= num_2ghz_channels);
+
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= num_2ghz_channels &&
- !data->sku_cap_band_52GHz_enable)
+ if (is_5ghz && !data->sku_cap_band_52GHz_enable)
continue;
+ /* workaround to disable wide channels in 5GHz */
+ if (no_wide_in_5ghz && is_5ghz) {
+ ch_flags &= ~(NVM_CHANNEL_40MHZ |
+ NVM_CHANNEL_80MHZ |
+ NVM_CHANNEL_160MHZ);
+ }
+
if (ch_flags & NVM_CHANNEL_160MHZ)
data->vht160_supported = true;
@@ -311,8 +319,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < num_2ghz_channels) ?
- NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ channel->band = is_5ghz ?
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -324,7 +332,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
- is_5ghz = channel->band == NL80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
@@ -441,7 +448,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *ch_section,
- u8 tx_chains, u8 rx_chains, bool lar_supported)
+ u8 tx_chains, u8 rx_chains, bool lar_supported,
+ bool no_wide_in_5ghz)
{
int n_channels;
int n_used = 0;
@@ -450,12 +458,14 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
n_channels = iwl_init_channel_map(
dev, cfg, data,
- &ch_section[NVM_CHANNELS], lar_supported);
+ &ch_section[NVM_CHANNELS], lar_supported,
+ no_wide_in_5ghz);
else
n_channels = iwl_init_channel_map(
dev, cfg, data,
&ch_section[NVM_CHANNELS_FAMILY_8000],
- lar_supported);
+ lar_supported,
+ no_wide_in_5ghz);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
@@ -658,6 +668,39 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
return 0;
}
+static bool
+iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw)
+{
+ /*
+ * Workaround a bug in Indonesia SKUs where the regulatory in
+ * some 7000-family OTPs erroneously allow wide channels in
+ * 5GHz. To check for Indonesia, we take the SKU value from
+ * bits 1-4 in the subsystem ID and check if it is either 5 or
+ * 9. In those cases, we need to force-disable wide channels
+ * in 5GHz otherwise the FW will throw a sysassert when we try
+ * to use them.
+ */
+ if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /*
+ * Unlike the other sections in the NVM, the hw
+ * section uses big-endian.
+ */
+ u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
+ + SUBSYSTEM_ID);
+ u8 sku = (subsystem_id & 0x1e) >> 1;
+
+ if (sku == 5 || sku == 9) {
+ IWL_DEBUG_EEPROM(dev,
+ "disabling wide channels in 5GHz (0x%0x %d)\n",
+ subsystem_id, sku);
+ return true;
+ }
+ }
+
+ return false;
+}
+
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -668,6 +711,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct device *dev = trans->dev;
struct iwl_nvm_data *data;
bool lar_enabled;
+ bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku, radio_cfg;
u16 lar_config;
const __le16 *ch_section;
@@ -738,7 +782,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
- lar_fw_supported && lar_enabled);
+ lar_fw_supported && lar_enabled, no_wide_in_5ghz);
data->calib_version = 255;
return data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1db1dc1..9789f3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1548,6 +1548,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
struct iwl_mvm_mc_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
int ret, len;
/* if we don't have free ports, mcast frames will be dropped */
@@ -1562,7 +1567,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0556d13..092ae00 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -499,15 +499,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
switch (info->control.vif->type) {
case NL80211_IFTYPE_AP:
/*
- * handle legacy hostapd as well, where station may be added
- * only after assoc.
+ * Handle legacy hostapd as well, where station may be added
+ * only after assoc. Take care of the case where we send a
+ * deauth to a station that we don't have.
*/
- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
+ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+ ieee80211_is_deauth(fc))
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
if (info->hw_queue == info->control.vif->cab_queue)
return info->hw_queue;
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
case NL80211_IFTYPE_P2P_DEVICE:
if (ieee80211_is_mgmt(fc))
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0fd7d7e..d2a28a9 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1357,8 +1357,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
- txi->rate_driver_data[0] = channel;
-
if (skb->len >= 24 + 8 &&
ieee80211_is_probe_resp(hdr->frame_control)) {
/* fake header transmission time */
@@ -3048,6 +3046,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
+ const char *hwname = NULL;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3061,8 +3060,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+ }
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
@@ -3090,11 +3095,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
s64 idx = -1;
const char *hwname = NULL;
- if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
- else
+ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ } else
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
@@ -3103,7 +3112,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
} else {
- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ if (!hwname ||
+ strcmp(hwname, wiphy_name(data->hw->wiphy)))
continue;
}
@@ -3114,10 +3124,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
+ kfree(hwname);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
+ kfree(hwname);
return -ENODEV;
}
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 301170c..033ff88 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -305,7 +305,7 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
}
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return 0;
+ return ret;
}
static int lbs_wait_for_ds_awake(struct lbs_private *priv)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 4b0bb6b..c636e60 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -646,10 +646,9 @@ static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+ rt2800usb_entry_txstatus_timeout(entry))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- else if (rt2800usb_entry_txstatus_timeout(entry))
- rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
else
break;
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 82d949e..4e725d1 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -6316,6 +6316,13 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* found in rtl8192eu vendor driver */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab33, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
#endif
{ }
};
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 2cbef96..1281ebe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1128,7 +1128,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_byte(rtlpriv, read_addr);
+ ret = rtl_read_word(rtlpriv, read_addr);
}
return ret;
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index e8c5ddd..3c4c58b 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
unsigned long flags;
bool found;
- new = kmalloc(sizeof(*entry), GFP_KERNEL);
+ new = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!new)
return;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d9b5b73..a7bdb1f 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
+#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
@@ -1626,11 +1627,12 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
- /* Allow as many queues as there are CPUs if user has not
+ /* Allow as many queues as there are CPUs but max. 8 if user has not
* specified a value.
*/
if (xenvif_max_queues == 0)
- xenvif_max_queues = num_online_cpus();
+ xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
+ num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 14eac73..54ea90f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -96,7 +96,7 @@ struct nvme_dev {
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
- dma_addr_t cmb_dma_addr;
+ pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
@@ -1037,7 +1037,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
nvmeq->sq_cmds_io = dev->cmb + offset;
} else {
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1343,7 +1343,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
- dma_addr_t dma_addr;
+ int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1356,7 +1356,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
offset = szu * NVME_CMB_OFST(dev->cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
return NULL;
@@ -1369,12 +1370,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
- cmb = ioremap_wc(dma_addr, size);
+ cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
if (!cmb)
return NULL;
- dev->cmb_dma_addr = dma_addr;
+ dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
return cmb;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 286fda4..ab4f8db 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -337,8 +337,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
struct ib_device *ibdev = dev->dev;
int ret;
- BUG_ON(queue_idx >= ctrl->queue_count);
-
ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
if (ret)
@@ -643,8 +641,22 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
int i, ret;
+ nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_rdma_init_queue(ctrl, i,
ctrl->ctrl.opts->queue_size);
@@ -1795,20 +1807,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
{
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
int ret;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
- if (ret)
- return ret;
-
- ctrl->queue_count = opts->nr_io_queues + 1;
- if (ctrl->queue_count < 2)
- return 0;
-
- dev_info(ctrl->ctrl.device,
- "creating %d I/O queues.\n", opts->nr_io_queues);
-
ret = nvme_rdma_init_io_queues(ctrl);
if (ret)
return ret;
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb..7b5cf6d 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -672,8 +672,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
- wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 45a89d9..90e0b6f 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -133,6 +133,12 @@ struct mvebu_pcie {
int nports;
};
+struct mvebu_pcie_window {
+ phys_addr_t base;
+ phys_addr_t remap;
+ size_t size;
+};
+
/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
char *name;
@@ -150,10 +156,8 @@ struct mvebu_pcie_port {
struct mvebu_sw_pci_bridge bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
- phys_addr_t memwin_base;
- size_t memwin_size;
- phys_addr_t iowin_base;
- size_t iowin_size;
+ struct mvebu_pcie_window memwin;
+ struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
};
@@ -379,23 +383,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
}
}
+static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ const struct mvebu_pcie_window *desired,
+ struct mvebu_pcie_window *cur)
+{
+ if (desired->base == cur->base && desired->remap == cur->remap &&
+ desired->size == cur->size)
+ return;
+
+ if (cur->size != 0) {
+ mvebu_pcie_del_windows(port, cur->base, cur->size);
+ cur->size = 0;
+ cur->base = 0;
+
+ /*
+ * If something tries to change the window while it is enabled
+ * the change will not be done atomically. That would be
+ * difficult to do in the general case.
+ */
+ }
+
+ if (desired->size == 0)
+ return;
+
+ mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ *cur = *desired;
+}
+
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
- phys_addr_t iobase;
+ struct mvebu_pcie_window desired = {};
/* Are the new iobase/iolimit values invalid? */
if (port->bridge.iolimit < port->bridge.iobase ||
port->bridge.iolimitupper < port->bridge.iobaseupper ||
!(port->bridge.command & PCI_COMMAND_IO)) {
-
- /* If a window was configured, remove it */
- if (port->iowin_base) {
- mvebu_pcie_del_windows(port, port->iowin_base,
- port->iowin_size);
- port->iowin_base = 0;
- port->iowin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
return;
}
@@ -412,32 +438,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- iobase = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
- port->iowin_base = port->pcie->io.start + iobase;
- port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
- iobase) + 1;
+ desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
+ (port->bridge.iobaseupper << 16);
+ desired.base = port->pcie->io.start + desired.remap;
+ desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+ desired.remap) +
+ 1;
- mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
- port->iowin_base, port->iowin_size,
- iobase);
+ mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
+ struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+
/* Are the new membase/memlimit values invalid? */
if (port->bridge.memlimit < port->bridge.membase ||
!(port->bridge.command & PCI_COMMAND_MEMORY)) {
-
- /* If a window was configured, remove it */
- if (port->memwin_base) {
- mvebu_pcie_del_windows(port, port->memwin_base,
- port->memwin_size);
- port->memwin_base = 0;
- port->memwin_size = 0;
- }
-
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
return;
}
@@ -447,14 +468,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
- port->memwin_size =
- (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
- port->memwin_base + 1;
+ desired.base = ((port->bridge.membase & 0xFFF0) << 16);
+ desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base + 1;
- mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
- port->memwin_base, port->memwin_size,
- MVEBU_MBUS_NO_REMAP);
+ mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b57fc6d..d08dfc8 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -586,6 +586,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
PCI_EXP_SLTSTA_DLLSC);
+
+ /*
+ * If we've already reported a power fault, don't report it again
+ * until we've done something to handle it.
+ */
+ if (ctrl->power_fault_detected)
+ events &= ~PCI_EXP_SLTSTA_PFD;
+
if (!events)
return IRQ_NONE;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index de0ea47..e5824c7 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ } else {
+ pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3455f75..0e9a9db 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -730,7 +730,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
ret = 0;
out:
kfree(masks);
- return 0;
+ return ret;
}
static void msix_program_entries(struct pci_dev *dev,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 1b07865..f9f4d1c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -527,7 +527,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@@ -541,12 +541,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -557,8 +560,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len;
- return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 0e75d94..671610c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,7 @@
tristate "AMD GPIO pin control"
depends on GPIOLIB
select GPIOLIB_IRQCHIP
+ select PINMUX
select PINCONF
select GENERIC_PINCONF
help
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 5419de8..0a96502 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1466,7 +1466,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
- comm->pad_map[i], comm->pad_map[i] * 32,
+ comm->pad_map[i], comm->pad_map[i] * 16,
conf0 & 0x7,
conf0 & BYT_TRIG_NEG ? " fall" : " ",
conf0 & BYT_TRIG_POS ? " rise" : " ",
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c9a1469..a5b7bd3 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -32,6 +32,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
#include "pinctrl-utils.h"
#include "pinctrl-amd.h"
@@ -712,6 +713,69 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
+#ifdef CONFIG_PM_SLEEP
+static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
+{
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+ if (!pd)
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+ * by userspace).
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&gpio_dev->gc, pin))
+ return true;
+
+ return false;
+}
+
+int amd_gpio_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ int i;
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+
+ if (!amd_gpio_should_save(gpio_dev, pin))
+ continue;
+
+ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+ }
+
+ return 0;
+}
+
+int amd_gpio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ int i;
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+
+ if (!amd_gpio_should_save(gpio_dev, pin))
+ continue;
+
+ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops amd_gpio_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
+ amd_gpio_resume)
+};
+#endif
+
static struct pinctrl_desc amd_pinctrl_desc = {
.pins = kerncz_pins,
.npins = ARRAY_SIZE(kerncz_pins),
@@ -751,6 +815,14 @@ static int amd_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
+#ifdef CONFIG_PM_SLEEP
+ gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
+ sizeof(*gpio_dev->saved_regs),
+ GFP_KERNEL);
+ if (!gpio_dev->saved_regs)
+ return -ENOMEM;
+#endif
+
gpio_dev->pdev = pdev;
gpio_dev->gc.direction_input = amd_gpio_direction_input;
gpio_dev->gc.direction_output = amd_gpio_direction_output;
@@ -839,6 +911,9 @@ static struct platform_driver amd_gpio_driver = {
.driver = {
.name = "amd_gpio",
.acpi_match_table = ACPI_PTR(amd_gpio_acpi_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &amd_gpio_pm_ops,
+#endif
},
.probe = amd_gpio_probe,
.remove = amd_gpio_remove,
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 7bfea47..e8bbb20 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -95,6 +95,7 @@ struct amd_gpio {
struct gpio_chip gc;
struct resource *res;
struct platform_device *pdev;
+ u32 *saved_regs;
};
/* KERNCZ configuration*/
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e63f1a0..c8f8813 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -818,6 +818,9 @@ static void msm_dirconn_irq_ack(struct irq_data *d)
struct irq_desc *desc = irq_data_to_desc(d);
struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ if (!parent_data)
+ return;
+
if (parent_data->chip->irq_ack)
parent_data->chip->irq_ack(parent_data);
}
@@ -827,6 +830,9 @@ static void msm_dirconn_irq_eoi(struct irq_data *d)
struct irq_desc *desc = irq_data_to_desc(d);
struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ if (!parent_data)
+ return;
+
if (parent_data->chip->irq_eoi)
parent_data->chip->irq_eoi(parent_data);
}
@@ -852,6 +858,9 @@ static int msm_dirconn_irq_set_vcpu_affinity(struct irq_data *d,
struct irq_desc *desc = irq_data_to_desc(d);
struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ if (!parent_data)
+ return 0;
+
if (parent_data->chip->irq_set_vcpu_affinity)
return parent_data->chip->irq_set_vcpu_affinity(parent_data,
vcpu_info);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 8749653..f7af6da 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -54,6 +54,8 @@
.intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
.intr_status_reg = base + 0xc + REG_SIZE * id, \
.intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .dir_conn_reg = (base == NORTH) ? base + 0xa3000 : \
+ ((base == SOUTH) ? base + 0xa6000 : base + 0xa4000), \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -68,6 +70,7 @@
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
.intr_detection_width = 2, \
+ .dir_conn_en_bit = 8, \
}
#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
@@ -1596,7 +1599,7 @@ static const struct msm_dir_conn sdm670_dir_conn[] = {
{24, 517},
{26, 518},
{30, 519},
- {31, 639},
+ {31, 632},
{32, 521},
{34, 522},
{36, 523},
@@ -1604,12 +1607,12 @@ static const struct msm_dir_conn sdm670_dir_conn[] = {
{38, 525},
{39, 526},
{40, 527},
- {41, 637},
+ {41, 630},
{43, 529},
{44, 530},
{46, 531},
{48, 532},
- {49, 640},
+ {49, 633},
{52, 534},
{53, 535},
{54, 536},
@@ -1625,7 +1628,7 @@ static const struct msm_dir_conn sdm670_dir_conn[] = {
{85, 555},
{86, 556},
{88, 557},
- {89, 638},
+ {89, 631},
{91, 559},
{92, 560},
{95, 561},
@@ -1651,6 +1654,14 @@ static const struct msm_dir_conn sdm670_dir_conn[] = {
{132, 621},
{133, 622},
{145, 623},
+ {0, 216},
+ {0, 215},
+ {0, 214},
+ {0, 213},
+ {0, 212},
+ {0, 211},
+ {0, 210},
+ {0, 209},
};
static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
@@ -1663,6 +1674,7 @@ static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
.ngpios = 150,
.dir_conn = sdm670_dir_conn,
.n_dir_conns = ARRAY_SIZE(sdm670_dir_conn),
+ .dir_conn_irq_base = 216,
};
static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index c02881b..6727da6 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -24,8 +24,6 @@
#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_STOP_CMD_TIMEOUT_MS 20
#define GSI_MAX_CH_LOW_WEIGHT 15
-#define GSI_MHI_ER_START 10
-#define GSI_MHI_ER_END 16
#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000
@@ -829,10 +827,23 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
return -GSI_STATUS_ERROR;
}
- /* bitmap is max events excludes reserved events */
+ if (props->mhi_er_id_limits_valid &&
+ props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ gsi_ctx->base = NULL;
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("MHI event ring start id %u is beyond max %u\n",
+ props->mhi_er_id_limits[0], gsi_ctx->max_ev);
+ return -GSI_STATUS_ERROR;
+ }
+
gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
- gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
- ((1 << GSI_MHI_ER_START) - 1);
+
+ /* exclude reserved mhi events */
+ if (props->mhi_er_id_limits_valid)
+ gsi_ctx->evt_bmap |=
+ ((1 << (props->mhi_er_id_limits[1] + 1)) - 1) ^
+ ((1 << (props->mhi_er_id_limits[0])) - 1);
/*
* enable all interrupts but GSI_BREAK_POINT.
@@ -1084,8 +1095,8 @@ static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
(!props->evchid_valid ||
- props->evchid > GSI_MHI_ER_END ||
- props->evchid < GSI_MHI_ER_START)) {
+ props->evchid > gsi_ctx->per.mhi_er_id_limits[1] ||
+ props->evchid < gsi_ctx->per.mhi_er_id_limits[0])) {
GSIERR("MHI requires evchid valid=%d val=%u\n",
props->evchid_valid, props->evchid);
return -GSI_STATUS_INVALID_PARAMS;
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index abb714d..96b9bd6 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1095,7 +1095,7 @@ int ipa_reset_flt(enum ipa_ip_type ip)
EXPORT_SYMBOL(ipa_reset_flt);
/**
- * allocate_nat_device() - Allocates memory for the NAT device
+ * ipa_allocate_nat_device() - Allocates memory for the NAT device
* @mem: [in/out] memory parameters
*
* Called by NAT client driver to allocate memory for the NAT entries. Based on
@@ -1103,15 +1103,55 @@ EXPORT_SYMBOL(ipa_reset_flt);
*
* Returns: 0 on success, negative on failure
*/
-int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
{
int ret;
- IPA_API_DISPATCH_RETURN(allocate_nat_device, mem);
+ IPA_API_DISPATCH_RETURN(ipa_allocate_nat_device, mem);
return ret;
}
-EXPORT_SYMBOL(allocate_nat_device);
+EXPORT_SYMBOL(ipa_allocate_nat_device);
+
+/**
+ * ipa_allocate_nat_table() - Allocates memory for the NAT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by NAT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_allocate_nat_table, table_alloc);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_allocate_nat_table);
+
+
+/**
+ * ipa_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by IPv6CT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_allocate_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_allocate_ipv6ct_table, table_alloc);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_allocate_ipv6ct_table);
/**
* ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
@@ -1132,6 +1172,25 @@ int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
EXPORT_SYMBOL(ipa_nat_init_cmd);
/**
+ * ipa_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by IPv6CT client driver to post IP_V6_CONN_TRACK_INIT command
+ * to IPA HW.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_ipv6ct_init_cmd, init);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_ipv6ct_init_cmd);
+
+/**
* ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
* @dma: [in] initialization command attributes
*
@@ -1150,8 +1209,26 @@ int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
EXPORT_SYMBOL(ipa_nat_dma_cmd);
/**
- * ipa_nat_del_cmd() - Delete a NAT table
- * @del: [in] delete table table table parameters
+ * ipa_table_dma_cmd() - Post TABLE_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT/IPv6CT client to post TABLE_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_table_dma_cmd, dma);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_table_dma_cmd);
+
+/**
+ * ipa_nat_del_cmd() - Delete the NAT table
+ * @del: [in] delete NAT table parameters
*
* Called by NAT client driver to delete the nat table
*
@@ -1168,6 +1245,60 @@ int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
EXPORT_SYMBOL(ipa_nat_del_cmd);
/**
+ * ipa_del_nat_table() - Delete the NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client to delete the table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_nat_table, del);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_nat_table);
+
+/**
+ * ipa_del_ipv6ct_table() - Delete the IPv6CT table
+ * @del: [in] delete table parameters
+ *
+ * Called by IPv6CT client to delete the table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_del_ipv6ct_table, del);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_del_ipv6ct_table);
+
+/**
+ * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+ * @mdfy_pdn: [in] PDN info to be written to SRAM
+ *
+ * Called by NAT client driver to modify an entry in the PDN config table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_nat_mdfy_pdn, mdfy_pdn);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_nat_mdfy_pdn);
+
+/**
* ipa_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
* @buff: [in] the payload for message
@@ -3056,6 +3187,18 @@ int ipa_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
return ret;
}
+/**
+ * ipa_tz_unlock_reg() - Allow AP access to memory regions controlled by TZ
+ */
+int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tz_unlock_reg, reg_info, num_regs);
+
+ return ret;
+}
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 7a48b68..b526711 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -115,14 +115,30 @@ struct ipa_api_controller {
int (*ipa_reset_flt)(enum ipa_ip_type ip);
- int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
+ int (*ipa_allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
+
+ int (*ipa_allocate_nat_table)(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+
+ int (*ipa_allocate_ipv6ct_table)(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init);
+ int (*ipa_ipv6ct_init_cmd)(struct ipa_ioc_ipv6ct_init *init);
+
int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+ int (*ipa_table_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+
int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del);
+ int (*ipa_del_nat_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+ int (*ipa_del_ipv6ct_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+ int (*ipa_nat_mdfy_pdn)(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
+
int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback);
@@ -398,6 +414,9 @@ struct ipa_api_controller {
int (*ipa_disable_wdi3_pipes)(int ipa_ep_idx_tx,
int ipa_ep_idx_rx);
+
+ int (*ipa_tz_unlock_reg)(struct ipa_tz_unlock_reg_info *reg_info,
+ u16 num_regs);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index e18c0d4..7a683ec 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -41,11 +41,6 @@
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 1
-#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
- (IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
-#error not enought event rings for MHI
-#endif
-
/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
@@ -1574,8 +1569,7 @@ int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
internal.start.gsi.mhi = &channel->ch_scratch.mhi;
internal.start.gsi.cached_gsi_evt_ring_hdl =
&channel->cached_gsi_evt_ring_hdl;
- internal.start.gsi.evchid =
- channel->index + IPA_MHI_GSI_ER_START;
+ internal.start.gsi.evchid = channel->index;
res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
if (res) {
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index fe8cbc0..98a1cf9 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -19,6 +19,10 @@
#include <linux/ipa.h>
#include <linux/ipa_uc_offload.h>
#include <linux/ipa_wdi3.h>
+#include <linux/ratelimit.h>
+
+#define WARNON_RATELIMIT_BURST 1
+#define IPA_RATELIMIT_BURST 1
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
@@ -104,6 +108,39 @@
ipa_dec_client_disable_clks(&log_info); \
} while (0)
+/*
+ * Printing one warning message in 5 seconds if multiple warning messages
+ * are coming back to back.
+ */
+
+#define WARN_ON_RATELIMIT_IPA(condition) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ WARNON_RATELIMIT_BURST); \
+ int rtn = !!(condition); \
+ \
+ if (unlikely(rtn && __ratelimit(&_rs))) \
+ WARN_ON(rtn); \
+})
+
+/*
+ * Printing one error message in 5 seconds if multiple error messages
+ * are coming back to back.
+ */
+
+#define pr_err_ratelimited_ipa(fmt, ...) \
+ printk_ratelimited_ipa(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define printk_ratelimited_ipa(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ IPA_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+
#define ipa_assert_on(condition)\
do {\
if (unlikely(condition))\
@@ -145,9 +182,6 @@ struct ipa_mem_buffer {
u32 size;
};
-#define IPA_MHI_GSI_ER_START 10
-#define IPA_MHI_GSI_ER_END 16
-
/**
* enum ipa3_mhi_burst_mode - MHI channel burst mode state
*
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 6230356..c760f75 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -602,8 +602,6 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
return -ENOTTY;
- if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
- return -ENOTTY;
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
@@ -1465,7 +1463,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
- default: /* redundant, as cmd was checked against MAXNR */
+ default:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 7aa7ffd..a249567 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -86,7 +86,9 @@ const char *ipa_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd59140..5228b2d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
start_ipv6_filter_idx),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 210ddfe..e611abd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4969,7 +4969,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
api_ctrl->ipa_commit_flt = ipa2_commit_flt;
api_ctrl->ipa_reset_flt = ipa2_reset_flt;
- api_ctrl->allocate_nat_device = ipa2_allocate_nat_device;
+ api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device;
api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e5aa6ef..59d93f3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -86,6 +86,9 @@
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+#define IPA_MHI_GSI_EVENT_RING_ID_START 10
+#define IPA_MHI_GSI_EVENT_RING_ID_END 12
+
#define IPA_SMEM_SIZE (8 * 1024)
#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000
@@ -147,15 +150,33 @@
#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ALLOC_NAT_MEM, \
compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+ compat_uptr_t)
#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_INIT_NAT, \
compat_uptr_t)
-#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
- IPA_IOCTL_NAT_DMA, \
+#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_INIT_IPV6CT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_TABLE_DMA_CMD, \
compat_uptr_t)
#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_DEL_NAT, \
compat_uptr_t)
+#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_NAT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_IPV6CT_TABLE, \
+ compat_uptr_t)
+#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_NAT_MODIFY_PDN, \
+ compat_uptr_t)
#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_NAT_OFFSET, \
compat_uptr_t)
@@ -211,6 +232,18 @@ struct ipa3_ioc_nat_alloc_mem32 {
compat_size_t size;
compat_off_t offset;
};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc32 {
+ compat_size_t size;
+ compat_off_t offset;
+};
+
#endif
#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
@@ -545,7 +578,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
return -ENOMEM;
}
- if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+ if (copy_from_user(wan_msg, (const void __user *)usr_param,
sizeof(struct ipa_wan_msg))) {
kfree(wan_msg);
return -EFAULT;
@@ -677,10 +710,14 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
u8 header[128] = { 0 };
u8 *param = NULL;
struct ipa_ioc_nat_alloc_mem nat_mem;
+ struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
struct ipa_ioc_v4_nat_init nat_init;
+ struct ipa_ioc_ipv6ct_init ipv6ct_init;
struct ipa_ioc_v4_nat_del nat_del;
+ struct ipa_ioc_nat_ipv6ct_table_del table_del;
struct ipa_ioc_nat_pdn_entry mdfy_pdn;
struct ipa_ioc_rm_dependency rm_depend;
+ struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
size_t sz;
int pre_entry;
@@ -688,8 +725,6 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
return -ENOTTY;
- if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
- return -ENOTTY;
if (!ipa3_is_ready()) {
IPAERR("IPA not ready, waiting for init completion\n");
@@ -700,8 +735,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case IPA_IOC_ALLOC_NAT_MEM:
- if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
- sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ if (copy_from_user(&nat_mem, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
retval = -EFAULT;
break;
}
@@ -712,15 +747,53 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
- sizeof(struct ipa_ioc_nat_alloc_mem))) {
+ if (copy_to_user((void __user *)arg, &nat_mem,
+ sizeof(struct ipa_ioc_nat_alloc_mem))) {
retval = -EFAULT;
break;
}
break;
+ case IPA_IOC_ALLOC_NAT_TABLE:
+ if (copy_from_user(&table_alloc, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa3_allocate_nat_table(&table_alloc)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (table_alloc.offset &&
+ copy_to_user((void __user *)arg, &table_alloc, sizeof(
+ struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ALLOC_IPV6CT_TABLE:
+ if (copy_from_user(&table_alloc, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (ipa3_allocate_ipv6ct_table(&table_alloc)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (table_alloc.offset &&
+ copy_to_user((void __user *)arg, &table_alloc, sizeof(
+ struct ipa_ioc_nat_ipv6ct_table_alloc))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case IPA_IOC_V4_INIT_NAT:
- if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
- sizeof(struct ipa_ioc_v4_nat_init))) {
+ if (copy_from_user(&nat_init, (const void __user *)arg,
+ sizeof(struct ipa_ioc_v4_nat_init))) {
retval = -EFAULT;
break;
}
@@ -730,45 +803,56 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
- case IPA_IOC_NAT_DMA:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ case IPA_IOC_INIT_IPV6CT_TABLE:
+ if (copy_from_user(&ipv6ct_init, (const void __user *)arg,
+ sizeof(struct ipa_ioc_ipv6ct_init))) {
retval = -EFAULT;
break;
}
- pre_entry =
- ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
- pyld_sz =
- sizeof(struct ipa_ioc_nat_dma_cmd) +
- pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
+ if (ipa3_ipv6ct_init_cmd(&ipv6ct_init)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_TABLE_DMA_CMD:
+ table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)header;
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_dma_cmd))) {
+ retval = -EFAULT;
+ break;
+ }
+ pre_entry = table_dma_cmd->entries;
+ pyld_sz = sizeof(struct ipa_ioc_nat_dma_cmd) +
+ pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
+ table_dma_cmd = (struct ipa_ioc_nat_dma_cmd *)param;
+
/* add check in case user-space module compromised */
- if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
- != pre_entry)) {
+ if (unlikely(table_dma_cmd->entries != pre_entry)) {
IPAERR_RL("current %d pre %d\n",
- ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
- pre_entry);
+ table_dma_cmd->entries, pre_entry);
retval = -EFAULT;
break;
}
- if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+ if (ipa3_table_dma_cmd(table_dma_cmd)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_V4_DEL_NAT:
- if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
- sizeof(struct ipa_ioc_v4_nat_del))) {
+ if (copy_from_user(&nat_del, (const void __user *)arg,
+ sizeof(struct ipa_ioc_v4_nat_del))) {
retval = -EFAULT;
break;
}
@@ -778,8 +862,32 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
+ case IPA_IOC_DEL_NAT_TABLE:
+ if (copy_from_user(&table_del, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_nat_table(&table_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_DEL_IPV6CT_TABLE:
+ if (copy_from_user(&table_del, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_del_ipv6ct_table(&table_del)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case IPA_IOC_NAT_MODIFY_PDN:
- if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg,
+ if (copy_from_user(&mdfy_pdn, (const void __user *)arg,
sizeof(struct ipa_ioc_nat_pdn_entry))) {
retval = -EFAULT;
break;
@@ -791,8 +899,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
case IPA_IOC_ADD_HDR:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_add_hdr))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_hdr))) {
retval = -EFAULT;
break;
}
@@ -806,7 +914,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -823,15 +931,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_DEL_HDR:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_del_hdr))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_del_hdr))) {
retval = -EFAULT;
break;
}
@@ -845,7 +953,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -863,15 +971,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_ADD_RT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_add_rt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule))) {
retval = -EFAULT;
break;
}
@@ -885,7 +993,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -903,13 +1011,56 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case IPA_IOC_ADD_RT_RULE_EXT:
+ if (copy_from_user(header,
+ (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+ retval = -EFAULT;
+ break;
+ }
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule_ext) +
+ pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ /* add check in case user-space module compromised */
+ if (unlikely(
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+ != pre_entry)) {
+ IPAERR(" prevent memory corruption(%d not match %d)\n",
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EINVAL;
+ break;
+ }
+ if (ipa3_add_rt_rule_ext(
+ (struct ipa_ioc_add_rt_rule_ext *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_ADD_RT_RULE_AFTER:
- if (copy_from_user(header, (u8 *)arg,
+ if (copy_from_user(header, (const void __user *)arg,
sizeof(struct ipa_ioc_add_rt_rule_after))) {
retval = -EFAULT;
@@ -925,7 +1076,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -945,15 +1096,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_MDFY_RT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_mdfy_rt_rule))) {
retval = -EFAULT;
break;
}
@@ -967,7 +1118,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -985,15 +1136,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_DEL_RT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_del_rt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_del_rt_rule))) {
retval = -EFAULT;
break;
}
@@ -1007,7 +1158,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1024,15 +1175,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_ADD_FLT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_add_flt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule))) {
retval = -EFAULT;
break;
}
@@ -1046,7 +1197,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1064,15 +1215,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_ADD_FLT_RULE_AFTER:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_add_flt_rule_after))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_flt_rule_after))) {
retval = -EFAULT;
break;
@@ -1088,7 +1239,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1107,15 +1258,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_DEL_FLT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_del_flt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_del_flt_rule))) {
retval = -EFAULT;
break;
}
@@ -1129,7 +1280,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1147,15 +1298,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_MDFY_FLT_RULE:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_mdfy_flt_rule))) {
retval = -EFAULT;
break;
}
@@ -1169,7 +1320,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1187,7 +1338,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1212,8 +1363,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = ipa3_reset_flt(arg);
break;
case IPA_IOC_GET_RT_TBL:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_get_rt_tbl))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl))) {
retval = -EFAULT;
break;
}
@@ -1221,7 +1372,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, header,
+ if (copy_to_user((void __user *)arg, header,
sizeof(struct ipa_ioc_get_rt_tbl))) {
retval = -EFAULT;
break;
@@ -1231,8 +1382,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = ipa3_put_rt_tbl(arg);
break;
case IPA_IOC_GET_HDR:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_get_hdr))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_get_hdr))) {
retval = -EFAULT;
break;
}
@@ -1240,8 +1391,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, header,
- sizeof(struct ipa_ioc_get_hdr))) {
+ if (copy_to_user((void __user *)arg, header,
+ sizeof(struct ipa_ioc_get_hdr))) {
retval = -EFAULT;
break;
}
@@ -1253,8 +1404,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = ipa3_cfg_filter(arg);
break;
case IPA_IOC_COPY_HDR:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_copy_hdr))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_copy_hdr))) {
retval = -EFAULT;
break;
}
@@ -1262,15 +1413,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, header,
- sizeof(struct ipa_ioc_copy_hdr))) {
+ if (copy_to_user((void __user *)arg, header,
+ sizeof(struct ipa_ioc_copy_hdr))) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_QUERY_INTF:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_query_intf))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_query_intf))) {
retval = -EFAULT;
break;
}
@@ -1278,21 +1429,21 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -1;
break;
}
- if (copy_to_user((u8 *)arg, header,
- sizeof(struct ipa_ioc_query_intf))) {
+ if (copy_to_user((void __user *)arg, header,
+ sizeof(struct ipa_ioc_query_intf))) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_QUERY_INTF_TX_PROPS:
sz = sizeof(struct ipa_ioc_query_intf_tx_props);
- if (copy_from_user(header, (u8 *)arg, sz)) {
+ if (copy_from_user(header, (const void __user *)arg, sz)) {
retval = -EFAULT;
break;
}
if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
- > IPA_NUM_PROPS_MAX) {
+ > IPA_NUM_PROPS_MAX) {
retval = -EFAULT;
break;
}
@@ -1306,7 +1457,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1321,24 +1472,24 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
if (ipa3_query_intf_tx_props(
- (struct ipa_ioc_query_intf_tx_props *)param)) {
+ (struct ipa_ioc_query_intf_tx_props *)param)) {
retval = -1;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_QUERY_INTF_RX_PROPS:
sz = sizeof(struct ipa_ioc_query_intf_rx_props);
- if (copy_from_user(header, (u8 *)arg, sz)) {
+ if (copy_from_user(header, (const void __user *)arg, sz)) {
retval = -EFAULT;
break;
}
if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
- > IPA_NUM_PROPS_MAX) {
+ > IPA_NUM_PROPS_MAX) {
retval = -EFAULT;
break;
}
@@ -1352,7 +1503,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1366,24 +1517,24 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
if (ipa3_query_intf_rx_props(
- (struct ipa_ioc_query_intf_rx_props *)param)) {
+ (struct ipa_ioc_query_intf_rx_props *)param)) {
retval = -1;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_QUERY_INTF_EXT_PROPS:
sz = sizeof(struct ipa_ioc_query_intf_ext_props);
- if (copy_from_user(header, (u8 *)arg, sz)) {
+ if (copy_from_user(header, (const void __user *)arg, sz)) {
retval = -EFAULT;
break;
}
if (((struct ipa_ioc_query_intf_ext_props *)
- header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+ header)->num_ext_props > IPA_NUM_PROPS_MAX) {
retval = -EFAULT;
break;
}
@@ -1397,7 +1548,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1411,18 +1562,18 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
if (ipa3_query_intf_ext_props(
- (struct ipa_ioc_query_intf_ext_props *)param)) {
+ (struct ipa_ioc_query_intf_ext_props *)param)) {
retval = -1;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_PULL_MSG:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_msg_meta))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_msg_meta))) {
retval = -EFAULT;
break;
}
@@ -1435,7 +1586,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1449,13 +1600,13 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
if (ipa3_pull_msg((struct ipa_msg_meta *)param,
- (char *)param + sizeof(struct ipa_msg_meta),
- ((struct ipa_msg_meta *)param)->msg_len) !=
- ((struct ipa_msg_meta *)param)->msg_len) {
+ (char *)param + sizeof(struct ipa_msg_meta),
+ ((struct ipa_msg_meta *)param)->msg_len) !=
+ ((struct ipa_msg_meta *)param)->msg_len) {
retval = -1;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1465,8 +1616,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ipa3_ctx->use_ipa_pm)
return 0;
- if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
- sizeof(struct ipa_ioc_rm_dependency))) {
+ if (copy_from_user(&rm_depend, (const void __user *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
retval = -EFAULT;
break;
}
@@ -1478,8 +1629,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ipa3_ctx->use_ipa_pm)
return 0;
- if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
- sizeof(struct ipa_ioc_rm_dependency))) {
+ if (copy_from_user(&rm_depend, (const void __user *)arg,
+ sizeof(struct ipa_ioc_rm_dependency))) {
retval = -EFAULT;
break;
}
@@ -1490,7 +1641,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ipa_ioc_generate_flt_eq flt_eq;
- if (copy_from_user(&flt_eq, (u8 *)arg,
+ if (copy_from_user(&flt_eq, (const void __user *)arg,
sizeof(struct ipa_ioc_generate_flt_eq))) {
retval = -EFAULT;
break;
@@ -1500,7 +1651,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, &flt_eq,
+ if (copy_to_user((void __user *)arg, &flt_eq,
sizeof(struct ipa_ioc_generate_flt_eq))) {
retval = -EFAULT;
break;
@@ -1513,25 +1664,25 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
case IPA_IOC_QUERY_RT_TBL_INDEX:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
retval = -EFAULT;
break;
}
if (ipa3_query_rt_index(
- (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+ (struct ipa_ioc_get_rt_tbl_indx *)header)) {
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, header,
- sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+ if (copy_to_user((void __user *)arg, header,
+ sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_WRITE_QMAPID:
- if (copy_from_user(header, (u8 *)arg,
- sizeof(struct ipa_ioc_write_qmapid))) {
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_write_qmapid))) {
retval = -EFAULT;
break;
}
@@ -1539,8 +1690,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, header,
- sizeof(struct ipa_ioc_write_qmapid))) {
+ if (copy_to_user((void __user *)arg, header,
+ sizeof(struct ipa_ioc_write_qmapid))) {
retval = -EFAULT;
break;
}
@@ -1567,7 +1718,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
case IPA_IOC_ADD_HDR_PROC_CTX:
- if (copy_from_user(header, (u8 *)arg,
+ if (copy_from_user(header, (const void __user *)arg,
sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
retval = -EFAULT;
break;
@@ -1583,7 +1734,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1601,13 +1752,13 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
break;
case IPA_IOC_DEL_HDR_PROC_CTX:
- if (copy_from_user(header, (u8 *)arg,
+ if (copy_from_user(header, (const void __user *)arg,
sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
retval = -EFAULT;
break;
@@ -1622,7 +1773,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -ENOMEM;
break;
}
- if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1641,7 +1792,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1655,7 +1806,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
- if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
retval = -EFAULT;
break;
}
@@ -1689,7 +1840,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
- default: /* redundant, as cmd was checked against MAXNR */
+ default:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
@@ -1713,8 +1864,8 @@ int ipa3_setup_dflt_rt_tables(void)
struct ipa_rt_rule_add *rt_rule_entry;
rt_rule =
- kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
- sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+ kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+ sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
if (!rt_rule) {
IPAERR("fail to alloc mem\n");
return -ENOMEM;
@@ -1724,7 +1875,7 @@ int ipa3_setup_dflt_rt_tables(void)
rt_rule->commit = 1;
rt_rule->ip = IPA_IP_v4;
strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
- IPA_RESOURCE_NAME_MAX);
+ IPA_RESOURCE_NAME_MAX);
rt_rule_entry = &rt_rule->rules[0];
rt_rule_entry->at_rear = 1;
@@ -1853,10 +2004,7 @@ static int ipa3_init_smem_region(int memory_region_size,
IPAERR("failed to construct dma_shared_mem imm cmd\n");
return -ENOMEM;
}
- desc.opcode = cmd_pyld->opcode;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
- desc.type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
rc = ipa3_send_cmd(1, &desc);
if (rc) {
@@ -1968,6 +2116,12 @@ static void ipa3_q6_avoid_holb(void)
if (ep_idx == -1)
continue;
+ /* from IPA 4.0 pipe suspend is not supported */
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_suspend);
+
/*
* ipa3_cfg_ep_holb is not used here because we are
* setting HOLB on Q6 pipes, and from APPS perspective
@@ -1980,12 +2134,6 @@ static void ipa3_q6_avoid_holb(void)
ipahal_write_reg_n_fields(
IPA_ENDP_INIT_HOL_BLOCK_EN_n,
ep_idx, &ep_holb);
-
- /* from IPA 4.0 pipe suspend is not supported */
- if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
- ipahal_write_reg_n_fields(
- IPA_ENDP_INIT_CTRL_n,
- ep_idx, &ep_suspend);
}
}
}
@@ -2117,6 +2265,12 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
if (!ipa3_ctx->ep[pipe_idx].valid ||
ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
+ if (num_cmds >= ipa3_ctx->ep_flt_num) {
+ IPAERR("number of commands is out of range\n");
+ retval = -ENOBUFS;
+ goto free_empty_img;
+ }
+
cmd.is_read = false;
cmd.skip_pipeline_clear = false;
cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -2134,14 +2288,12 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
retval = -ENOMEM;
goto free_empty_img;
}
- desc[num_cmds].opcode = cmd_pyld[num_cmds]->opcode;
- desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
- desc[num_cmds].len = cmd_pyld[num_cmds]->len;
- desc[num_cmds].type = IPA_IMM_CMD_DESC;
- num_cmds++;
+ ipa3_init_imm_cmd_desc(&desc[num_cmds],
+ cmd_pyld[num_cmds]);
+ ++num_cmds;
}
- flt_idx++;
+ ++flt_idx;
}
IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
@@ -2233,10 +2385,7 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
retval = -ENOMEM;
goto free_desc;
}
- desc->opcode = cmd_pyld->opcode;
- desc->pyld = cmd_pyld->data;
- desc->len = cmd_pyld->len;
- desc->type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(desc, cmd_pyld);
IPADBG("Sending 1 descriptor for rt tbl clearing\n");
retval = ipa3_send_cmd(1, desc);
@@ -2323,10 +2472,7 @@ static int ipa3_q6_clean_q6_tables(void)
retval = -EFAULT;
goto bail_desc;
}
- desc->opcode = cmd_pyld->opcode;
- desc->pyld = cmd_pyld->data;
- desc->len = cmd_pyld->len;
- desc->type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(desc, cmd_pyld);
IPADBG("Sending 1 descriptor for tbls flush\n");
retval = ipa3_send_cmd(1, desc);
@@ -2389,13 +2535,10 @@ static int ipa3_q6_set_ex_path_to_apps(void)
return -EFAULT;
}
- desc[num_descs].opcode = cmd_pyld->opcode;
- desc[num_descs].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_descs], cmd_pyld);
desc[num_descs].callback = ipa3_destroy_imm;
desc[num_descs].user1 = cmd_pyld;
- desc[num_descs].pyld = cmd_pyld->data;
- desc[num_descs].len = cmd_pyld->len;
- num_descs++;
+ ++num_descs;
}
}
@@ -2565,7 +2708,7 @@ int _ipa_init_sram_v3(void)
*/
int _ipa_init_hdr_v3_0(void)
{
- struct ipa3_desc desc = { 0 };
+ struct ipa3_desc desc;
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_hdr_init_local cmd = {0};
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -2593,10 +2736,7 @@ int _ipa_init_hdr_v3_0(void)
mem.phys_base);
return -EFAULT;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2620,7 +2760,6 @@ int _ipa_init_hdr_v3_0(void)
return -ENOMEM;
}
memset(mem.base, 0, mem.size);
- memset(&desc, 0, sizeof(desc));
dma_cmd.is_read = false;
dma_cmd.skip_pipeline_clear = false;
@@ -2638,10 +2777,7 @@ int _ipa_init_hdr_v3_0(void)
mem.phys_base);
return -EFAULT;
}
- desc.opcode = cmd_pyld->opcode;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
- desc.type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2669,7 +2805,7 @@ int _ipa_init_hdr_v3_0(void)
*/
int _ipa_init_rt4_v3(void)
{
- struct ipa3_desc desc = { 0 };
+ struct ipa3_desc desc;
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -2710,10 +2846,7 @@ int _ipa_init_rt4_v3(void)
goto free_mem;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2735,7 +2868,7 @@ int _ipa_init_rt4_v3(void)
*/
int _ipa_init_rt6_v3(void)
{
- struct ipa3_desc desc = { 0 };
+ struct ipa3_desc desc;
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -2776,10 +2909,7 @@ int _ipa_init_rt6_v3(void)
goto free_mem;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2801,7 +2931,7 @@ int _ipa_init_rt6_v3(void)
*/
int _ipa_init_flt4_v3(void)
{
- struct ipa3_desc desc = { 0 };
+ struct ipa3_desc desc;
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -2836,10 +2966,7 @@ int _ipa_init_flt4_v3(void)
goto free_mem;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2861,7 +2988,7 @@ int _ipa_init_flt4_v3(void)
*/
int _ipa_init_flt6_v3(void)
{
- struct ipa3_desc desc = { 0 };
+ struct ipa3_desc desc;
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -2897,10 +3024,7 @@ int _ipa_init_flt6_v3(void)
goto free_mem;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -3118,9 +3242,38 @@ static void ipa3_teardown_apps_pipes(void)
}
#ifdef CONFIG_COMPAT
+
+static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg,
+ int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *))
+{
+ long retval;
+ struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32;
+ struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc;
+
+ retval = copy_from_user(&table_alloc32, (const void __user *)arg,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
+ if (retval)
+ return retval;
+
+ table_alloc.size = (size_t)table_alloc32.size;
+ table_alloc.offset = (off_t)table_alloc32.offset;
+
+ retval = alloc_func(&table_alloc);
+ if (retval)
+ return retval;
+
+ if (table_alloc.offset) {
+ table_alloc32.offset = (compat_off_t)table_alloc.offset;
+ retval = copy_to_user((void __user *)arg, &table_alloc32,
+ sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32));
+ }
+
+ return retval;
+}
+
long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- int retval = 0;
+ long retval = 0;
struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
struct ipa_ioc_nat_alloc_mem nat_mem;
@@ -3165,11 +3318,10 @@ long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
cmd = IPA_IOC_GET_HDR;
break;
case IPA_IOC_ALLOC_NAT_MEM32:
- if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
- sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
- retval = -EFAULT;
- goto ret;
- }
+ retval = copy_from_user(&nat_mem32, (const void __user *)arg,
+ sizeof(struct ipa3_ioc_nat_alloc_mem32));
+ if (retval)
+ return retval;
memcpy(nat_mem.dev_name, nat_mem32.dev_name,
IPA_RESOURCE_NAME_MAX);
nat_mem.size = (size_t)nat_mem32.size;
@@ -3178,26 +3330,40 @@ long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* null terminate the string */
nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
- if (ipa3_allocate_nat_device(&nat_mem)) {
- retval = -EFAULT;
- goto ret;
- }
+ retval = ipa3_allocate_nat_device(&nat_mem);
+ if (retval)
+ return retval;
nat_mem32.offset = (compat_off_t)nat_mem.offset;
- if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
- sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
- retval = -EFAULT;
- }
-ret:
+ retval = copy_to_user((void __user *)arg, &nat_mem32,
+ sizeof(struct ipa3_ioc_nat_alloc_mem32));
return retval;
+ case IPA_IOC_ALLOC_NAT_TABLE32:
+ return compat_ipa3_nat_ipv6ct_alloc_table(arg,
+ ipa3_allocate_nat_table);
+ case IPA_IOC_ALLOC_IPV6CT_TABLE32:
+ return compat_ipa3_nat_ipv6ct_alloc_table(arg,
+ ipa3_allocate_ipv6ct_table);
case IPA_IOC_V4_INIT_NAT32:
cmd = IPA_IOC_V4_INIT_NAT;
break;
- case IPA_IOC_NAT_DMA32:
- cmd = IPA_IOC_NAT_DMA;
+ case IPA_IOC_INIT_IPV6CT_TABLE32:
+ cmd = IPA_IOC_INIT_IPV6CT_TABLE;
+ break;
+ case IPA_IOC_TABLE_DMA_CMD32:
+ cmd = IPA_IOC_TABLE_DMA_CMD;
break;
case IPA_IOC_V4_DEL_NAT32:
cmd = IPA_IOC_V4_DEL_NAT;
break;
+ case IPA_IOC_DEL_NAT_TABLE32:
+ cmd = IPA_IOC_DEL_NAT_TABLE;
+ break;
+ case IPA_IOC_DEL_IPV6CT_TABLE32:
+ cmd = IPA_IOC_DEL_IPV6CT_TABLE;
+ break;
+ case IPA_IOC_NAT_MODIFY_PDN32:
+ cmd = IPA_IOC_NAT_MODIFY_PDN;
+ break;
case IPA_IOC_GET_NAT_OFFSET32:
cmd = IPA_IOC_GET_NAT_OFFSET;
break;
@@ -4286,6 +4452,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
gsi_props.req_clk_cb = NULL;
gsi_props.rel_clk_cb = NULL;
+ if (ipa3_ctx->ipa_config_is_mhi) {
+ gsi_props.mhi_er_id_limits_valid = true;
+ gsi_props.mhi_er_id_limits[0] = resource_p->mhi_evid_limits[0];
+ gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
+ }
+
result = gsi_register_device(&gsi_props,
&ipa3_ctx->gsi_dev_hdl);
if (result != GSI_STATUS_SUCCESS) {
@@ -4346,6 +4518,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
+ ipa3_ctx->q6_proxy_clk_vote_cnt++;
mutex_lock(&ipa3_ctx->lock);
ipa3_ctx->ipa_initialization_complete = true;
@@ -4492,50 +4665,70 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
return count;
}
-static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
+/**
+ * ipa3_tz_unlock_reg - Unlocks memory regions so that they become accessible
+ * from AP.
+ * @reg_info - Pointer to array of memory regions to unlock
+ * @num_regs - Number of elements in the array
+ *
+ * Converts the input array of regions to a struct that TZ understands and
+ * issues an SCM call.
+ * Also flushes the memory cache to DDR in order to make sure that TZ sees the
+ * correct data structure.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
{
int i, size, ret, resp;
struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
struct tz_smmu_ipa_protect_region_s cmd_buf;
+ struct scm_desc desc = {0};
- if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
- size = ipa3_ctx->ipa_tz_unlock_reg_num *
- sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
- ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
- if (ipa_tz_unlock_vec == NULL)
- return -ENOMEM;
-
- for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
- ipa_tz_unlock_vec[i].input_addr =
- ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
- (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
- 0xFFF);
- ipa_tz_unlock_vec[i].output_addr =
- ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
- (ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
- 0xFFF);
- ipa_tz_unlock_vec[i].size =
- ipa3_ctx->ipa_tz_unlock_reg[i].size;
- ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
- }
-
- /* pass physical address of command buffer */
- cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
- cmd_buf.size_bytes = size;
-
- /* flush cache to DDR */
- __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
- outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
-
- ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
- sizeof(cmd_buf), &resp, sizeof(resp));
- if (ret) {
- IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
- kfree(ipa_tz_unlock_vec);
- return -EFAULT;
- }
- kfree(ipa_tz_unlock_vec);
+ if (reg_info == NULL || num_regs == 0) {
+ IPAERR("Bad parameters\n");
+ return -EFAULT;
}
+
+ size = num_regs * sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
+ ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
+ if (ipa_tz_unlock_vec == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regs; i++) {
+ ipa_tz_unlock_vec[i].input_addr = reg_info[i].reg_addr ^
+ (reg_info[i].reg_addr & 0xFFF);
+ ipa_tz_unlock_vec[i].output_addr = reg_info[i].reg_addr ^
+ (reg_info[i].reg_addr & 0xFFF);
+ ipa_tz_unlock_vec[i].size = reg_info[i].size;
+ ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
+ }
+
+ /* pass physical address of command buffer */
+ cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
+ cmd_buf.size_bytes = size;
+
+ /* flush cache to DDR */
+ __cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
+ outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID,
+ &cmd_buf, sizeof(cmd_buf), &resp, sizeof(resp));
+ else {
+ desc.args[0] = virt_to_phys((void *)ipa_tz_unlock_vec);
+ desc.args[1] = size;
+ desc.arginfo = SCM_ARGS(2, SCM_RO, SCM_VAL);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ TZ_MEM_PROTECT_REGION_ID), &desc);
+ }
+
+ if (ret) {
+ IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
+ kfree(ipa_tz_unlock_vec);
+ return -EFAULT;
+ }
+ kfree(ipa_tz_unlock_vec);
+
return 0;
}
@@ -4667,6 +4860,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+ ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0];
+ ipa3_ctx->mhi_evid_limits[1] = resource_p->mhi_evid_limits[1];
if (resource_p->ipa_tz_unlock_reg) {
ipa3_ctx->ipa_tz_unlock_reg_num =
resource_p->ipa_tz_unlock_reg_num;
@@ -4687,7 +4882,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
}
/* unlock registers for uc */
- ipa3_tz_unlock_reg(ipa3_ctx);
+ result = ipa3_tz_unlock_reg(ipa3_ctx->ipa_tz_unlock_reg,
+ ipa3_ctx->ipa_tz_unlock_reg_num);
+ if (result)
+ IPAERR("Failed to unlock memory region using TZ\n");
/* default aggregation parameters */
ipa3_ctx->aggregation_type = IPA_MBIM_16;
@@ -4939,9 +5137,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
mutex_init(&ipa3_ctx->msg_lock);
mutex_init(&ipa3_ctx->lock);
- mutex_init(&ipa3_ctx->nat_mem.lock);
mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex);
mutex_init(&ipa3_ctx->ipa_cne_evt_lock);
+ ipa3_ctx->q6_proxy_clk_vote_cnt = 0;
idr_init(&ipa3_ctx->ipa_idr);
spin_lock_init(&ipa3_ctx->idr_lock);
@@ -4969,10 +5167,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_device_create;
}
- if (ipa3_create_nat_device()) {
- IPAERR("unable to create nat device\n");
+ if (ipa3_nat_ipv6ct_init_devices()) {
+ IPAERR("unable to init NAT and IPv6CT devices\n");
result = -ENODEV;
- goto fail_nat_dev_add;
+ goto fail_nat_ipv6ct_init_dev;
}
/* Create a wakeup source. */
@@ -5071,7 +5269,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
if (!ipa3_ctx->use_ipa_pm)
ipa_rm_exit();
fail_ipa_rm_init:
-fail_nat_dev_add:
+ ipa3_nat_ipv6ct_destroy_devices();
+fail_nat_ipv6ct_init_dev:
device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
fail_device_create:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
@@ -5240,6 +5439,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
struct resource *resource;
u32 *ipa_tz_unlock_reg;
int elem_num;
+ u32 mhi_evid_limits[2];
/* initialize ipa3_res */
ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
@@ -5256,6 +5456,8 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->gsi_ch20_wa = false;
ipa_drv_res->ipa_tz_unlock_reg_num = 0;
ipa_drv_res->ipa_tz_unlock_reg = NULL;
+ ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START;
+ ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END;
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
@@ -5432,6 +5634,34 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
? "Needed" : "Not needed");
elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+ "qcom,mhi-event-ring-id-limits", sizeof(u32));
+
+ if (elem_num == 2) {
+ if (of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,mhi-event-ring-id-limits", mhi_evid_limits, 2)) {
+ IPAERR("failed to read mhi event ring id limits\n");
+ return -EFAULT;
+ }
+ if (mhi_evid_limits[0] > mhi_evid_limits[1]) {
+ IPAERR("mhi event ring id low limit > high limit\n");
+ return -EFAULT;
+ }
+ ipa_drv_res->mhi_evid_limits[0] = mhi_evid_limits[0];
+ ipa_drv_res->mhi_evid_limits[1] = mhi_evid_limits[1];
+ IPADBG(": mhi-event-ring-id-limits start=%u end=%u\n",
+ mhi_evid_limits[0], mhi_evid_limits[1]);
+ } else {
+ if (elem_num > 0) {
+ IPAERR("Invalid mhi event ring id limits number %d\n",
+ elem_num);
+ return -EINVAL;
+ }
+ IPADBG("use default mhi evt ring id limits start=%u end=%u\n",
+ ipa_drv_res->mhi_evid_limits[0],
+ ipa_drv_res->mhi_evid_limits[1]);
+ }
+
+ elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
"qcom,ipa-tz-unlock-reg", sizeof(u32));
if (elem_num > 0 && elem_num % 2 == 0) {
@@ -5465,7 +5695,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_tz_unlock_reg[pos++];
ipa_drv_res->ipa_tz_unlock_reg[i].size =
ipa_tz_unlock_reg[pos++];
- IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
+ IPADBG("tz unlock reg %d: addr 0x%pa size %llu\n", i,
&ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
ipa_drv_res->ipa_tz_unlock_reg[i].size);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index a859ff7..59fe07f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -995,6 +995,7 @@ static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
int res;
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 ||
!stop_in_proc) {
@@ -1041,6 +1042,7 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
bool stop_in_proc;
struct ipa3_ep_context *ep;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1157,6 +1159,7 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
ep->ep_delay_set = false;
}
}
+ IPADBG("exit\n");
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index f72f41c..5da83e5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -17,7 +17,9 @@
#include <linux/stringify.h>
#include "ipa_i.h"
#include "../ipa_rm_i.h"
+#include "ipahal/ipahal_nat.h"
+#define IPA_MAX_ENTRY_STRING_LEN 500
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_MAX_RULE_IN_TBL 128
#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
@@ -26,17 +28,18 @@
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
-const char *ipa3_excp_name[] = {
- __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
- __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+#define IPA_READ_ONLY_MODE 0444
+#define IPA_READ_WRITE_MODE 0664
+#define IPA_WRITE_ONLY_MODE 0220
+
+struct ipa3_debugfs_file {
+ const char *name;
+ umode_t mode;
+ void *data;
+ const struct file_operations fops;
};
+
const char *ipa3_event_name[] = {
__stringify(WLAN_CLIENT_CONNECT),
__stringify(WLAN_CLIENT_DISCONNECT),
@@ -67,7 +70,9 @@ const char *ipa3_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa3_hdr_l2_type_name[] = {
@@ -87,30 +92,6 @@ const char *ipa3_hdr_proc_type_name[] = {
};
static struct dentry *dent;
-static struct dentry *dfile_gen_reg;
-static struct dentry *dfile_ep_reg;
-static struct dentry *dfile_keep_awake;
-static struct dentry *dfile_ep_holb;
-static struct dentry *dfile_hdr;
-static struct dentry *dfile_proc_ctx;
-static struct dentry *dfile_ip4_rt;
-static struct dentry *dfile_ip4_rt_hw;
-static struct dentry *dfile_ip6_rt;
-static struct dentry *dfile_ip6_rt_hw;
-static struct dentry *dfile_ip4_flt;
-static struct dentry *dfile_ip4_flt_hw;
-static struct dentry *dfile_ip6_flt;
-static struct dentry *dfile_ip6_flt_hw;
-static struct dentry *dfile_stats;
-static struct dentry *dfile_wstats;
-static struct dentry *dfile_wdi_stats;
-static struct dentry *dfile_ntn_stats;
-static struct dentry *dfile_dbg_cnt;
-static struct dentry *dfile_msg;
-static struct dentry *dfile_ip4_nat;
-static struct dentry *dfile_rm_stats;
-static struct dentry *dfile_status_stats;
-static struct dentry *dfile_active_clients;
static char dbg_buff[IPA_MAX_MSG_LEN];
static char *active_clients_buf;
@@ -1516,250 +1497,367 @@ static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static int ipa3_read_table(
+ char *table_addr, u32 table_size,
+ char *buff, u32 buff_size,
+ u32 *total_num_entries,
+ u32 *rule_id,
+ enum ipahal_nat_type nat_type)
+{
+ int result;
+ char *entry;
+ size_t entry_size;
+ bool entry_zeroed;
+ u32 i, num_entries = 0, id = *rule_id, pos = 0;
+
+ IPADBG("\n");
+
+ if (table_addr == NULL)
+ return 0;
+
+ result = ipahal_nat_entry_size(nat_type, &entry_size);
+ if (result) {
+ IPAERR("Failed to retrieve size of %s entry\n",
+ ipahal_nat_type_str(nat_type));
+ return 0;
+ }
+
+ for (i = 0, entry = table_addr;
+ i < table_size;
+ ++i, ++id, entry += entry_size) {
+ result = ipahal_nat_is_entry_zeroed(nat_type, entry,
+ &entry_zeroed);
+ if (result) {
+ IPAERR(
+ "Failed to determine whether the %s entry is definitely zero",
+ ipahal_nat_type_str(nat_type));
+ goto bail;
+ }
+ if (entry_zeroed)
+ continue;
+
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "\tEntry_Index=%d\n", id);
+
+ pos += ipahal_nat_stringify_entry(nat_type, entry,
+ buff + pos, buff_size - pos);
+
+ ++num_entries;
+ }
+
+ if (num_entries)
+ pos += scnprintf(buff + pos, buff_size - pos, "\n");
+ else
+ pos += scnprintf(buff + pos, buff_size - pos, "\tEmpty\n\n");
+
+ IPADBG("return\n");
+bail:
+ *rule_id = id;
+ *total_num_entries += num_entries;
+ return pos;
+}
+
+static int ipa3_start_read_memory_device(
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ char *buff, u32 buff_size,
+ enum ipahal_nat_type nat_type,
+ u32 *num_entries)
+{
+ u32 rule_id = 0, pos = 0;
+
+ IPADBG("\n");
+
+ pos += scnprintf(buff + pos, buff_size - pos, "%s_Table_Size=%d\n",
+ dev->name, dev->table_entries + 1);
+
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "%s_Expansion_Table_Size=%d\n",
+ dev->name, dev->expn_table_entries);
+
+ if (!dev->is_sys_mem)
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "Not supported for local(shared) memory\n");
+
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "\n%s Base Table:\n", dev->name);
+ pos += ipa3_read_table(dev->base_table_addr, dev->table_entries + 1,
+ buff + pos, buff_size - pos, num_entries, &rule_id, nat_type);
+
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "%s Expansion Table:\n", dev->name);
+ pos += ipa3_read_table(
+ dev->expansion_table_addr, dev->expn_table_entries,
+ buff + pos, buff_size - pos,
+ num_entries,
+ &rule_id,
+ nat_type);
+
+ IPADBG("return\n");
+ return pos;
+}
+
+static int ipa3_finish_read_memory_device(
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ char *buff, u32 buff_size,
+ u32 curr_pos,
+ u32 num_entries)
+{
+ u32 pos = 0;
+
+ IPADBG("\n");
+
+ /*
+ * A real buffer and buff size, so need to use the
+ * real current position
+ */
+ pos += scnprintf(buff + curr_pos, buff_size - curr_pos,
+ "Overall number %s entries: %d\n\n", dev->name, num_entries);
+
+ if (curr_pos + pos >= buff_size - 1)
+ IPAERR(
+ "The %s debug information is larger than the internal buffer, so the read information might be incomplete",
+ dev->name);
+
+ IPADBG("return\n");
+ return pos;
+}
+
+static int ipa3_read_pdn_table(char *buff, u32 buff_size)
+{
+ int i, result;
+ char *pdn_entry;
+ size_t pdn_entry_size;
+ bool entry_zeroed;
+ u32 pos = 0;
+
+ IPADBG("\n");
+
+ result = ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+ if (result) {
+ IPAERR("Failed to retrieve size of PDN entry");
+ return 0;
+ }
+
+ for (i = 0, pdn_entry = ipa3_ctx->nat_mem.pdn_mem.base;
+ i < IPA_MAX_PDN_NUM;
+ ++i, pdn_entry += pdn_entry_size) {
+ result = ipahal_nat_is_entry_zeroed(IPAHAL_NAT_IPV4_PDN,
+ pdn_entry, &entry_zeroed);
+ if (result) {
+ IPAERR(
+ "Failed to determine whether the PDN entry is definitely zero");
+ goto bail;
+ }
+ if (entry_zeroed)
+ continue;
+
+ pos += scnprintf(buff + pos, buff_size - pos, "PDN %d: ", i);
+
+ pos += ipahal_nat_stringify_entry(IPAHAL_NAT_IPV4_PDN,
+ pdn_entry, buff + pos, buff_size - pos);
+ }
+ pos += scnprintf(buff + pos, buff_size - pos, "\n");
+
+ IPADBG("return\n");
+bail:
+ return pos;
+}
+
static ssize_t ipa3_read_nat4(struct file *file,
char __user *ubuf, size_t count,
- loff_t *ppos) {
+ loff_t *ppos)
+{
+ ssize_t ret;
+ char *buff;
+ u32 rule_id = 0, pos = 0, num_entries = 0, index_num_entries = 0;
+ const u32 buff_size = IPA_MAX_MSG_LEN + 2 * IPA_MAX_ENTRY_STRING_LEN * (
+ ipa3_ctx->nat_mem.dev.table_entries + 1 +
+ ipa3_ctx->nat_mem.dev.expn_table_entries);
-#define ENTRY_U32_FIELDS 8
-#define NAT_ENTRY_ENABLE 0x8000
-#define NAT_ENTRY_RST_FIN_BIT 0x4000
-#define BASE_TABLE 0
-#define EXPANSION_TABLE 1
+ IPADBG("\n");
- u32 *base_tbl, *indx_tbl;
- u32 tbl_size, *tmp;
- u32 value, i, j, rule_id;
- u16 enable, tbl_entry, flag;
- u32 no_entrys = 0;
- struct ipa_pdn_entry *pdn_table = ipa3_ctx->nat_mem.pdn_mem.base;
+ buff = kzalloc(buff_size, GFP_KERNEL);
+ if (buff == NULL)
+ return 0;
- mutex_lock(&ipa3_ctx->nat_mem.lock);
- value = ipa3_ctx->nat_mem.public_ip_addr;
- pr_err(
- "Table IP Address:%d.%d.%d.%d\n",
- ((value & 0xFF000000) >> 24),
- ((value & 0x00FF0000) >> 16),
- ((value & 0x0000FF00) >> 8),
- ((value & 0x000000FF)));
-
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
- for (i = 0; i < IPA_MAX_PDN_NUM; i++) {
- pr_err(
- "PDN %d: ip 0x%X, src_metadata 0x%X, dst_metadata 0x%X\n",
- i, pdn_table[i].public_ip,
- pdn_table[i].src_metadata,
- pdn_table[i].dst_metadata);
- }
-
- pr_err("Table Size:%d\n",
- ipa3_ctx->nat_mem.size_base_tables);
-
- pr_err("Expansion Table Size:%d\n",
- ipa3_ctx->nat_mem.size_expansion_tables-1);
-
- if (!ipa3_ctx->nat_mem.is_sys_mem)
- pr_err("Not supported for local(shared) memory\n");
-
- /* Print Base tables */
- rule_id = 0;
- for (j = 0; j < 2; j++) {
- if (j == BASE_TABLE) {
- tbl_size = ipa3_ctx->nat_mem.size_base_tables;
- base_tbl = (u32 *)ipa3_ctx->nat_mem.ipv4_rules_addr;
-
- pr_err("\nBase Table:\n");
- } else {
- tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
- base_tbl =
- (u32 *)ipa3_ctx->nat_mem.ipv4_expansion_rules_addr;
-
- pr_err("\nExpansion Base Table:\n");
- }
-
- if (base_tbl != NULL) {
- for (i = 0; i <= tbl_size; i++, rule_id++) {
- tmp = base_tbl;
- value = tmp[4];
- enable = ((value & 0xFFFF0000) >> 16);
-
- if (enable & NAT_ENTRY_ENABLE) {
- no_entrys++;
- pr_err("Rule:%d ", rule_id);
-
- value = *tmp;
- pr_err(
- "Private_IP:%d.%d.%d.%d ",
- ((value & 0xFF000000) >> 24),
- ((value & 0x00FF0000) >> 16),
- ((value & 0x0000FF00) >> 8),
- ((value & 0x000000FF)));
- tmp++;
-
- value = *tmp;
- pr_err(
- "Target_IP:%d.%d.%d.%d ",
- ((value & 0xFF000000) >> 24),
- ((value & 0x00FF0000) >> 16),
- ((value & 0x0000FF00) >> 8),
- ((value & 0x000000FF)));
- tmp++;
-
- value = *tmp;
- pr_err(
- "Next_Index:%d Public_Port:%d ",
- (value & 0x0000FFFF),
- ((value & 0xFFFF0000) >> 16));
- tmp++;
-
- value = *tmp;
- pr_err(
- "Private_Port:%d Target_Port:%d ",
- (value & 0x0000FFFF),
- ((value & 0xFFFF0000) >> 16));
- tmp++;
-
- value = *tmp;
- flag = ((value & 0xFFFF0000) >> 16);
- if (flag & NAT_ENTRY_RST_FIN_BIT) {
- pr_err(
- "IP_CKSM_delta:0x%x Flags:%s ",
- (value & 0x0000FFFF),
- "Direct_To_A5");
- } else {
- pr_err(
- "IP_CKSM_delta:0x%x Flags:%s ",
- (value & 0x0000FFFF),
- "Fwd_to_route");
- }
- tmp++;
-
- value = *tmp;
- pr_err(
- "Time_stamp:0x%x Proto:%d ",
- (value & 0x00FFFFFF),
- ((value & 0xFF000000) >> 24));
- tmp++;
-
- value = *tmp;
- pr_err(
- "Prev_Index:%d Indx_tbl_entry:%d ",
- (value & 0x0000FFFF),
- ((value & 0xFFFF0000) >> 16));
- tmp++;
-
- value = *tmp;
- pr_err(
- "TCP_UDP_cksum_delta:0x%x\n",
- ((value & 0xFFFF0000) >> 16));
- }
-
- base_tbl += ENTRY_U32_FIELDS;
-
- }
- }
+ if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "NAT hasn't been initialized or not supported\n");
+ goto ret;
}
+ mutex_lock(&ipa3_ctx->nat_mem.dev.lock);
+
+ if (!ipa3_ctx->nat_mem.dev.is_hw_init) {
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "NAT H/W hasn't been initialized\n");
+ goto bail;
+ }
+
+ pos += scnprintf(buff + pos, buff_size - pos, "\n");
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ pos += ipa3_read_pdn_table(buff + pos, buff_size - pos);
+ } else {
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "NAT Table IP Address=%pI4h\n\n",
+ &ipa3_ctx->nat_mem.public_ip_addr);
+ }
+
+ pos += ipa3_start_read_memory_device(&ipa3_ctx->nat_mem.dev,
+ buff + pos, buff_size - pos, IPAHAL_NAT_IPV4, &num_entries);
+
/* Print Index tables */
- rule_id = 0;
- for (j = 0; j < 2; j++) {
- if (j == BASE_TABLE) {
- tbl_size = ipa3_ctx->nat_mem.size_base_tables;
- indx_tbl = (u32 *)ipa3_ctx->nat_mem.index_table_addr;
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "ipaNatTable Index Table:\n");
+ pos += ipa3_read_table(
+ ipa3_ctx->nat_mem.index_table_addr,
+ ipa3_ctx->nat_mem.dev.table_entries + 1,
+ buff + pos, buff_size - pos,
+ &index_num_entries,
+ &rule_id,
+ IPAHAL_NAT_IPV4_INDEX);
- pr_err("\nIndex Table:\n");
- } else {
- tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
- indx_tbl =
- (u32 *)ipa3_ctx->nat_mem.index_table_expansion_addr;
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "ipaNatTable Expansion Index Table:\n");
+ pos += ipa3_read_table(
+ ipa3_ctx->nat_mem.index_table_expansion_addr,
+ ipa3_ctx->nat_mem.dev.expn_table_entries,
+ buff + pos, buff_size - pos,
+ &index_num_entries,
+ &rule_id,
+ IPAHAL_NAT_IPV4_INDEX);
- pr_err("\nExpansion Index Table:\n");
- }
+ if (num_entries != index_num_entries)
+ IPAERR(
+ "The NAT table number of entries %d is different from index table number of entries %d\n",
+ num_entries, index_num_entries);
- if (indx_tbl != NULL) {
- for (i = 0; i <= tbl_size; i++, rule_id++) {
- tmp = indx_tbl;
- value = *tmp;
- tbl_entry = (value & 0x0000FFFF);
+ pos += ipa3_finish_read_memory_device(&ipa3_ctx->nat_mem.dev,
+ buff, buff_size, pos, num_entries);
- if (tbl_entry) {
- pr_err("Rule:%d ", rule_id);
+ IPADBG("return\n");
+bail:
+ mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
+ret:
+ ret = simple_read_from_buffer(ubuf, count, ppos, buff, pos);
+ kfree(buff);
+ return ret;
+}
- value = *tmp;
- pr_err(
- "Table_Entry:%d Next_Index:%d\n",
- tbl_entry,
- ((value & 0xFFFF0000) >> 16));
- }
+static ssize_t ipa3_read_ipv6ct(struct file *file,
+ char __user *ubuf, size_t count,
+ loff_t *ppos) {
+ ssize_t ret;
+ char *buff;
+ u32 pos = 0, num_entries = 0;
+ const u32 buff_size = IPA_MAX_MSG_LEN + IPA_MAX_ENTRY_STRING_LEN * (
+ ipa3_ctx->nat_mem.dev.table_entries + 1 +
+ ipa3_ctx->nat_mem.dev.expn_table_entries);
- indx_tbl++;
- }
- }
+ IPADBG("\n");
+
+ buff = kzalloc(buff_size, GFP_KERNEL);
+ if (buff == NULL)
+ return 0;
+
+ pos += scnprintf(buff + pos, buff_size - pos, "\n");
+
+ if (!ipa3_ctx->ipv6ct_mem.dev.is_dev_init) {
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "IPv6 connection tracking hasn't been initialized or not supported\n");
+ goto ret;
}
- pr_err("Current No. Nat Entries: %d\n", no_entrys);
- mutex_unlock(&ipa3_ctx->nat_mem.lock);
- return 0;
+ mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+
+ if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
+ pos += scnprintf(buff + pos, buff_size - pos,
+ "IPv6 connection tracking H/W hasn't been initialized\n");
+ goto bail;
+ }
+
+ pos += ipa3_start_read_memory_device(&ipa3_ctx->ipv6ct_mem.dev,
+ buff + pos, buff_size - pos, IPAHAL_NAT_IPV6CT, &num_entries);
+ pos += ipa3_finish_read_memory_device(&ipa3_ctx->ipv6ct_mem.dev,
+ buff, buff_size, pos, num_entries);
+
+ IPADBG("return\n");
+bail:
+ mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+ret:
+ ret = simple_read_from_buffer(ubuf, count, ppos, buff, pos);
+ kfree(buff);
+ return ret;
}
static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int result, nbytes, cnt = 0;
+ int result, cnt = 0;
/* deprecate if IPA PM is used */
- if (ipa3_ctx->use_ipa_pm)
- return 0;
+ if (ipa3_ctx->use_ipa_pm) {
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "IPA RM is disabled\n");
+ goto ret;
+ }
result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
- nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"Error in printing RM stat %d\n", result);
- cnt += nbytes;
- } else
- cnt += result;
-
+ goto ret;
+ }
+ cnt += result;
+ret:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int result, nbytes, cnt = 0;
+ int result, cnt = 0;
- if (!ipa3_ctx->use_ipa_pm)
- return 0;
+ if (!ipa3_ctx->use_ipa_pm) {
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "IPA PM is disabled\n");
+ goto ret;
+ }
result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
- nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"Error in printing PM stat %d\n", result);
- cnt += nbytes;
- } else
- cnt += result;
-
+ goto ret;
+ }
+ cnt += result;
+ret:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int result, nbytes, cnt = 0;
+ int result, cnt = 0;
- if (!ipa3_ctx->use_ipa_pm)
- return 0;
+ if (!ipa3_ctx->use_ipa_pm) {
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "IPA PM is disabled\n");
+ goto ret;
+ }
result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
- nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"Error in printing PM stat %d\n", result);
- cnt += nbytes;
- } else
- cnt += result;
-
+ goto ret;
+ }
+ cnt += result;
+ret:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
-
static void ipa_dump_status(struct ipahal_pkt_status *status)
{
IPA_DUMP_STATUS_FIELD(status_opcode);
@@ -1901,113 +1999,139 @@ static ssize_t ipa3_enable_ipc_low(struct file *file,
return count;
}
-const struct file_operations ipa3_gen_reg_ops = {
- .read = ipa3_read_gen_reg,
-};
-
-const struct file_operations ipa3_ep_reg_ops = {
- .read = ipa3_read_ep_reg,
- .write = ipa3_write_ep_reg,
-};
-
-const struct file_operations ipa3_keep_awake_ops = {
- .read = ipa3_read_keep_awake,
- .write = ipa3_write_keep_awake,
-};
-
-const struct file_operations ipa3_ep_holb_ops = {
- .write = ipa3_write_ep_holb,
-};
-
-const struct file_operations ipa3_hdr_ops = {
- .read = ipa3_read_hdr,
-};
-
-const struct file_operations ipa3_rt_ops = {
- .read = ipa3_read_rt,
- .open = ipa3_open_dbg,
-};
-
-const struct file_operations ipa3_rt_hw_ops = {
- .read = ipa3_read_rt_hw,
- .open = ipa3_open_dbg,
-};
-
-const struct file_operations ipa3_proc_ctx_ops = {
- .read = ipa3_read_proc_ctx,
-};
-
-const struct file_operations ipa3_flt_ops = {
- .read = ipa3_read_flt,
- .open = ipa3_open_dbg,
-};
-
-const struct file_operations ipa3_flt_hw_ops = {
- .read = ipa3_read_flt_hw,
- .open = ipa3_open_dbg,
-};
-
-const struct file_operations ipa3_stats_ops = {
- .read = ipa3_read_stats,
-};
-
-const struct file_operations ipa3_wstats_ops = {
- .read = ipa3_read_wstats,
-};
-
-const struct file_operations ipa3_wdi_ops = {
- .read = ipa3_read_wdi,
-};
-
-const struct file_operations ipa3_ntn_ops = {
- .read = ipa3_read_ntn,
-};
-
-const struct file_operations ipa3_msg_ops = {
- .read = ipa3_read_msg,
-};
-
-const struct file_operations ipa3_dbg_cnt_ops = {
- .read = ipa3_read_dbg_cnt,
- .write = ipa3_write_dbg_cnt,
-};
-
-const struct file_operations ipa3_status_stats_ops = {
- .read = ipa_status_stats_read,
-};
-
-const struct file_operations ipa3_nat4_ops = {
- .read = ipa3_read_nat4,
-};
-
-const struct file_operations ipa3_rm_stats = {
- .read = ipa3_rm_read_stats,
-};
-
-static const struct file_operations ipa3_pm_stats = {
- .read = ipa3_pm_read_stats,
-};
-
-
-static const struct file_operations ipa3_pm_ex_stats = {
- .read = ipa3_pm_ex_read_stats,
-};
-
-const struct file_operations ipa3_active_clients = {
- .read = ipa3_print_active_clients_log,
- .write = ipa3_clear_active_clients_log,
-};
-
-const struct file_operations ipa3_ipc_low_ops = {
- .write = ipa3_enable_ipc_low,
+static const struct ipa3_debugfs_file debugfs_files[] = {
+ {
+ "gen_reg", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_gen_reg
+ }
+ }, {
+ "active_clients", IPA_READ_WRITE_MODE, NULL, {
+ .read = ipa3_print_active_clients_log,
+ .write = ipa3_clear_active_clients_log
+ }
+ }, {
+ "ep_reg", IPA_READ_WRITE_MODE, NULL, {
+ .read = ipa3_read_ep_reg,
+ .write = ipa3_write_ep_reg,
+ }
+ }, {
+ "keep_awake", IPA_READ_WRITE_MODE, NULL, {
+ .read = ipa3_read_keep_awake,
+ .write = ipa3_write_keep_awake,
+ }
+ }, {
+ "holb", IPA_WRITE_ONLY_MODE, NULL, {
+ .write = ipa3_write_ep_holb,
+ }
+ }, {
+ "hdr", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_hdr,
+ }
+ }, {
+ "proc_ctx", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_proc_ctx,
+ }
+ }, {
+ "ip4_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+ .read = ipa3_read_rt,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip4_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+ .read = ipa3_read_rt_hw,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip6_rt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+ .read = ipa3_read_rt,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip6_rt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+ .read = ipa3_read_rt_hw,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip4_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+ .read = ipa3_read_flt,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip4_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v4, {
+ .read = ipa3_read_flt_hw,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip6_flt", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+ .read = ipa3_read_flt,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "ip6_flt_hw", IPA_READ_ONLY_MODE, (void *)IPA_IP_v6, {
+ .read = ipa3_read_flt_hw,
+ .open = ipa3_open_dbg,
+ }
+ }, {
+ "stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_stats,
+ }
+ }, {
+ "wstats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_wstats,
+ }
+ }, {
+ "wdi", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_wdi,
+ }
+ }, {
+ "ntn", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_ntn,
+ }
+ }, {
+ "dbg_cnt", IPA_READ_WRITE_MODE, NULL, {
+ .read = ipa3_read_dbg_cnt,
+ .write = ipa3_write_dbg_cnt,
+ }
+ }, {
+ "msg", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_msg,
+ }
+ }, {
+ "ip4_nat", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_nat4,
+ }
+ }, {
+ "ipv6ct", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_ipv6ct,
+ }
+ }, {
+ "rm_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_rm_read_stats,
+ }
+ }, {
+ "pm_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_pm_read_stats,
+ }
+ }, {
+ "pm_ex_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_pm_ex_read_stats,
+ }
+ }, {
+ "status_stats", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa_status_stats_read,
+ }
+ }, {
+ "enable_low_prio_print", IPA_WRITE_ONLY_MODE, NULL, {
+ .write = ipa3_enable_ipc_low,
+ }
+ }
};
void ipa3_debugfs_init(void)
{
- const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
- const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
- S_IWUSR | S_IWGRP;
- const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+ const size_t debugfs_files_num =
+ sizeof(debugfs_files) / sizeof(struct ipa3_debugfs_file);
+ size_t i;
struct dentry *file;
dent = debugfs_create_dir("ipa", 0);
@@ -2016,26 +2140,24 @@ void ipa3_debugfs_init(void)
return;
}
- file = debugfs_create_u32("hw_type", read_only_mode,
- dent, &ipa3_ctx->ipa_hw_type);
+ file = debugfs_create_u32("hw_type", IPA_READ_ONLY_MODE,
+ dent, &ipa3_ctx->ipa_hw_type);
if (!file) {
IPAERR("could not create hw_type file\n");
goto fail;
}
- dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
- &ipa3_gen_reg_ops);
- if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
- IPAERR("fail to create file for debug_fs gen_reg\n");
- goto fail;
- }
+ for (i = 0; i < debugfs_files_num; ++i) {
+ const struct ipa3_debugfs_file *curr = &debugfs_files[i];
- dfile_active_clients = debugfs_create_file("active_clients",
- read_write_mode, dent, 0, &ipa3_active_clients);
- if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
- IPAERR("fail to create file for debug_fs active_clients\n");
- goto fail;
+ file = debugfs_create_file(curr->name, curr->mode, dent,
+ curr->data, &curr->fops);
+ if (!file || IS_ERR(file)) {
+ IPAERR("fail to create file for debug_fs %s\n",
+ curr->name);
+ goto fail;
+ }
}
active_clients_buf = NULL;
@@ -2044,177 +2166,7 @@ void ipa3_debugfs_init(void)
if (active_clients_buf == NULL)
IPAERR("fail to allocate active clients memory buffer");
- dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
- &ipa3_ep_reg_ops);
- if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
- IPAERR("fail to create file for debug_fs ep_reg\n");
- goto fail;
- }
-
- dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
- dent, 0, &ipa3_keep_awake_ops);
- if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
- IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
- goto fail;
- }
-
- dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
- 0, &ipa3_ep_holb_ops);
- if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
- IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
- goto fail;
- }
-
- dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
- &ipa3_hdr_ops);
- if (!dfile_hdr || IS_ERR(dfile_hdr)) {
- IPAERR("fail to create file for debug_fs hdr\n");
- goto fail;
- }
-
- dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
- 0, &ipa3_proc_ctx_ops);
- if (!dfile_hdr || IS_ERR(dfile_hdr)) {
- IPAERR("fail to create file for debug_fs proc_ctx\n");
- goto fail;
- }
-
- dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
- (void *)IPA_IP_v4, &ipa3_rt_ops);
- if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
- IPAERR("fail to create file for debug_fs ip4 rt\n");
- goto fail;
- }
-
- dfile_ip4_rt_hw = debugfs_create_file("ip4_rt_hw", read_only_mode, dent,
- (void *)IPA_IP_v4, &ipa3_rt_hw_ops);
- if (!dfile_ip4_rt_hw || IS_ERR(dfile_ip4_rt_hw)) {
- IPAERR("fail to create file for debug_fs ip4 rt hw\n");
- goto fail;
- }
-
- dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
- (void *)IPA_IP_v6, &ipa3_rt_ops);
- if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
- IPAERR("fail to create file for debug_fs ip6:w rt\n");
- goto fail;
- }
-
- dfile_ip6_rt_hw = debugfs_create_file("ip6_rt_hw", read_only_mode, dent,
- (void *)IPA_IP_v6, &ipa3_rt_hw_ops);
- if (!dfile_ip6_rt_hw || IS_ERR(dfile_ip6_rt_hw)) {
- IPAERR("fail to create file for debug_fs ip6 rt hw\n");
- goto fail;
- }
-
- dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
- (void *)IPA_IP_v4, &ipa3_flt_ops);
- if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
- IPAERR("fail to create file for debug_fs ip4 flt\n");
- goto fail;
- }
-
- dfile_ip4_flt_hw = debugfs_create_file("ip4_flt_hw", read_only_mode,
- dent, (void *)IPA_IP_v4, &ipa3_flt_hw_ops);
- if (!dfile_ip4_flt_hw || IS_ERR(dfile_ip4_flt_hw)) {
- IPAERR("fail to create file for debug_fs ip4 flt\n");
- goto fail;
- }
-
- dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
- (void *)IPA_IP_v6, &ipa3_flt_ops);
- if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
- IPAERR("fail to create file for debug_fs ip6 flt\n");
- goto fail;
- }
-
- dfile_ip6_flt_hw = debugfs_create_file("ip6_flt_hw", read_only_mode,
- dent, (void *)IPA_IP_v6, &ipa3_flt_hw_ops);
- if (!dfile_ip6_flt_hw || IS_ERR(dfile_ip6_flt_hw)) {
- IPAERR("fail to create file for debug_fs ip6 flt\n");
- goto fail;
- }
-
- dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
- &ipa3_stats_ops);
- if (!dfile_stats || IS_ERR(dfile_stats)) {
- IPAERR("fail to create file for debug_fs stats\n");
- goto fail;
- }
-
- dfile_wstats = debugfs_create_file("wstats", read_only_mode,
- dent, 0, &ipa3_wstats_ops);
- if (!dfile_wstats || IS_ERR(dfile_wstats)) {
- IPAERR("fail to create file for debug_fs wstats\n");
- goto fail;
- }
-
- dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
- &ipa3_wdi_ops);
- if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
- IPAERR("fail to create file for debug_fs wdi stats\n");
- goto fail;
- }
-
- dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
- &ipa3_ntn_ops);
- if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
- IPAERR("fail to create file for debug_fs ntn stats\n");
- goto fail;
- }
-
- dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
- &ipa3_dbg_cnt_ops);
- if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
- IPAERR("fail to create file for debug_fs dbg_cnt\n");
- goto fail;
- }
-
- dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
- &ipa3_msg_ops);
- if (!dfile_msg || IS_ERR(dfile_msg)) {
- IPAERR("fail to create file for debug_fs msg\n");
- goto fail;
- }
-
- dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
- 0, &ipa3_nat4_ops);
- if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
- IPAERR("fail to create file for debug_fs ip4 nat\n");
- goto fail;
- }
-
- if (ipa3_ctx->use_ipa_pm) {
- file = dfile_rm_stats = debugfs_create_file("pm_stats",
- read_only_mode, dent, NULL, &ipa3_pm_stats);
- if (!file || IS_ERR(file)) {
- IPAERR("fail to create file for debug_fs pm_stats\n");
- goto fail;
- }
-
- file = dfile_rm_stats = debugfs_create_file("pm_ex_stats",
- read_only_mode, dent, NULL, &ipa3_pm_ex_stats);
- if (!file || IS_ERR(file)) {
- IPAERR("fail to create file for debugfs pm_ex_stats\n");
- goto fail;
- }
- } else {
- dfile_rm_stats = debugfs_create_file("rm_stats",
- read_only_mode, dent, NULL, &ipa3_rm_stats);
- if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
- IPAERR("fail to create file for debug_fs rm_stats\n");
- goto fail;
- }
- }
-
- dfile_status_stats = debugfs_create_file("status_stats",
- read_only_mode, dent, 0, &ipa3_status_stats_ops);
- if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
- IPAERR("fail to create file for debug_fs status_stats\n");
- goto fail;
- }
-
- file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+ file = debugfs_create_u32("enable_clock_scaling", IPA_READ_WRITE_MODE,
dent, &ipa3_ctx->enable_clock_scaling);
if (!file) {
IPAERR("could not create enable_clock_scaling file\n");
@@ -2222,7 +2174,7 @@ void ipa3_debugfs_init(void)
}
file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
- read_write_mode, dent,
+ IPA_READ_WRITE_MODE, dent,
&ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal);
if (!file) {
IPAERR("could not create bw_threshold_nominal_mbps\n");
@@ -2230,20 +2182,13 @@ void ipa3_debugfs_init(void)
}
file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
- read_write_mode, dent,
- &ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+ IPA_READ_WRITE_MODE, dent,
+ &ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
if (!file) {
IPAERR("could not create bw_threshold_turbo_mbps\n");
goto fail;
}
- file = debugfs_create_file("enable_low_prio_print", write_only_mode,
- dent, 0, &ipa3_ipc_low_ops);
- if (!file) {
- IPAERR("could not create enable_low_prio_print file\n");
- goto fail;
- }
-
ipa_debugfs_init_stats(dent);
return;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 5d1bbe7..0f3940f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -62,7 +62,7 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
if (res)
- IPAERR("failed to generate flt h/w rule\n");
+ IPAERR_RL("failed to generate flt h/w rule\n");
return 0;
}
@@ -311,7 +311,7 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
}
if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
- IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
+ IPAERR_RL("fail to allocate FLT HW TBL images. IP %d\n", ip);
rc = -ENOMEM;
goto allocate_failed;
}
@@ -319,14 +319,14 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
hash_bdy_start_ofst)) {
- IPAERR("fail to translate hashable flt tbls to hw format\n");
+ IPAERR_RL("fail to translate hashable flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
nhash_bdy_start_ofst)) {
- IPAERR("fail to translate non-hash flt tbls to hw format\n");
+ IPAERR_RL("fail to translate non-hash flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
@@ -385,19 +385,15 @@ static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
* payload pointers buffers for headers and bodies of flt structure
* as well as place for flush imm.
* @ipt: the ip address family type
+ * @entries: number of descriptor/payload entries to allocate in the buffers
* @desc: [OUT] descriptor buffer
* @cmd: [OUT] imm commands payload pointers buffer
*
* Return: 0 on success, negative on failure
*/
-static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
+static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip, u16 entries,
struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
{
- u16 entries;
-
- /* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
- entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
-
*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
if (*desc == NULL) {
IPAERR("fail to alloc desc blob ip %d\n", ip);
@@ -473,6 +469,7 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
struct ipahal_reg_valmask valmask;
u32 tbl_hdr_width;
struct ipa3_flt_tbl *tbl;
+ u16 entries;
tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
memset(&alloc_params, 0, sizeof(alloc_params));
@@ -533,7 +530,7 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
}
if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
- IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
+ IPAERR_RL("fail to generate FLT HW TBL image. IP %d\n", ip);
rc = -EFAULT;
goto prep_failed;
}
@@ -549,7 +546,10 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
goto fail_size_valid;
}
- if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
+ /* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
+ entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+
+ if (ipa_flt_alloc_cmd_buffers(ip, entries, &desc, &cmd_pyld)) {
rc = -ENOMEM;
goto fail_size_valid;
}
@@ -573,11 +573,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
rc = -EFAULT;
goto fail_reg_write_construct;
}
- desc[0].opcode = cmd_pyld[0]->opcode;
- desc[0].pyld = cmd_pyld[0]->data;
- desc[0].len = cmd_pyld[0]->len;
- desc[0].type = IPA_IMM_CMD_DESC;
- num_cmd++;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
hdr_idx = 0;
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
@@ -591,6 +588,13 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
continue;
}
+ if (num_cmd + 1 >= entries) {
+ IPAERR("number of commands is out of range: IP = %d\n",
+ ip);
+ rc = -ENOBUFS;
+ goto fail_imm_cmd_construct;
+ }
+
IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
hdr_idx, i);
@@ -607,12 +611,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
+ rc = -ENOMEM;
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
@@ -627,17 +630,23 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
+ rc = -ENOMEM;
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
- hdr_idx++;
+ ++hdr_idx;
}
if (lcl_nhash) {
+ if (num_cmd >= entries) {
+ IPAERR("number of commands is out of range: IP = %d\n",
+ ip);
+ rc = -ENOBUFS;
+ goto fail_imm_cmd_construct;
+ }
+
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -649,14 +658,20 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
+ rc = -ENOMEM;
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
}
if (lcl_hash) {
+ if (num_cmd >= entries) {
+ IPAERR("number of commands is out of range: IP = %d\n",
+ ip);
+ rc = -ENOBUFS;
+ goto fail_imm_cmd_construct;
+ }
+
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -668,12 +683,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
if (!cmd_pyld[num_cmd]) {
IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
ip);
+ rc = -ENOMEM;
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
}
if (ipa3_send_cmd(num_cmd, desc)) {
@@ -731,25 +745,25 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
if (*rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -764,12 +778,12 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
if (rule->pdn_idx) {
if (rule->action == IPA_PASS_TO_EXCEPTION ||
rule->action == IPA_PASS_TO_ROUTING) {
- IPAERR(
+ IPAERR_RL(
"PDN index should be 0 when action is not pass to NAT\n");
goto error;
} else {
if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
- IPAERR("PDN index %d is too large\n",
+ IPAERR_RL("PDN index %d is too large\n",
rule->pdn_idx);
goto error;
}
@@ -780,7 +794,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
if (rule->rule_id) {
if ((rule->rule_id < ipahal_get_rule_id_hi_bit()) ||
(rule->rule_id >= ((ipahal_get_rule_id_hi_bit()<<1)-1))) {
- IPAERR("invalid rule_id provided 0x%x\n"
+ IPAERR_RL("invalid rule_id provided 0x%x\n"
"rule_id with bit 0x%x are auto generated\n",
rule->rule_id, ipahal_get_rule_id_hi_bit());
goto error;
@@ -814,8 +828,8 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
} else {
id = ipa3_alloc_rule_id(tbl->rule_ids);
if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to allocate rule id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto rule_id_fail;
}
}
@@ -839,8 +853,8 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
entry->rt_tbl->ref_cnt++;
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
*rule_hdl = id;
@@ -1385,7 +1399,7 @@ int ipa3_reset_flt(enum ipa_ip_type ip)
list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
link) {
if (ipa3_id_find(entry->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 34624c0..a37df7e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -199,9 +199,6 @@ int __ipa_commit_hdr_v3_0(void)
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
- desc[0].opcode = hdr_cmd_pyld->opcode;
- desc[0].pyld = hdr_cmd_pyld->data;
- desc[0].len = hdr_cmd_pyld->len;
}
} else {
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
@@ -217,12 +214,9 @@ int __ipa_commit_hdr_v3_0(void)
IPAERR("fail construct hdr_init_system cmd\n");
goto end;
}
- desc[0].opcode = hdr_cmd_pyld->opcode;
- desc[0].pyld = hdr_cmd_pyld->data;
- desc[0].len = hdr_cmd_pyld->len;
}
}
- desc[0].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[0], hdr_cmd_pyld);
IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
@@ -249,9 +243,6 @@ int __ipa_commit_hdr_v3_0(void)
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
- desc[1].opcode = ctx_cmd_pyld->opcode;
- desc[1].pyld = ctx_cmd_pyld->data;
- desc[1].len = ctx_cmd_pyld->len;
}
} else {
proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
@@ -277,12 +268,9 @@ int __ipa_commit_hdr_v3_0(void)
IPAERR("fail construct register_write cmd\n");
goto end;
}
- desc[1].opcode = ctx_cmd_pyld->opcode;
- desc[1].pyld = ctx_cmd_pyld->data;
- desc[1].len = ctx_cmd_pyld->len;
}
}
- desc[1].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[1], ctx_cmd_pyld);
IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
if (ipa3_send_cmd(2, desc))
@@ -355,7 +343,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
}
if (hdr_entry->cookie != IPA_HDR_COOKIE) {
IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
@@ -385,7 +373,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
IPAERR_RL("unexpected needed len %d\n", needed_len);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
goto bad_len;
}
@@ -430,8 +418,8 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to alloc id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to alloc id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -567,8 +555,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to alloc id\n");
- WARN_ON(1);
+ IPAERR_RL("failed to alloc id\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -996,7 +984,7 @@ int ipa3_reset_hdr(void)
if (entry->is_hdr_proc_ctx) {
IPAERR("default header is proc ctx\n");
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
continue;
@@ -1004,7 +992,7 @@ int ipa3_reset_hdr(void)
if (ipa3_id_find(entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -1058,7 +1046,7 @@ int ipa3_reset_hdr(void)
if (ipa3_id_find(ctx_entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
deleted file mode 100644
index dff3a3f..0000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _IPA_HW_DEFS_H
-#define _IPA_HW_DEFS_H
-#include <linux/bitops.h>
-
-/* This header defines various HW related data types */
-
-
-#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3)
-#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2)
-
-/**
- * struct ipa3_a5_mux_hdr - A5 MUX header definition
- * @interface_id: interface ID
- * @src_pipe_index: source pipe index
- * @flags: flags
- * @metadata: metadata
- *
- * A5 MUX header is in BE, A5 runs in LE. This struct definition
- * allows A5 SW to correctly parse the header
- */
-struct ipa3_a5_mux_hdr {
- u16 interface_id;
- u8 src_pipe_index;
- u8 flags;
- u32 metadata;
-};
-
-#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 29fe24e..3eff209 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -27,7 +27,6 @@
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
-#include "ipa_hw_defs.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
#include "ipahal/ipahal_reg.h"
@@ -38,8 +37,9 @@
#include "ipa_uc_offload_i.h"
#include "ipa_pm.h"
+#define IPA_DEV_NAME_MAX_LEN 15
#define DRV_NAME "ipa"
-#define NAT_DEV_NAME "ipaNatTable"
+
#define IPA_COOKIE 0x57831603
#define IPA_RT_RULE_COOKIE 0x57831604
#define IPA_RT_TBL_COOKIE 0x57831605
@@ -106,7 +106,7 @@
#define IPAERR_RL(fmt, args...) \
do { \
- pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ pr_err_ratelimited_ipa(DRV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args);\
if (ipa3_ctx) { \
IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
@@ -125,6 +125,8 @@
#define IPA_RAM_NAT_OFST 0
#define IPA_RAM_NAT_SIZE 0
+#define IPA_RAM_IPV6CT_OFST 0
+#define IPA_RAM_IPV6CT_SIZE 0
#define IPA_MEM_CANARY_VAL 0xdeadbeef
#define IPA_STATS
@@ -431,6 +433,7 @@ struct ipa3_rt_entry {
int id;
u16 prio;
u16 rule_id;
+ u16 rule_id_valid;
};
/**
@@ -780,57 +783,101 @@ struct ipa_pdn_entry {
u32 dst_metadata;
u32 resrvd;
};
+
/**
- * struct ipa3_nat_mem - IPA NAT memory description
+ * struct ipa3_nat_ipv6ct_tmp_mem - NAT/IPv6CT temporary memory
+ *
+ * In case the NAT/IPv6CT tables are destroyed, the HW is provided with
+ * this temporary memory
+ *
+ * @vaddr: the address of the temporary memory
+ * @dma_handle: the handle of the temporary memory
+ */
+struct ipa3_nat_ipv6ct_tmp_mem {
+ void *vaddr;
+ dma_addr_t dma_handle;
+};
+
+/**
+ * struct ipa3_nat_ipv6ct_common_mem - IPA NAT/IPv6CT memory device
* @class: pointer to the struct class
* @dev: the dev_t of the device
* @cdev: cdev of the device
* @dev_num: device number
- * @vaddr: virtual address
- * @dma_handle: DMA handle
- * @size: NAT memory size
- * @is_mapped: flag indicating if NAT memory is mapped
- * @is_sys_mem: flag indicating if NAT memory is sys memory
- * @is_dev_init: flag indicating if NAT device is initialized
- * @lock: NAT memory mutex
- * @nat_base_address: nat table virutal address
- * @ipv4_rules_addr: base nat table address
- * @ipv4_expansion_rules_addr: expansion table address
- * @index_table_addr: index table address
- * @index_table_expansion_addr: index expansion table address
- * @size_base_tables: base table size
- * @size_expansion_tables: expansion table size
- * @public_ip_addr: ip address of nat table
- * @pdn_mem: pdn config table SW cache memory structure
+ * @vaddr: the virtual address in the system memory
+ * @dma_handle: the system memory DMA handle
+ * @phys_mem_size: the physical size in the shared memory
+ * @smem_offset: the offset in the shared memory
+ * @size: memory size
+ * @is_mapped: flag indicating if memory is mapped
+ * @is_sys_mem: flag indicating if memory is sys memory
+ * @is_mem_allocated: flag indicating if the memory is allocated
+ * @is_hw_init: flag indicating if the corresponding HW is initialized
+ * @is_dev_init: flag indicating if device is initialized
+ * @lock: memory mutex
+ * @base_address: table virtual address
+ * @base_table_addr: base table address
+ * @expansion_table_addr: expansion table address
+ * @table_entries: num of entries in the base table
+ * @expn_table_entries: num of entries in the expansion table
+ * @tmp_mem: temporary memory used to always provide the HW with legal memory
+ * @name: the device name
*/
-struct ipa3_nat_mem {
+struct ipa3_nat_ipv6ct_common_mem {
struct class *class;
struct device *dev;
struct cdev cdev;
dev_t dev_num;
+
+ /* system memory */
void *vaddr;
dma_addr_t dma_handle;
+
+ /* shared memory */
+ u32 phys_mem_size;
+ u32 smem_offset;
+
size_t size;
bool is_mapped;
bool is_sys_mem;
+ bool is_mem_allocated;
+ bool is_hw_init;
bool is_dev_init;
- bool is_dev;
struct mutex lock;
- void *nat_base_address;
- char *ipv4_rules_addr;
- char *ipv4_expansion_rules_addr;
+ void *base_address;
+ char *base_table_addr;
+ char *expansion_table_addr;
+ u32 table_entries;
+ u32 expn_table_entries;
+ struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+ char name[IPA_DEV_NAME_MAX_LEN];
+};
+
+/**
+ * struct ipa3_nat_mem - IPA NAT memory description
+ * @dev: the memory device structure
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @public_ip_addr: ip address of nat table
+ * @pdn_mem: pdn config table SW cache memory structure
+ */
+struct ipa3_nat_mem {
+ struct ipa3_nat_ipv6ct_common_mem dev;
char *index_table_addr;
char *index_table_expansion_addr;
- u32 size_base_tables;
- u32 size_expansion_tables;
u32 public_ip_addr;
- void *tmp_vaddr;
- dma_addr_t tmp_dma_handle;
- bool is_tmp_mem;
struct ipa_mem_buffer pdn_mem;
};
/**
+ * struct ipa3_ipv6ct_mem - IPA IPv6 connection tracking memory description
+ * @dev: the memory device structure
+ */
+struct ipa3_ipv6ct_mem {
+ struct ipa3_nat_ipv6ct_common_mem dev;
+};
+
+/**
* enum ipa3_hw_mode - IPA hardware mode
* @IPA_HW_Normal: Regular IPA hardware
* @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation
@@ -1034,11 +1081,6 @@ struct ipa3_ready_cb_info {
void *user_data;
};
-struct ipa_tz_unlock_reg_info {
- u64 reg_addr;
- u32 size;
-};
-
struct ipa_dma_task_info {
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_pyld *cmd_pyld;
@@ -1142,6 +1184,7 @@ enum ipa_smmu_cb_type {
* from non-restricted bytes
* @smem_restricted_bytes: the bytes that SW should not use in the shared mem
* @nat_mem: NAT memory
+ * @ipv6ct_mem: IPv6CT memory
* @excp_hdr_hdl: exception header handle
* @dflt_v4_rt_rule_hdl: default v4 routing rule handle
* @dflt_v6_rt_rule_hdl: default v6 routing rule handle
@@ -1191,6 +1234,7 @@ enum ipa_smmu_cb_type {
* @ipa_ready_cb_list: A list of all the clients who require a CB when IPA
* driver is ready/initialized.
* @init_completion_obj: Completion object to be used in case IPA driver hasn't
+ * @mhi_evid_limits: MHI event rings start and end ids
* finished initializing. Example of use - IOCTLs to /dev/ipa
* IPA context - holds all relevant info about IPA driver and its state
*/
@@ -1228,6 +1272,7 @@ struct ipa3_context {
u16 smem_restricted_bytes;
u16 smem_reqd_sz;
struct ipa3_nat_mem nat_mem;
+ struct ipa3_ipv6ct_mem ipv6ct_mem;
u32 excp_hdr_hdl;
u32 dflt_v4_rt_rule_hdl;
u32 dflt_v6_rt_rule_hdl;
@@ -1286,6 +1331,7 @@ struct ipa3_context {
u32 curr_ipa_clk_rate;
bool q6_proxy_clk_vote_valid;
struct mutex q6_proxy_clk_vote_mutex;
+ u32 q6_proxy_clk_vote_cnt;
u32 ipa_num_pipes;
dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
u32 pkt_init_imm_opcode;
@@ -1318,6 +1364,7 @@ struct ipa3_context {
struct completion init_completion_obj;
struct completion uc_loaded_completion_obj;
struct ipa3_smp2p_info smp2p_info;
+ u32 mhi_evid_limits[2]; /* start and end values */
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
struct ipa_dma_task_info dma_task_info;
@@ -1351,6 +1398,7 @@ struct ipa3_plat_drv_res {
bool apply_rg10_wa;
bool gsi_ch20_wa;
bool tethered_flow_control;
+ u32 mhi_evid_limits[2]; /* start and end values */
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
bool use_ipa_pm;
@@ -1714,6 +1762,8 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
*/
int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
@@ -1748,13 +1798,24 @@ int ipa3_reset_flt(enum ipa_ip_type ip);
/*
* NAT
*/
+int ipa3_nat_ipv6ct_init_devices(void);
+void ipa3_nat_ipv6ct_destroy_devices(void);
+
int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+int ipa3_allocate_nat_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+int ipa3_allocate_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init);
+int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
+int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
@@ -2105,7 +2166,6 @@ int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
bool polling_mode, unsigned long timeout_jiffies);
void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
struct ipa3_uc_hdlrs *hdlrs);
-int ipa3_create_nat_device(void);
int ipa3_uc_notify_clk_state(bool enabled);
int ipa3_dma_setup(void);
void ipa3_dma_shutdown(void);
@@ -2249,4 +2309,7 @@ int ipa3_allocate_dma_task_for_gsi(void);
void ipa3_free_dma_task_for_gsi(void);
int ipa3_set_clock_plan_from_pm(int idx);
void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
+int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
+void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
+ struct ipahal_imm_cmd_pyld *cmd_pyld);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 4ada018..40ef59a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -221,7 +221,7 @@ int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
int result = -EINVAL;
if (lookup == NULL) {
- IPAERR("invalid param lookup=%p\n", lookup);
+ IPAERR_RL("invalid param lookup=%p\n", lookup);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 08f2a8c..cb970ba 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -356,6 +356,14 @@ int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
return -EINVAL;
}
+ if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) >
+ ((ipa3_ctx->mhi_evid_limits[1] -
+ ipa3_ctx->mhi_evid_limits[0]) + 1)) {
+ IPAERR("Not enough event rings for MHI\n");
+ ipa_assert();
+ return -EINVAL;
+ }
+
/* Initialize IPA MHI engine */
gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
if (!gsi_ep_info) {
@@ -407,6 +415,8 @@ int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
return -EINVAL;
}
+ in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];
+
client = in->sys->client;
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx == -1) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 958fc6c..c2daa05 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -18,23 +18,29 @@
#include <linux/uaccess.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_nat.h"
#define IPA_NAT_PHYS_MEM_OFFSET 0
+#define IPA_IPV6CT_PHYS_MEM_OFFSET 0
#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
+#define IPA_IPV6CT_PHYS_MEM_SIZE IPA_RAM_IPV6CT_SIZE
-#define IPA_NAT_TEMP_MEM_SIZE 128
+#define IPA_NAT_IPV6CT_TEMP_MEM_SIZE 128
-enum nat_table_type {
+#define IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC 3
+#define IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC 2
+#define IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC 4
+
+enum ipa_nat_ipv6ct_table_type {
IPA_NAT_BASE_TBL = 0,
IPA_NAT_EXPN_TBL = 1,
IPA_NAT_INDX_TBL = 2,
IPA_NAT_INDEX_EXPN_TBL = 3,
+ IPA_IPV6CT_BASE_TBL = 4,
+ IPA_IPV6CT_EXPN_TBL = 5
};
-#define NAT_TABLE_ENTRY_SIZE_BYTE 32
-#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
-
-static int ipa3_nat_vma_fault_remap(
+static int ipa3_nat_ipv6ct_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
IPADBG("\n");
@@ -44,193 +50,340 @@ static int ipa3_nat_vma_fault_remap(
}
/* VMA related file operations functions */
-static struct vm_operations_struct ipa3_nat_remap_vm_ops = {
- .fault = ipa3_nat_vma_fault_remap,
+static const struct vm_operations_struct ipa3_nat_ipv6ct_remap_vm_ops = {
+ .fault = ipa3_nat_ipv6ct_vma_fault_remap,
};
-static int ipa3_nat_open(struct inode *inode, struct file *filp)
+static int ipa3_nat_ipv6ct_open(struct inode *inode, struct file *filp)
{
- struct ipa3_nat_mem *nat_ctx;
+ struct ipa3_nat_ipv6ct_common_mem *dev;
IPADBG("\n");
- nat_ctx = container_of(inode->i_cdev, struct ipa3_nat_mem, cdev);
- filp->private_data = nat_ctx;
+ dev = container_of(inode->i_cdev,
+ struct ipa3_nat_ipv6ct_common_mem, cdev);
+ filp->private_data = dev;
IPADBG("return\n");
return 0;
}
-static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+static int ipa3_nat_ipv6ct_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ struct ipa3_nat_ipv6ct_common_mem *dev =
+ (struct ipa3_nat_ipv6ct_common_mem *)filp->private_data;
unsigned long vsize = vma->vm_end - vma->vm_start;
- struct ipa3_nat_mem *nat_ctx =
- (struct ipa3_nat_mem *)filp->private_data;
unsigned long phys_addr;
- int result;
+ int result = 0;
- mutex_lock(&nat_ctx->lock);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (nat_ctx->is_sys_mem) {
- IPADBG("Mapping system memory\n");
- if (nat_ctx->is_mapped) {
- IPAERR("mapping already exists, only 1 supported\n");
+ IPADBG("\n");
+
+ if (!dev->is_dev_init) {
+ IPAERR("attempt to mmap %s before dev init\n", dev->name);
+ return -EPERM;
+ }
+
+ mutex_lock(&dev->lock);
+ if (!dev->is_mem_allocated) {
+ IPAERR_RL("attempt to mmap %s before the memory allocation\n",
+ dev->name);
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (dev->is_sys_mem) {
+ if (dev->is_mapped) {
+ IPAERR("%s already mapped, only 1 mapping supported\n",
+ dev->name);
result = -EINVAL;
goto bail;
}
- IPADBG("map sz=0x%zx\n", nat_ctx->size);
+ } else {
+ if ((dev->phys_mem_size == 0) || (vsize > dev->phys_mem_size)) {
+ IPAERR_RL("wrong parameters to %s mapping\n",
+ dev->name);
+ result = -EINVAL;
+ goto bail;
+ }
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (dev->is_sys_mem) {
+ IPADBG("Mapping system memory\n");
+ IPADBG("map sz=0x%zx\n", dev->size);
result =
dma_mmap_coherent(
- ipa3_ctx->pdev, vma,
- nat_ctx->vaddr, nat_ctx->dma_handle,
- nat_ctx->size);
-
+ ipa3_ctx->pdev, vma,
+ dev->vaddr, dev->dma_handle,
+ dev->size);
if (result) {
IPAERR("unable to map memory. Err:%d\n", result);
goto bail;
}
- ipa3_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+ dev->base_address = dev->vaddr;
} else {
IPADBG("Mapping shared(local) memory\n");
IPADBG("map sz=0x%lx\n", vsize);
- if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
- (vsize > IPA_NAT_PHYS_MEM_SIZE)) {
- result = -EINVAL;
- goto bail;
- }
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
- IPA_NAT_PHYS_MEM_OFFSET);
+ dev->smem_offset);
if (remap_pfn_range(
- vma, vma->vm_start,
- phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+ vma, vma->vm_start,
+ phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
IPAERR("remap failed\n");
result = -EAGAIN;
goto bail;
}
- ipa3_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+ dev->base_address = (void *)vma->vm_start;
}
- nat_ctx->is_mapped = true;
- vma->vm_ops = &ipa3_nat_remap_vm_ops;
- IPADBG("return\n");
result = 0;
+ vma->vm_ops = &ipa3_nat_ipv6ct_remap_vm_ops;
+ dev->is_mapped = true;
+ IPADBG("return\n");
bail:
- mutex_unlock(&nat_ctx->lock);
+ mutex_unlock(&dev->lock);
return result;
}
-static const struct file_operations ipa3_nat_fops = {
+static const struct file_operations ipa3_nat_ipv6ct_fops = {
.owner = THIS_MODULE,
- .open = ipa3_nat_open,
- .mmap = ipa3_nat_mmap
+ .open = ipa3_nat_ipv6ct_open,
+ .mmap = ipa3_nat_ipv6ct_mmap
};
/**
- * ipa3_allocate_temp_nat_memory() - Allocates temp nat memory
- *
- * Called during nat table delete
+ * ipa3_allocate_nat_ipv6ct_tmp_memory() - Allocates the NAT\IPv6CT temp memory
*/
-void ipa3_allocate_temp_nat_memory(void)
+static struct ipa3_nat_ipv6ct_tmp_mem *ipa3_nat_ipv6ct_allocate_tmp_memory(void)
{
- struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
- int gfp_flags = GFP_KERNEL | __GFP_ZERO;
-
- nat_ctx->tmp_vaddr =
- dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
- &nat_ctx->tmp_dma_handle, gfp_flags);
-
- if (nat_ctx->tmp_vaddr == NULL) {
- IPAERR("Temp Memory alloc failed\n");
- nat_ctx->is_tmp_mem = false;
- return;
- }
-
- nat_ctx->is_tmp_mem = true;
- IPADBG("IPA NAT allocated temp memory successfully\n");
-}
-
-/**
- * ipa3_create_nat_device() - Create the NAT device
- *
- * Called during ipa init to create nat device
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_create_nat_device(void)
-{
- struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
- int result;
+ struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
IPADBG("\n");
- mutex_lock(&nat_ctx->lock);
- nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
- if (IS_ERR(nat_ctx->class)) {
- IPAERR("unable to create the class\n");
- result = -ENODEV;
- goto vaddr_alloc_fail;
+ tmp_mem = kzalloc(sizeof(*tmp_mem), GFP_KERNEL);
+ if (tmp_mem == NULL)
+ return NULL;
+
+ tmp_mem->vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+ &tmp_mem->dma_handle, gfp_flags);
+ if (tmp_mem->vaddr == NULL)
+ goto bail_tmp_mem;
+
+ IPADBG("IPA successfully allocated temp memory\n");
+ return tmp_mem;
+
+bail_tmp_mem:
+ kfree(tmp_mem);
+ return NULL;
+}
+
+static int ipa3_nat_ipv6ct_init_device(
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ const char *name,
+ u32 phys_mem_size,
+ u32 smem_offset,
+ struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem)
+{
+ int result;
+
+ IPADBG("Init %s\n", name);
+
+ if (strnlen(name, IPA_DEV_NAME_MAX_LEN) == IPA_DEV_NAME_MAX_LEN) {
+ IPAERR("device name is too long\n");
+ return -ENODEV;
}
- result = alloc_chrdev_region(&nat_ctx->dev_num,
- 0,
- 1,
- NAT_DEV_NAME);
+
+ strlcpy(dev->name, name, IPA_DEV_NAME_MAX_LEN);
+
+ dev->class = class_create(THIS_MODULE, name);
+ if (IS_ERR(dev->class)) {
+ IPAERR("unable to create the class for %s\n", name);
+ return -ENODEV;
+ }
+ result = alloc_chrdev_region(&dev->dev_num, 0, 1, name);
if (result) {
- IPAERR("alloc_chrdev_region err.\n");
+ IPAERR("alloc_chrdev_region err. for %s\n", name);
result = -ENODEV;
goto alloc_chrdev_region_fail;
}
- nat_ctx->dev =
- device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
- "%s", NAT_DEV_NAME);
+ dev->dev = device_create(dev->class, NULL, dev->dev_num, NULL, name);
- if (IS_ERR(nat_ctx->dev)) {
- IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+ if (IS_ERR(dev->dev)) {
+ IPAERR("device_create err:%ld\n", PTR_ERR(dev->dev));
result = -ENODEV;
goto device_create_fail;
}
- cdev_init(&nat_ctx->cdev, &ipa3_nat_fops);
- nat_ctx->cdev.owner = THIS_MODULE;
- nat_ctx->cdev.ops = &ipa3_nat_fops;
+ cdev_init(&dev->cdev, &ipa3_nat_ipv6ct_fops);
+ dev->cdev.owner = THIS_MODULE;
- result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+ mutex_init(&dev->lock);
+ mutex_lock(&dev->lock);
+
+ result = cdev_add(&dev->cdev, dev->dev_num, 1);
if (result) {
IPAERR("cdev_add err=%d\n", -result);
goto cdev_add_fail;
}
- IPADBG("ipa nat dev added successful. major:%d minor:%d\n",
- MAJOR(nat_ctx->dev_num),
- MINOR(nat_ctx->dev_num));
- nat_ctx->is_dev = true;
- ipa3_allocate_temp_nat_memory();
- IPADBG("IPA NAT device created successfully\n");
- result = 0;
- goto bail;
+ dev->phys_mem_size = phys_mem_size;
+ dev->smem_offset = smem_offset;
+
+ dev->is_dev_init = true;
+ mutex_unlock(&dev->lock);
+
+ IPADBG("ipa dev %s added successful. major:%d minor:%d\n", name,
+ MAJOR(dev->dev_num), MINOR(dev->dev_num));
+ return 0;
cdev_add_fail:
- device_destroy(nat_ctx->class, nat_ctx->dev_num);
+ mutex_unlock(&dev->lock);
+ device_destroy(dev->class, dev->dev_num);
device_create_fail:
- unregister_chrdev_region(nat_ctx->dev_num, 1);
+ unregister_chrdev_region(dev->dev_num, 1);
alloc_chrdev_region_fail:
- class_destroy(nat_ctx->class);
-vaddr_alloc_fail:
- if (nat_ctx->vaddr) {
- IPADBG("Releasing system memory\n");
- dma_free_coherent(
- ipa3_ctx->pdev, nat_ctx->size,
- nat_ctx->vaddr, nat_ctx->dma_handle);
- nat_ctx->vaddr = NULL;
- nat_ctx->dma_handle = 0;
- nat_ctx->size = 0;
+ class_destroy(dev->class);
+ return result;
+}
+
+static void ipa3_nat_ipv6ct_destroy_device(
+ struct ipa3_nat_ipv6ct_common_mem *dev)
+{
+ IPADBG("\n");
+
+ mutex_lock(&dev->lock);
+
+ device_destroy(dev->class, dev->dev_num);
+ unregister_chrdev_region(dev->dev_num, 1);
+ class_destroy(dev->class);
+ dev->is_dev_init = false;
+
+ mutex_unlock(&dev->lock);
+
+ IPADBG("return\n");
+}
+
+/**
+ * ipa3_nat_ipv6ct_init_devices() - Initialize the NAT and IPv6CT devices
+ *
+ * Called during IPA init to create memory device
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_nat_ipv6ct_init_devices(void)
+{
+ struct ipa3_nat_ipv6ct_tmp_mem *tmp_mem;
+ int result;
+
+ IPADBG("\n");
+
+ /*
+ * Allocate NAT/IPv6CT temporary memory. The memory is never deleted,
+ * because provided to HW once NAT or IPv6CT table is deleted.
+ * NULL is a legal value
+ */
+ tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();
+
+ if (ipa3_nat_ipv6ct_init_device(
+ &ipa3_ctx->nat_mem.dev,
+ IPA_NAT_DEV_NAME,
+ IPA_NAT_PHYS_MEM_SIZE,
+ IPA_NAT_PHYS_MEM_OFFSET,
+ tmp_mem)) {
+ IPAERR("unable to create nat device\n");
+ result = -ENODEV;
+ goto fail_init_nat_dev;
}
-bail:
- mutex_unlock(&nat_ctx->lock);
+ if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) &&
+ ipa3_nat_ipv6ct_init_device(
+ &ipa3_ctx->ipv6ct_mem.dev,
+ IPA_IPV6CT_DEV_NAME,
+ IPA_IPV6CT_PHYS_MEM_SIZE,
+ IPA_IPV6CT_PHYS_MEM_OFFSET,
+ tmp_mem)) {
+ IPAERR("unable to create IPv6CT device\n");
+ result = -ENODEV;
+ goto fail_init_ipv6ct_dev;
+ }
+ return 0;
+
+fail_init_ipv6ct_dev:
+ ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
+fail_init_nat_dev:
+ if (tmp_mem != NULL) {
+ dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+ tmp_mem->vaddr, tmp_mem->dma_handle);
+ kfree(tmp_mem);
+ }
+ return result;
+}
+
+/**
+ * ipa3_nat_ipv6ct_destroy_devices() - destroy the NAT and IPv6CT devices
+ *
+ * Called during IPA init to destroy nat device
+ */
+void ipa3_nat_ipv6ct_destroy_devices(void)
+{
+ ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->ipv6ct_mem.dev);
+}
+
+static int ipa3_nat_ipv6ct_allocate_mem(struct ipa3_nat_ipv6ct_common_mem *dev,
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ int result = 0;
+
+ IPADBG("passed memory size %zu for %s\n",
+ table_alloc->size, dev->name);
+
+ if (!dev->is_dev_init) {
+ IPAERR("%s hasn't been initialized\n", dev->name);
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (dev->is_mem_allocated) {
+ IPAERR("Memory already allocated\n");
+ result = 0;
+ goto bail;
+ }
+
+ if (!table_alloc->size) {
+ IPAERR_RL("Invalid Parameters\n");
+ result = -EPERM;
+ goto bail;
+ }
+
+ if (table_alloc->size > IPA_NAT_PHYS_MEM_SIZE) {
+ IPADBG("Allocating system memory\n");
+ dev->is_sys_mem = true;
+ dev->vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev, table_alloc->size,
+ &dev->dma_handle, gfp_flags);
+ if (dev->vaddr == NULL) {
+ IPAERR("memory alloc failed\n");
+ result = -ENOMEM;
+ goto bail;
+ }
+ dev->size = table_alloc->size;
+ } else {
+ IPADBG("using shared(local) memory\n");
+ dev->is_sys_mem = false;
+ }
+
+ IPADBG("return\n");
+
+bail:
return result;
}
@@ -245,60 +398,51 @@ int ipa3_create_nat_device(void)
*/
int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
{
+ int result;
+ struct ipa_ioc_nat_ipv6ct_table_alloc tmp;
+
+ tmp.size = mem->size;
+ tmp.offset = 0;
+
+ result = ipa3_allocate_nat_table(&tmp);
+ if (result)
+ goto bail;
+
+ mem->offset = tmp.offset;
+
+bail:
+ return result;
+}
+
+/**
+ * ipa3_allocate_nat_table() - Allocates memory for the NAT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by NAT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
int result;
- IPADBG("passed memory size %zu\n", mem->size);
+ IPADBG("\n");
- mutex_lock(&nat_ctx->lock);
- if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR_RL("Nat device name mismatch\n");
- IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
- result = -EPERM;
+ mutex_lock(&nat_ctx->dev.lock);
+
+ result = ipa3_nat_ipv6ct_allocate_mem(&nat_ctx->dev, table_alloc);
+ if (result)
goto bail;
- }
- if (nat_ctx->is_dev != true) {
- IPAERR("Nat device not created successfully during boot up\n");
- result = -EPERM;
- goto bail;
- }
-
- if (nat_ctx->is_dev_init == true) {
- IPAERR("Device already init\n");
- result = 0;
- goto bail;
- }
-
- if (mem->size <= 0 ||
- nat_ctx->is_dev_init == true) {
- IPAERR_RL("Invalid Parameters or device is already init\n");
- result = -EPERM;
- goto bail;
- }
-
- if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
- IPADBG("Allocating system memory\n");
- nat_ctx->is_sys_mem = true;
- nat_ctx->vaddr =
- dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
- &nat_ctx->dma_handle, gfp_flags);
- if (nat_ctx->vaddr == NULL) {
- IPAERR("memory alloc failed\n");
- result = -ENOMEM;
- goto bail;
- }
- nat_ctx->size = mem->size;
- } else {
- IPADBG("using shared(local) memory\n");
- nat_ctx->is_sys_mem = false;
- }
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- struct ipa_pdn_entry *pdn_entries;
- struct ipa_mem_buffer *pdn_mem = &ipa3_ctx->nat_mem.pdn_mem;
+ size_t pdn_entry_size;
+ struct ipa_mem_buffer *pdn_mem = &nat_ctx->pdn_mem;
- pdn_mem->size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
+ ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+ pdn_mem->size = pdn_entry_size * IPA_MAX_PDN_NUM;
if (IPA_MEM_PART(pdn_config_size) < pdn_mem->size) {
IPAERR(
"number of PDN entries exceeds SRAM available space\n");
@@ -315,25 +459,346 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
result = -ENOMEM;
goto fail_alloc_pdn;
}
- pdn_entries = pdn_mem->base;
- memset(pdn_entries, 0, pdn_mem->size);
+ memset(pdn_mem->base, 0, pdn_mem->size);
IPADBG("IPA NAT dev allocated PDN memory successfully\n");
}
- nat_ctx->is_dev_init = true;
+ nat_ctx->dev.is_mem_allocated = true;
IPADBG("IPA NAT dev init successfully\n");
- mutex_unlock(&nat_ctx->lock);
+ mutex_unlock(&nat_ctx->dev.lock);
+
+ IPADBG("return\n");
return 0;
fail_alloc_pdn:
- if (nat_ctx->vaddr) {
- dma_free_coherent(ipa3_ctx->pdev, mem->size, nat_ctx->vaddr,
- nat_ctx->dma_handle);
- nat_ctx->vaddr = NULL;
+ if (nat_ctx->dev.vaddr) {
+ dma_free_coherent(ipa3_ctx->pdev, table_alloc->size,
+ nat_ctx->dev.vaddr, nat_ctx->dev.dma_handle);
+ nat_ctx->dev.vaddr = NULL;
}
bail:
- mutex_unlock(&nat_ctx->lock);
+ mutex_unlock(&nat_ctx->dev.lock);
+
+ return result;
+}
+
+/**
+ * ipa3_allocate_ipv6ct_table() - Allocates memory for the IPv6CT table
+ * @table_alloc: [in/out] memory parameters
+ *
+ * Called by IPv6CT client to allocate memory for the table entries.
+ * Based on the request size either shared or system memory will be used.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_allocate_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ int result;
+
+ IPADBG("\n");
+
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPAERR_RL("IPv6 connection tracking isn't supported\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+
+ result = ipa3_nat_ipv6ct_allocate_mem(
+ &ipa3_ctx->ipv6ct_mem.dev, table_alloc);
+ if (result)
+ goto bail;
+
+ ipa3_ctx->ipv6ct_mem.dev.is_mem_allocated = true;
+ IPADBG("IPA IPv6CT dev init successfully\n");
+
+bail:
+ mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+ return result;
+}
+
+static int ipa3_nat_ipv6ct_check_table_params(
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ uint32_t offset, uint16_t entries_num,
+ enum ipahal_nat_type nat_type)
+{
+ int result;
+ size_t entry_size, table_size;
+
+ result = ipahal_nat_entry_size(nat_type, &entry_size);
+ if (result) {
+ IPAERR("Failed to retrieve size of entry for %s\n",
+ ipahal_nat_type_str(nat_type));
+ return result;
+ }
+ table_size = entry_size * entries_num;
+
+ /* check for integer overflow */
+ if (offset > UINT_MAX - table_size) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
+ }
+
+ /* Check offset is not beyond allocated size */
+ if (dev->size < offset + table_size) {
+ IPAERR_RL("Table offset not valid\n");
+ IPAERR_RL("offset:%d entries:%d table_size:%zu mem_size:%zu\n",
+ offset, entries_num, table_size, dev->size);
+ return -EPERM;
+ }
+
+ if (dev->is_sys_mem && offset > UINT_MAX - dev->dma_handle) {
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("%s dma_handle: 0x%pa offset: 0x%x\n",
+ dev->name, &dev->dma_handle, offset);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static inline void ipa3_nat_ipv6ct_create_init_cmd(
+ struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd,
+ bool is_shared,
+ dma_addr_t base_addr,
+ uint8_t tbl_index,
+ uint32_t base_table_offset,
+ uint32_t expn_table_offset,
+ uint16_t table_entries,
+ uint16_t expn_table_entries,
+ const char *table_name)
+{
+ table_init_cmd->base_table_addr_shared = is_shared;
+ table_init_cmd->expansion_table_addr_shared = is_shared;
+
+ table_init_cmd->base_table_addr = base_addr + base_table_offset;
+ IPADBG("%s base table offset:0x%x\n", table_name, base_table_offset);
+
+ table_init_cmd->expansion_table_addr = base_addr + expn_table_offset;
+ IPADBG("%s expn table offset:0x%x\n", table_name, expn_table_offset);
+
+ table_init_cmd->table_index = tbl_index;
+ IPADBG("%s table index:0x%x\n", table_name, tbl_index);
+
+ table_init_cmd->size_base_table = table_entries;
+ IPADBG("%s base table size:0x%x\n", table_name, table_entries);
+
+ table_init_cmd->size_expansion_table = expn_table_entries;
+ IPADBG("%s expansion table size:0x%x\n",
+ table_name, expn_table_entries);
+}
+
+static inline void ipa3_nat_ipv6ct_init_device_structure(
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ uint32_t base_table_offset,
+ uint32_t expn_table_offset,
+ uint16_t table_entries,
+ uint16_t expn_table_entries)
+{
+ dev->base_table_addr = (char *)dev->base_address + base_table_offset;
+ IPADBG("%s base_table_addr: 0x%p\n", dev->name, dev->base_table_addr);
+
+ dev->expansion_table_addr =
+ (char *)dev->base_address + expn_table_offset;
+ IPADBG("%s expansion_table_addr: 0x%p\n",
+ dev->name, dev->expansion_table_addr);
+
+ IPADBG("%s table_entries: %d\n", dev->name, table_entries);
+ dev->table_entries = table_entries;
+
+ IPADBG("%s expn_table_entries: %d\n", dev->name, expn_table_entries);
+ dev->expn_table_entries = expn_table_entries;
+}
+
+static void ipa3_nat_create_init_cmd(
+ struct ipa_ioc_v4_nat_init *init,
+ bool is_shared,
+ dma_addr_t base_addr,
+ struct ipahal_imm_cmd_ip_v4_nat_init *cmd)
+{
+ IPADBG("\n");
+
+ ipa3_nat_ipv6ct_create_init_cmd(
+ &cmd->table_init,
+ is_shared,
+ base_addr,
+ init->tbl_index,
+ init->ipv4_rules_offset,
+ init->expn_rules_offset,
+ init->table_entries,
+ init->expn_table_entries,
+ ipa3_ctx->nat_mem.dev.name);
+
+ cmd->index_table_addr_shared = is_shared;
+ cmd->index_table_expansion_addr_shared = is_shared;
+
+ cmd->index_table_addr =
+ base_addr + init->index_offset;
+ IPADBG("index_offset:0x%x\n", init->index_offset);
+
+ cmd->index_table_expansion_addr =
+ base_addr + init->index_expn_offset;
+ IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ /*
+ * starting IPAv4.0 public ip field changed to store the
+ * PDN config table offset in SMEM
+ */
+ cmd->public_addr_info = IPA_MEM_PART(pdn_config_ofst);
+ IPADBG("pdn config base:0x%x\n", cmd->public_addr_info);
+ } else {
+ cmd->public_addr_info = init->ip_addr;
+ IPADBG("Public IP address:%pI4h\n", &cmd->public_addr_info);
+ }
+
+ IPADBG("return\n");
+}
+
+static void ipa3_nat_create_modify_pdn_cmd(
+ struct ipahal_imm_cmd_dma_shared_mem *mem_cmd, bool zero_mem)
+{
+ size_t pdn_entry_size, mem_size;
+
+ IPADBG("\n");
+
+ ipahal_nat_entry_size(IPAHAL_NAT_IPV4_PDN, &pdn_entry_size);
+ mem_size = pdn_entry_size * IPA_MAX_PDN_NUM;
+
+ if (zero_mem)
+ memset(ipa3_ctx->nat_mem.pdn_mem.base, 0, mem_size);
+
+ /* Copy the PDN config table to SRAM */
+ mem_cmd->is_read = false;
+ mem_cmd->skip_pipeline_clear = false;
+ mem_cmd->pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd->size = mem_size;
+ mem_cmd->system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
+ mem_cmd->local_addr = ipa3_ctx->smem_restricted_bytes +
+ IPA_MEM_PART(pdn_config_ofst);
+
+ IPADBG("return\n");
+}
+
+static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
+ bool zero_pdn_table)
+{
+ struct ipa3_desc desc[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+ struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC];
+ int i, num_cmd = 0, result;
+
+ IPADBG("\n");
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ cmd_pyld[num_cmd] =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("failed to construct NOP imm cmd\n");
+ return -ENOMEM;
+ }
+
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_NAT_INIT, cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR_RL("fail to construct NAT init imm cmd\n");
+ result = -EPERM;
+ goto destroy_imm_cmd;
+ }
+
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+ struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+
+ if (num_cmd >= IPA_NAT_MAX_NUM_OF_INIT_CMD_DESC) {
+ IPAERR("number of commands is out of range\n");
+ result = -ENOBUFS;
+ goto destroy_imm_cmd;
+ }
+
+ /* Copy the PDN config table to SRAM */
+ ipa3_nat_create_modify_pdn_cmd(&mem_cmd, zero_pdn_table);
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR(
+ "fail construct dma_shared_mem cmd: for pdn table");
+ result = -ENOMEM;
+ goto destroy_imm_cmd;
+ }
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+ IPADBG("added PDN table copy cmd\n");
+ }
+
+ result = ipa3_send_cmd(num_cmd, desc);
+ if (result) {
+ IPAERR("fail to send NAT init immediate command\n");
+ goto destroy_imm_cmd;
+ }
+
+ IPADBG("return\n");
+
+destroy_imm_cmd:
+ for (i = 0; i < num_cmd; ++i)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
+
+ return result;
+}
+
+static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
+{
+ struct ipa3_desc desc[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+ struct ipahal_imm_cmd_pyld
+ *cmd_pyld[IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC];
+ int i, num_cmd = 0, result;
+
+ IPADBG("\n");
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ cmd_pyld[num_cmd] =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("failed to construct NOP imm cmd\n");
+ return -ENOMEM;
+ }
+
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+
+ if (num_cmd >= IPA_IPV6CT_MAX_NUM_OF_INIT_CMD_DESC) {
+ IPAERR("number of commands is out of range\n");
+ result = -ENOBUFS;
+ goto destroy_imm_cmd;
+ }
+
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V6_CT_INIT, cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR_RL("fail to construct IPv6CT init imm cmd\n");
+ result = -EPERM;
+ goto destroy_imm_cmd;
+ }
+
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+
+ result = ipa3_send_cmd(num_cmd, desc);
+ if (result) {
+ IPAERR("Fail to send IPv6CT init immediate command\n");
+ goto destroy_imm_cmd;
+ }
+
+ IPADBG("return\n");
+
+destroy_imm_cmd:
+ for (i = 0; i < num_cmd; ++i)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
return result;
}
@@ -349,214 +814,79 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
*/
int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
{
-#define TBL_ENTRY_SIZE 32
-#define INDX_TBL_ENTRY_SIZE 4
-
- struct ipa3_desc desc[3];
struct ipahal_imm_cmd_ip_v4_nat_init cmd;
- int num_cmd = 0;
- int i = 0;
- struct ipahal_imm_cmd_pyld *cmd_pyld[3];
- struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
- int result = 0;
- u32 offset = 0;
- size_t tmp;
+ int result;
IPADBG("\n");
+
+ if (!ipa3_ctx->nat_mem.dev.is_mapped) {
+ IPAERR_RL("attempt to init %s before mmap\n",
+ ipa3_ctx->nat_mem.dev.name);
+ return -EPERM;
+ }
+
+ if (init->tbl_index >= 1) {
+ IPAERR_RL("Unsupported table index %d\n", init->tbl_index);
+ return -EPERM;
+ }
+
if (init->table_entries == 0) {
- IPADBG("Table entries is zero\n");
+ IPAERR_RL("Table entries is zero\n");
return -EPERM;
}
- /* check for integer overflow */
- if (init->ipv4_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR_RL("Detected overflow\n");
- return -EPERM;
- }
- /* Check Table Entry offset is not
- * beyond allocated size
- */
- tmp = init->ipv4_rules_offset +
- (TBL_ENTRY_SIZE * (init->table_entries + 1));
- if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR_RL("Table rules offset not valid\n");
- IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
- init->ipv4_rules_offset, (init->table_entries + 1),
- tmp, ipa3_ctx->nat_mem.size);
- return -EPERM;
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->nat_mem.dev,
+ init->ipv4_rules_offset,
+ init->table_entries + 1,
+ IPAHAL_NAT_IPV4);
+ if (result) {
+ IPAERR_RL("Bad params for NAT base table\n");
+ return result;
}
- /* check for integer overflow */
- if (init->expn_rules_offset >
- (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
- IPAERR_RL("Detected overflow\n");
- return -EPERM;
- }
- /* Check Expn Table Entry offset is not
- * beyond allocated size
- */
- tmp = init->expn_rules_offset +
- (TBL_ENTRY_SIZE * init->expn_table_entries);
- if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR_RL("Expn Table rules offset not valid\n");
- IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
- init->expn_rules_offset, init->expn_table_entries,
- tmp, ipa3_ctx->nat_mem.size);
- return -EPERM;
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->nat_mem.dev,
+ init->expn_rules_offset,
+ init->expn_table_entries,
+ IPAHAL_NAT_IPV4);
+ if (result) {
+ IPAERR_RL("Bad params for NAT expansion table\n");
+ return result;
}
- /* check for integer overflow */
- if (init->index_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR_RL("Detected overflow\n");
- return -EPERM;
- }
- /* Check Indx Table Entry offset is not
- * beyond allocated size
- */
- tmp = init->index_offset +
- (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
- if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR_RL("Indx Table rules offset not valid\n");
- IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
- init->index_offset, (init->table_entries + 1),
- tmp, ipa3_ctx->nat_mem.size);
- return -EPERM;
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->nat_mem.dev,
+ init->index_offset,
+ init->table_entries + 1,
+ IPAHAL_NAT_IPV4_INDEX);
+ if (result) {
+ IPAERR_RL("Bad params for index table\n");
+ return result;
}
- /* check for integer overflow */
- if (init->index_expn_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR_RL("Detected overflow\n");
- return -EPERM;
- }
- /* Check Expn Table entry offset is not
- * beyond allocated size
- */
- tmp = init->index_expn_offset +
- (INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
- if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR_RL("Indx Expn Table rules offset not valid\n");
- IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
- init->index_expn_offset, init->expn_table_entries,
- tmp, ipa3_ctx->nat_mem.size);
- return -EPERM;
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->nat_mem.dev,
+ init->index_expn_offset,
+ init->expn_table_entries,
+ IPAHAL_NAT_IPV4_INDEX);
+ if (result) {
+ IPAERR_RL("Bad params for index expansion table\n");
+ return result;
}
- memset(&desc, 0, sizeof(desc));
- /* NO-OP IC for ensuring that IPA pipeline is empty */
- cmd_pyld[num_cmd] =
- ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
- if (!cmd_pyld[num_cmd]) {
- IPAERR("failed to construct NOP imm cmd\n");
- result = -ENOMEM;
- goto bail;
- }
-
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
- desc[num_cmd].callback = NULL;
- desc[num_cmd].user1 = NULL;
- desc[num_cmd].user2 = 0;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- num_cmd++;
-
- if (ipa3_ctx->nat_mem.vaddr) {
+ if (ipa3_ctx->nat_mem.dev.is_sys_mem) {
IPADBG("using system memory for nat table\n");
- cmd.ipv4_rules_addr_shared = false;
- cmd.ipv4_expansion_rules_addr_shared = false;
- cmd.index_table_addr_shared = false;
- cmd.index_table_expansion_addr_shared = false;
-
- offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
-
- if ((init->ipv4_rules_offset > offset) ||
- (init->expn_rules_offset > offset) ||
- (init->index_offset > offset) ||
- (init->index_expn_offset > offset)) {
- IPAERR_RL("Failed due to integer overflow\n");
- IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
- &ipa3_ctx->nat_mem.dma_handle);
- IPAERR_RL("ipv4_rules_offset: 0x%x\n",
- init->ipv4_rules_offset);
- IPAERR_RL("expn_rules_offset: 0x%x\n",
- init->expn_rules_offset);
- IPAERR_RL("index_offset: 0x%x\n",
- init->index_offset);
- IPAERR_RL("index_expn_offset: 0x%x\n",
- init->index_expn_offset);
- result = -EPERM;
- goto destroy_imm_cmd;
- }
- cmd.ipv4_rules_addr =
- ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
- IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
-
- cmd.ipv4_expansion_rules_addr =
- ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
- IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
-
- cmd.index_table_addr =
- ipa3_ctx->nat_mem.dma_handle + init->index_offset;
- IPADBG("index_offset:0x%x\n", init->index_offset);
-
- cmd.index_table_expansion_addr =
- ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
- IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+ /*
+ * Safe to process, since integer overflow was
+ * checked in ipa3_nat_ipv6ct_check_table_params
+ */
+ ipa3_nat_create_init_cmd(init, false,
+ ipa3_ctx->nat_mem.dev.dma_handle, &cmd);
} else {
IPADBG("using shared(local) memory for nat table\n");
- cmd.ipv4_rules_addr_shared = true;
- cmd.ipv4_expansion_rules_addr_shared = true;
- cmd.index_table_addr_shared = true;
- cmd.index_table_expansion_addr_shared = true;
-
- cmd.ipv4_rules_addr = init->ipv4_rules_offset +
- IPA_RAM_NAT_OFST;
-
- cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
- IPA_RAM_NAT_OFST;
-
- cmd.index_table_addr = init->index_offset +
- IPA_RAM_NAT_OFST;
-
- cmd.index_table_expansion_addr = init->index_expn_offset +
- IPA_RAM_NAT_OFST;
+ ipa3_nat_create_init_cmd(init, true, IPA_RAM_NAT_OFST, &cmd);
}
- cmd.table_index = init->tbl_index;
- IPADBG("Table index:0x%x\n", cmd.table_index);
- cmd.size_base_tables = init->table_entries;
- IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
- cmd.size_expansion_tables = init->expn_table_entries;
- IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- /*
- * public ip field changed to store the PDN config base
- * address in IPAv4
- */
- cmd.public_ip_addr = IPA_MEM_PART(pdn_config_ofst);
- IPADBG("pdn config base:0x%x\n", cmd.public_ip_addr);
- } else {
- cmd.public_ip_addr = init->ip_addr;
- IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
- }
- cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
- if (!cmd_pyld[num_cmd]) {
- IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
- result = -EPERM;
- goto destroy_imm_cmd;
- }
-
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
- desc[num_cmd].callback = NULL;
- desc[num_cmd].user1 = NULL;
- desc[num_cmd].user2 = 0;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- num_cmd++;
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
struct ipa_pdn_entry *pdn_entries;
@@ -569,87 +899,154 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
pdn_entries[0].resrvd = 0;
IPADBG("Public ip address:0x%x\n", init->ip_addr);
-
- /* Copy the PDN config table to SRAM */
- mem_cmd.is_read = false;
- mem_cmd.skip_pipeline_clear = false;
- mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
- mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
- mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(pdn_config_ofst);
- cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
- if (!cmd_pyld[num_cmd]) {
- IPAERR(
- "fail construct dma_shared_mem cmd: for pdn table");
- result = -ENOMEM;
- goto destroy_imm_cmd;
- }
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
- desc[num_cmd].callback = NULL;
- desc[num_cmd].user1 = NULL;
- desc[num_cmd].user2 = 0;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- num_cmd++;
- IPADBG("added PDN table copy cmd\n");
}
- IPADBG("posting v4 init command\n");
- if (ipa3_send_cmd(num_cmd, desc)) {
- IPAERR("Fail to send immediate command\n");
- result = -EPERM;
- goto destroy_imm_cmd;
+ IPADBG("posting NAT init command\n");
+ result = ipa3_nat_send_init_cmd(&cmd, false);
+ if (result) {
+ IPAERR("Fail to send NAT init immediate command\n");
+ return result;
}
+ ipa3_nat_ipv6ct_init_device_structure(
+ &ipa3_ctx->nat_mem.dev,
+ init->ipv4_rules_offset,
+ init->expn_rules_offset,
+ init->table_entries,
+ init->expn_table_entries);
+
ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
- IPADBG("Table ip address:0x%x", ipa3_ctx->nat_mem.public_ip_addr);
-
- ipa3_ctx->nat_mem.ipv4_rules_addr =
- (char *)ipa3_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
- IPADBG("ipv4_rules_addr: 0x%p\n",
- ipa3_ctx->nat_mem.ipv4_rules_addr);
-
- ipa3_ctx->nat_mem.ipv4_expansion_rules_addr =
- (char *)ipa3_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
- IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
- ipa3_ctx->nat_mem.ipv4_expansion_rules_addr);
+ IPADBG("Public IP address:%pI4h\n", &ipa3_ctx->nat_mem.public_ip_addr);
ipa3_ctx->nat_mem.index_table_addr =
- (char *)ipa3_ctx->nat_mem.nat_base_address +
+ (char *)ipa3_ctx->nat_mem.dev.base_address +
init->index_offset;
IPADBG("index_table_addr: 0x%p\n",
ipa3_ctx->nat_mem.index_table_addr);
ipa3_ctx->nat_mem.index_table_expansion_addr =
- (char *)ipa3_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+ (char *)ipa3_ctx->nat_mem.dev.base_address + init->index_expn_offset;
IPADBG("index_table_expansion_addr: 0x%p\n",
ipa3_ctx->nat_mem.index_table_expansion_addr);
- IPADBG("size_base_tables: %d\n", init->table_entries);
- ipa3_ctx->nat_mem.size_base_tables = init->table_entries;
-
- IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
- ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
-
+ ipa3_ctx->nat_mem.dev.is_hw_init = true;
IPADBG("return\n");
-destroy_imm_cmd:
- for (i = 0; i < num_cmd; i++)
- ipahal_destroy_imm_cmd(cmd_pyld[i]);
-bail:
- return result;
+ return 0;
}
/**
-* ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
-* @mdfy_pdn: [in] PDN info to be written to SRAM
-*
-* Called by NAT client driver to modify an entry in the PDN config table
-*
-* Returns: 0 on success, negative on failure
-*/
+ * ipa3_ipv6ct_init_cmd() - Post IP_V6_CONN_TRACK_INIT command to IPA HW
+ * @init: [in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V6_CONN_TRACK_INIT command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init)
+{
+ struct ipahal_imm_cmd_ip_v6_ct_init cmd;
+ int result;
+
+ IPADBG("\n");
+
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPAERR_RL("IPv6 connection tracking isn't supported\n");
+ return -EPERM;
+ }
+
+ if (!ipa3_ctx->ipv6ct_mem.dev.is_mapped) {
+ IPAERR_RL("attempt to init %s before mmap\n",
+ ipa3_ctx->ipv6ct_mem.dev.name);
+ return -EPERM;
+ }
+
+ if (init->tbl_index >= 1) {
+ IPAERR_RL("Unsupported table index %d\n", init->tbl_index);
+ return -EPERM;
+ }
+
+ if (init->table_entries == 0) {
+ IPAERR_RL("Table entries is zero\n");
+ return -EPERM;
+ }
+
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->ipv6ct_mem.dev,
+ init->base_table_offset,
+ init->table_entries + 1,
+ IPAHAL_NAT_IPV6CT);
+ if (result) {
+ IPAERR_RL("Bad params for IPv6CT base table\n");
+ return result;
+ }
+
+ result = ipa3_nat_ipv6ct_check_table_params(
+ &ipa3_ctx->ipv6ct_mem.dev,
+ init->expn_table_offset,
+ init->expn_table_entries,
+ IPAHAL_NAT_IPV6CT);
+ if (result) {
+ IPAERR_RL("Bad params for IPv6CT expansion table\n");
+ return result;
+ }
+
+ if (ipa3_ctx->ipv6ct_mem.dev.is_sys_mem) {
+ IPADBG("using system memory for nat table\n");
+ /*
+ * Safe to process, since integer overflow was
+ * checked in ipa3_nat_ipv6ct_check_table_params
+ */
+ ipa3_nat_ipv6ct_create_init_cmd(
+ &cmd.table_init,
+ false,
+ ipa3_ctx->ipv6ct_mem.dev.dma_handle,
+ init->tbl_index,
+ init->base_table_offset,
+ init->expn_table_offset,
+ init->table_entries,
+ init->expn_table_entries,
+ ipa3_ctx->ipv6ct_mem.dev.name);
+ } else {
+ IPADBG("using shared(local) memory for nat table\n");
+ ipa3_nat_ipv6ct_create_init_cmd(
+ &cmd.table_init,
+ true,
+ IPA_RAM_IPV6CT_OFST,
+ init->tbl_index,
+ init->base_table_offset,
+ init->expn_table_offset,
+ init->table_entries,
+ init->expn_table_entries,
+ ipa3_ctx->ipv6ct_mem.dev.name);
+ }
+
+ IPADBG("posting ip_v6_ct_init imm command\n");
+ result = ipa3_ipv6ct_send_init_cmd(&cmd);
+ if (result) {
+ IPAERR("fail to send IPv6CT init immediate command\n");
+ return result;
+ }
+
+ ipa3_nat_ipv6ct_init_device_structure(
+ &ipa3_ctx->ipv6ct_mem.dev,
+ init->base_table_offset,
+ init->expn_table_offset,
+ init->table_entries,
+ init->expn_table_entries);
+
+ ipa3_ctx->ipv6ct_mem.dev.is_hw_init = true;
+ IPADBG("return\n");
+ return 0;
+}
+
+/**
+ * ipa3_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+ * @mdfy_pdn: [in] PDN info to be written to SRAM
+ *
+ * Called by NAT client driver to modify an entry in the PDN config table
+ *
+ * Returns: 0 on success, negative on failure
+ */
int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
{
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
@@ -659,21 +1056,24 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
struct ipa_pdn_entry *pdn_entries = nat_ctx->pdn_mem.base;
+ IPADBG("\n");
+
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
- IPAERR("IPA HW does not support multi PDN\n");
+ IPAERR_RL("IPA HW does not support multi PDN\n");
return -EPERM;
}
- if (!nat_ctx->is_dev_init) {
- IPAERR("attempt to modify a PDN entry before dev init\n");
+ if (!nat_ctx->dev.is_mem_allocated) {
+ IPAERR_RL(
+ "attempt to modify a PDN entry before the PDN table memory allocation\n");
return -EPERM;
}
if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) {
- IPAERR("pdn index out of range %d\n", mdfy_pdn->pdn_index);
+ IPAERR_RL("pdn index out of range %d\n", mdfy_pdn->pdn_index);
return -EPERM;
}
- mutex_lock(&nat_ctx->lock);
+ mutex_lock(&nat_ctx->dev.lock);
/* store ip in pdn entries cache array */
pdn_entries[mdfy_pdn->pdn_index].public_ip =
@@ -683,21 +1083,13 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
pdn_entries[mdfy_pdn->pdn_index].src_metadata =
mdfy_pdn->src_metadata;
- IPADBG("Modify PDN in index %d: ", mdfy_pdn->pdn_index);
- IPADBG("Public ip address:0x%x, ", mdfy_pdn->public_ip);
- IPADBG("dst metadata:0x%x, ", mdfy_pdn->dst_metadata);
- IPADBG("src metadata:0x%x\n", mdfy_pdn->src_metadata);
-
- memset(&desc, 0, sizeof(desc));
+ IPADBG("Modify PDN in index: %d Public ip address:%pI4h\n",
+ mdfy_pdn->pdn_index, &mdfy_pdn->public_ip);
+ IPADBG("Modify PDN dst metadata: 0x%x src metadata: 0x%x\n",
+ mdfy_pdn->dst_metadata, mdfy_pdn->src_metadata);
/* Copy the PDN config table to SRAM */
- mem_cmd.is_read = false;
- mem_cmd.skip_pipeline_clear = false;
- mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
- mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base;
- mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(pdn_config_ofst);
+ ipa3_nat_create_modify_pdn_cmd(&mem_cmd, false);
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld) {
@@ -706,25 +1098,199 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
result = -ENOMEM;
goto bail;
}
- desc.opcode = cmd_pyld->opcode;
- desc.type = IPA_IMM_CMD_DESC;
- desc.callback = NULL;
- desc.user1 = NULL;
- desc.user2 = 0;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
- IPADBG("sending PDN table copy cmd");
- if (ipa3_send_cmd(1, &desc)) {
- IPAERR("Fail to send immediate command\n");
- result = -EPERM;
- }
+ IPADBG("sending PDN table copy cmd\n");
+ result = ipa3_send_cmd(1, &desc);
+ if (result)
+ IPAERR("Fail to send PDN table copy immediate command\n");
ipahal_destroy_imm_cmd(cmd_pyld);
+
+ IPADBG("return\n");
+
bail:
- mutex_unlock(&nat_ctx->lock);
+ mutex_unlock(&nat_ctx->dev.lock);
return result;
}
+
+static uint32_t ipa3_nat_ipv6ct_calculate_table_size(uint8_t base_addr)
+{
+ size_t entry_size;
+ u32 entries_num;
+ enum ipahal_nat_type nat_type;
+
+ switch (base_addr) {
+ case IPA_NAT_BASE_TBL:
+ entries_num = ipa3_ctx->nat_mem.dev.table_entries + 1;
+ nat_type = IPAHAL_NAT_IPV4;
+ break;
+ case IPA_NAT_EXPN_TBL:
+ entries_num = ipa3_ctx->nat_mem.dev.expn_table_entries;
+ nat_type = IPAHAL_NAT_IPV4;
+ break;
+ case IPA_NAT_INDX_TBL:
+ entries_num = ipa3_ctx->nat_mem.dev.table_entries + 1;
+ nat_type = IPAHAL_NAT_IPV4_INDEX;
+ break;
+ case IPA_NAT_INDEX_EXPN_TBL:
+ entries_num = ipa3_ctx->nat_mem.dev.expn_table_entries;
+ nat_type = IPAHAL_NAT_IPV4_INDEX;
+ break;
+ case IPA_IPV6CT_BASE_TBL:
+ entries_num = ipa3_ctx->ipv6ct_mem.dev.table_entries + 1;
+ nat_type = IPAHAL_NAT_IPV6CT;
+ break;
+ case IPA_IPV6CT_EXPN_TBL:
+ entries_num = ipa3_ctx->ipv6ct_mem.dev.expn_table_entries;
+ nat_type = IPAHAL_NAT_IPV6CT;
+ break;
+ default:
+ IPAERR_RL("Invalid base_addr %d for table DMA command\n",
+ base_addr);
+ return 0;
+ }
+
+ ipahal_nat_entry_size(nat_type, &entry_size);
+ return entry_size * entries_num;
+}
+
+static int ipa3_table_validate_table_dma_one(struct ipa_ioc_nat_dma_one *param)
+{
+ uint32_t table_size;
+
+ if (param->table_index >= 1) {
+ IPAERR_RL("Unsupported table index %d\n", param->table_index);
+ return -EPERM;
+ }
+
+ switch (param->base_addr) {
+ case IPA_NAT_BASE_TBL:
+ case IPA_NAT_EXPN_TBL:
+ case IPA_NAT_INDX_TBL:
+ case IPA_NAT_INDEX_EXPN_TBL:
+ if (!ipa3_ctx->nat_mem.dev.is_hw_init) {
+ IPAERR_RL("attempt to write to %s before HW init\n",
+ ipa3_ctx->nat_mem.dev.name);
+ return -EPERM;
+ }
+ break;
+ case IPA_IPV6CT_BASE_TBL:
+ case IPA_IPV6CT_EXPN_TBL:
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPAERR_RL("IPv6 connection tracking isn't supported\n");
+ return -EPERM;
+ }
+
+ if (!ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
+ IPAERR_RL("attempt to write to %s before HW init\n",
+ ipa3_ctx->ipv6ct_mem.dev.name);
+ return -EPERM;
+ }
+ break;
+ default:
+ IPAERR_RL("Invalid base_addr %d for table DMA command\n",
+ param->base_addr);
+ return -EPERM;
+ }
+
+ table_size = ipa3_nat_ipv6ct_calculate_table_size(param->base_addr);
+ if (!table_size) {
+ IPAERR_RL("Failed to calculate table size for base_addr %d\n",
+ param->base_addr);
+ return -EPERM;
+ }
+
+ if (param->offset >= table_size) {
+ IPAERR_RL("Invalid offset %d for table DMA command\n",
+ param->offset);
+ IPAERR_RL("table_index %d base addr %d size %d\n",
+ param->table_index, param->base_addr, table_size);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+
+/**
+ * ipa3_table_dma_cmd() - Post TABLE_DMA command to IPA HW
+ * @dma: [in] initialization command attributes
+ *
+ * Called by NAT/IPv6CT clients to post TABLE_DMA command to IPA HW
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ struct ipahal_imm_cmd_table_dma cmd;
+ enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA;
+ struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
+ struct ipa3_desc desc[IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC];
+ uint8_t cnt, num_cmd = 0;
+ int result = 0;
+
+ IPADBG("\n");
+ if (!dma->entries ||
+ dma->entries >= IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC) {
+ IPAERR_RL("Invalid number of entries %d\n",
+ dma->entries);
+ result = -EPERM;
+ goto bail;
+ }
+
+ for (cnt = 0; cnt < dma->entries; ++cnt) {
+ result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]);
+ if (result) {
+ IPAERR_RL("Table DMA command parameter %d is invalid\n",
+ cnt);
+ goto bail;
+ }
+ }
+
+ /* NO-OP IC for ensuring that IPA pipeline is empty */
+ cmd_pyld[num_cmd] =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("Failed to construct NOP imm cmd\n");
+ result = -ENOMEM;
+ goto destroy_imm_cmd;
+ }
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+
+ /* NAT_DMA was renamed to TABLE_DMA starting from IPAv4 */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+ cmd_name = IPA_IMM_CMD_TABLE_DMA;
+
+ for (cnt = 0; cnt < dma->entries; ++cnt) {
+ cmd.table_index = dma->dma[cnt].table_index;
+ cmd.base_addr = dma->dma[cnt].base_addr;
+ cmd.offset = dma->dma[cnt].offset;
+ cmd.data = dma->dma[cnt].data;
+ cmd_pyld[num_cmd] =
+ ipahal_construct_imm_cmd(cmd_name, &cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR_RL("Fail to construct table_dma imm cmd\n");
+ result = -ENOMEM;
+ goto destroy_imm_cmd;
+ }
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
+ ++num_cmd;
+ }
+ result = ipa3_send_cmd(num_cmd, desc);
+ if (result)
+ IPAERR("Fail to send table_dma immediate command\n");
+
+ IPADBG("return\n");
+
+destroy_imm_cmd:
+ for (cnt = 0; cnt < num_cmd; ++cnt)
+ ipahal_destroy_imm_cmd(cmd_pyld[cnt]);
+bail:
+ return result;
+}
+
/**
* ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
* @dma: [in] initialization command attributes
@@ -735,221 +1301,138 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
*/
int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
{
-#define NUM_OF_DESC 2
-
- struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
- struct ipahal_imm_cmd_nat_dma cmd;
- enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA;
- struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
- struct ipa3_desc *desc = NULL;
- u16 size = 0, cnt = 0;
- int ret = 0;
-
- IPADBG("\n");
- if (dma->entries <= 0) {
- IPAERR_RL("Invalid number of commands %d\n",
- dma->entries);
- ret = -EPERM;
- goto bail;
- }
-
- for (cnt = 0; cnt < dma->entries; cnt++) {
- if (dma->dma[cnt].table_index >= 1) {
- IPAERR_RL("Invalid table index %d\n",
- dma->dma[cnt].table_index);
- ret = -EPERM;
- goto bail;
- }
-
- switch (dma->dma[cnt].base_addr) {
- case IPA_NAT_BASE_TBL:
- if (dma->dma[cnt].offset >=
- (ipa3_ctx->nat_mem.size_base_tables + 1) *
- NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR_RL("Invalid offset %d\n",
- dma->dma[cnt].offset);
- ret = -EPERM;
- goto bail;
- }
-
- break;
-
- case IPA_NAT_EXPN_TBL:
- if (dma->dma[cnt].offset >=
- ipa3_ctx->nat_mem.size_expansion_tables *
- NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR_RL("Invalid offset %d\n",
- dma->dma[cnt].offset);
- ret = -EPERM;
- goto bail;
- }
-
- break;
-
- case IPA_NAT_INDX_TBL:
- if (dma->dma[cnt].offset >=
- (ipa3_ctx->nat_mem.size_base_tables + 1) *
- NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR_RL("Invalid offset %d\n",
- dma->dma[cnt].offset);
- ret = -EPERM;
- goto bail;
- }
-
- break;
-
- case IPA_NAT_INDEX_EXPN_TBL:
- if (dma->dma[cnt].offset >=
- ipa3_ctx->nat_mem.size_expansion_tables *
- NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR_RL("Invalid offset %d\n",
- dma->dma[cnt].offset);
- ret = -EPERM;
- goto bail;
- }
-
- break;
-
- default:
- IPAERR_RL("Invalid base_addr %d\n",
- dma->dma[cnt].base_addr);
- ret = -EPERM;
- goto bail;
- }
- }
-
- size = sizeof(struct ipa3_desc) * NUM_OF_DESC;
- desc = kzalloc(size, GFP_KERNEL);
- if (desc == NULL) {
- IPAERR("Failed to alloc memory\n");
- ret = -ENOMEM;
- goto bail;
- }
-
- /* NO-OP IC for ensuring that IPA pipeline is empty */
- nop_cmd_pyld =
- ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
- if (!nop_cmd_pyld) {
- IPAERR("Failed to construct NOP imm cmd\n");
- ret = -ENOMEM;
- goto bail;
- }
- desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].opcode = nop_cmd_pyld->opcode;
- desc[0].callback = NULL;
- desc[0].user1 = NULL;
- desc[0].user2 = 0;
- desc[0].pyld = nop_cmd_pyld->data;
- desc[0].len = nop_cmd_pyld->len;
-
- /* NAT_DMA was renamed to TABLE_DMA starting from IPAv4 */
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
- cmd_name = IPA_IMM_CMD_TABLE_DMA;
-
- for (cnt = 0; cnt < dma->entries; cnt++) {
- cmd.table_index = dma->dma[cnt].table_index;
- cmd.base_addr = dma->dma[cnt].base_addr;
- cmd.offset = dma->dma[cnt].offset;
- cmd.data = dma->dma[cnt].data;
- cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
- if (!cmd_pyld) {
- IPAERR_RL("Fail to construct nat_dma imm cmd\n");
- continue;
- }
- desc[1].type = IPA_IMM_CMD_DESC;
- desc[1].opcode = cmd_pyld->opcode;
- desc[1].callback = NULL;
- desc[1].user1 = NULL;
- desc[1].user2 = 0;
- desc[1].pyld = cmd_pyld->data;
- desc[1].len = cmd_pyld->len;
-
- ret = ipa3_send_cmd(NUM_OF_DESC, desc);
- if (ret == -EPERM)
- IPAERR("Fail to send immediate command %d\n", cnt);
- ipahal_destroy_imm_cmd(cmd_pyld);
- }
-
-bail:
- if (desc != NULL)
- kfree(desc);
-
- if (nop_cmd_pyld != NULL)
- ipahal_destroy_imm_cmd(nop_cmd_pyld);
-
- return ret;
+ return ipa3_table_dma_cmd(dma);
}
-/**
- * ipa3_nat_free_mem_and_device() - free the NAT memory and remove the device
- * @nat_ctx: [in] the IPA NAT memory to free
- *
- * Called by NAT client driver to free the NAT memory and remove the device
- */
-void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
+static void ipa3_nat_ipv6ct_free_mem(struct ipa3_nat_ipv6ct_common_mem *dev)
{
- struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
- struct ipa3_desc desc;
- struct ipahal_imm_cmd_pyld *cmd_pyld;
+ IPADBG("\n");
+ if (!dev->is_mem_allocated) {
+ IPADBG("attempt to delete %s before memory allocation\n",
+ dev->name);
+ /* Deletion of partly initialized table is not an error */
+ goto clear;
+ }
+
+ if (dev->is_sys_mem) {
+ IPADBG("freeing the dma memory for %s\n", dev->name);
+ dma_free_coherent(
+ ipa3_ctx->pdev, dev->size,
+ dev->vaddr, dev->dma_handle);
+ dev->size = 0;
+ dev->vaddr = NULL;
+ }
+
+ dev->is_mem_allocated = false;
+
+clear:
+ dev->table_entries = 0;
+ dev->expn_table_entries = 0;
+ dev->base_table_addr = NULL;
+ dev->expansion_table_addr = NULL;
+
+ dev->is_hw_init = false;
+ dev->is_mapped = false;
+ dev->is_sys_mem = false;
+
+ IPADBG("return\n");
+}
+
+static int ipa3_nat_ipv6ct_create_del_table_cmd(
+ uint8_t tbl_index,
+ u32 base_addr,
+ struct ipa3_nat_ipv6ct_common_mem *dev,
+ struct ipahal_imm_cmd_nat_ipv6ct_init_common *table_init_cmd)
+{
+ bool mem_type_shared = true;
IPADBG("\n");
- mutex_lock(&nat_ctx->lock);
- if (nat_ctx->is_sys_mem) {
- IPADBG("freeing the dma memory\n");
- dma_free_coherent(
- ipa3_ctx->pdev, nat_ctx->size,
- nat_ctx->vaddr, nat_ctx->dma_handle);
- nat_ctx->size = 0;
- nat_ctx->vaddr = NULL;
+ if (tbl_index >= 1) {
+ IPAERR_RL("Unsupported table index %d\n", tbl_index);
+ return -EPERM;
}
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- struct ipa_pdn_entry *pdn_entries =
- nat_ctx->pdn_mem.base;
-
- /* zero the PDN table and copy the PDN config table to SRAM */
- IPADBG("zeroing the PDN config table\n");
- memset(pdn_entries, 0, sizeof(struct ipa_pdn_entry) *
- IPA_MAX_PDN_NUM);
- mem_cmd.is_read = false;
- mem_cmd.skip_pipeline_clear = false;
- mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
- mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base;
- mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(pdn_config_ofst);
- cmd_pyld = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
- if (!cmd_pyld) {
- IPAERR(
- "fail construct dma_shared_mem cmd: for pdn table");
- goto lbl_free_pdn;
- }
- memset(&desc, 0, sizeof(desc));
- desc.opcode = cmd_pyld->opcode;
- desc.pyld = cmd_pyld->data;
- desc.len = cmd_pyld->len;
- desc.type = IPA_IMM_CMD_DESC;
-
- IPADBG("sending PDN table copy cmd\n");
- if (ipa3_send_cmd(1, &desc))
- IPAERR("Fail to send immediate command\n");
-
- ipahal_destroy_imm_cmd(cmd_pyld);
-lbl_free_pdn:
- IPADBG("freeing the PDN memory\n");
- dma_free_coherent(ipa3_ctx->pdev,
- nat_ctx->pdn_mem.size,
- nat_ctx->pdn_mem.base,
- nat_ctx->pdn_mem.phys_base);
+ if (dev->tmp_mem != NULL) {
+ IPADBG("using temp memory during %s del\n", dev->name);
+ mem_type_shared = false;
+ base_addr = dev->tmp_mem->dma_handle;
}
- nat_ctx->is_mapped = false;
- nat_ctx->is_sys_mem = false;
- nat_ctx->is_dev_init = false;
- mutex_unlock(&nat_ctx->lock);
+ table_init_cmd->table_index = tbl_index;
+ table_init_cmd->base_table_addr = base_addr;
+ table_init_cmd->base_table_addr_shared = mem_type_shared;
+ table_init_cmd->expansion_table_addr = base_addr;
+ table_init_cmd->expansion_table_addr_shared = mem_type_shared;
+ table_init_cmd->size_base_table = 0;
+ table_init_cmd->size_expansion_table = 0;
IPADBG("return\n");
+
+ return 0;
+}
+
+static int ipa3_nat_send_del_table_cmd(uint8_t tbl_index)
+{
+ struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+ int result;
+
+ IPADBG("\n");
+
+ result = ipa3_nat_ipv6ct_create_del_table_cmd(
+ tbl_index,
+ IPA_NAT_PHYS_MEM_OFFSET,
+ &ipa3_ctx->nat_mem.dev,
+ &cmd.table_init);
+ if (result) {
+ IPAERR(
+ "Fail to create immediate command to delete NAT table\n");
+ return result;
+ }
+
+ cmd.index_table_addr = cmd.table_init.base_table_addr;
+ cmd.index_table_addr_shared = cmd.table_init.base_table_addr_shared;
+ cmd.index_table_expansion_addr = cmd.index_table_addr;
+ cmd.index_table_expansion_addr_shared = cmd.index_table_addr_shared;
+ cmd.public_addr_info = 0;
+
+ IPADBG("posting NAT delete command\n");
+ result = ipa3_nat_send_init_cmd(&cmd, true);
+ if (result) {
+ IPAERR("Fail to send NAT delete immediate command\n");
+ return result;
+ }
+
+ IPADBG("return\n");
+ return 0;
+}
+
+static int ipa3_ipv6ct_send_del_table_cmd(uint8_t tbl_index)
+{
+ struct ipahal_imm_cmd_ip_v6_ct_init cmd;
+ int result;
+
+ IPADBG("\n");
+
+ result = ipa3_nat_ipv6ct_create_del_table_cmd(
+ tbl_index,
+ IPA_IPV6CT_PHYS_MEM_OFFSET,
+ &ipa3_ctx->ipv6ct_mem.dev,
+ &cmd.table_init);
+ if (result) {
+ IPAERR(
+ "Fail to create immediate command to delete IPv6CT table\n");
+ return result;
+ }
+
+ IPADBG("posting IPv6CT delete command\n");
+ result = ipa3_ipv6ct_send_init_cmd(&cmd);
+ if (result) {
+ IPAERR("Fail to send IPv6CT delete immediate command\n");
+ return result;
+ }
+
+ IPADBG("return\n");
+ return 0;
}
/**
@@ -962,94 +1445,109 @@ void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
*/
int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
{
- struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
- struct ipa3_desc desc[2];
- struct ipahal_imm_cmd_ip_v4_nat_init cmd;
- struct ipahal_imm_cmd_pyld *cmd_pyld;
- bool mem_type_shared = true;
- u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
- int result;
-
- IPADBG("\n");
- if (ipa3_ctx->nat_mem.is_tmp_mem) {
- IPAERR("using temp memory during nat del\n");
- mem_type_shared = false;
- base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
- }
+ struct ipa_ioc_nat_ipv6ct_table_del tmp;
if ((ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
(del->public_ip_addr == 0)) {
- IPADBG("Bad Parameter\n");
- result = -EPERM;
- goto bail;
+ IPAERR_RL("Bad Parameter public IP address\n");
+ return -EPERM;
}
- memset(&desc, 0, sizeof(desc));
- /* NO-OP IC for ensuring that IPA pipeline is empty */
- nop_cmd_pyld =
- ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
- if (!nop_cmd_pyld) {
- IPAERR("Failed to construct NOP imm cmd\n");
- result = -ENOMEM;
- goto bail;
- }
- desc[0].opcode = nop_cmd_pyld->opcode;
- desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = NULL;
- desc[0].user1 = NULL;
- desc[0].user2 = 0;
- desc[0].pyld = nop_cmd_pyld->data;
- desc[0].len = nop_cmd_pyld->len;
+ tmp.table_index = del->table_index;
- cmd.table_index = del->table_index;
- cmd.ipv4_rules_addr = base_addr;
- cmd.ipv4_rules_addr_shared = mem_type_shared;
- cmd.ipv4_expansion_rules_addr = base_addr;
- cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
- cmd.index_table_addr = base_addr;
- cmd.index_table_addr_shared = mem_type_shared;
- cmd.index_table_expansion_addr = base_addr;
- cmd.index_table_expansion_addr_shared = mem_type_shared;
- cmd.size_base_tables = 0;
- cmd.size_expansion_tables = 0;
- cmd.public_ip_addr = 0;
- cmd_pyld = ipahal_construct_imm_cmd(
- IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
- if (!cmd_pyld) {
- IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
- result = -EPERM;
- goto destroy_regwrt_imm_cmd;
- }
- desc[1].opcode = cmd_pyld->opcode;
- desc[1].type = IPA_IMM_CMD_DESC;
- desc[1].callback = NULL;
- desc[1].user1 = NULL;
- desc[1].user2 = 0;
- desc[1].pyld = cmd_pyld->data;
- desc[1].len = cmd_pyld->len;
+ return ipa3_del_nat_table(&tmp);
+}
- if (ipa3_send_cmd(2, desc)) {
- IPAERR("Fail to send immediate command\n");
- result = -EPERM;
- goto destroy_imm_cmd;
+/**
+ * ipa3_del_nat_table() - Delete the NAT table
+ * @del: [in] delete table parameters
+ *
+ * Called by NAT client to delete the table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ int result = 0;
+
+ IPADBG("\n");
+ if (!ipa3_ctx->nat_mem.dev.is_dev_init) {
+ IPAERR("NAT hasn't been initialized\n");
+ return -EPERM;
}
- ipa3_ctx->nat_mem.size_base_tables = 0;
- ipa3_ctx->nat_mem.size_expansion_tables = 0;
+ mutex_lock(&ipa3_ctx->nat_mem.dev.lock);
+
+ if (ipa3_ctx->nat_mem.dev.is_hw_init) {
+ result = ipa3_nat_send_del_table_cmd(del->table_index);
+ if (result) {
+ IPAERR(
+ "Fail to send immediate command to delete NAT table\n");
+ goto bail;
+ }
+ }
+
ipa3_ctx->nat_mem.public_ip_addr = 0;
- ipa3_ctx->nat_mem.ipv4_rules_addr = 0;
- ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
ipa3_ctx->nat_mem.index_table_addr = 0;
ipa3_ctx->nat_mem.index_table_expansion_addr = 0;
- ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
- IPADBG("return\n");
- result = 0;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
+ ipa3_ctx->nat_mem.dev.is_mem_allocated) {
+ IPADBG("freeing the PDN memory\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ ipa3_ctx->nat_mem.pdn_mem.size,
+ ipa3_ctx->nat_mem.pdn_mem.base,
+ ipa3_ctx->nat_mem.pdn_mem.phys_base);
+ }
-destroy_imm_cmd:
- ipahal_destroy_imm_cmd(cmd_pyld);
-destroy_regwrt_imm_cmd:
- ipahal_destroy_imm_cmd(nop_cmd_pyld);
+ ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev);
+ IPADBG("return\n");
+
bail:
+ mutex_unlock(&ipa3_ctx->nat_mem.dev.lock);
return result;
}
+
+/**
+ * ipa3_del_ipv6ct_table() - Delete the IPv6CT table
+ * @del: [in] delete table parameters
+ *
+ * Called by IPv6CT client to delete the table
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ int result = 0;
+
+ IPADBG("\n");
+
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPAERR_RL("IPv6 connection tracking isn't supported\n");
+ return -EPERM;
+ }
+
+ if (!ipa3_ctx->ipv6ct_mem.dev.is_dev_init) {
+ IPAERR("IPv6 connection tracking hasn't been initialized\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+
+ if (ipa3_ctx->ipv6ct_mem.dev.is_hw_init) {
+ result = ipa3_ipv6ct_send_del_table_cmd(del->table_index);
+ if (result) {
+ IPAERR(
+ "Fail to send immediate command to delete IPv6CT table\n");
+ goto bail;
+ }
+ }
+
+ ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->ipv6ct_mem.dev);
+ IPADBG("return\n");
+
+bail:
+ mutex_unlock(&ipa3_ctx->ipv6ct_mem.dev.lock);
+ return result;
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 3bf0327..fea9b3b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -1029,9 +1029,10 @@ int ipa_pm_deactivate_all_deferred(void)
IPA_PM_DBG_STATE(client->hdl, client->name,
client->state);
spin_unlock_irqrestore(&client->state_lock, flags);
- } else if (client->state ==
- IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
- IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+ } else if ((client->state ==
+ IPA_PM_ACTIVATED_PENDING_DEACTIVATION) ||
+ (client->state ==
+ IPA_PM_ACTIVATED_PENDING_RESCHEDULE)) {
run_algorithm = true;
client->state = IPA_PM_DEACTIVATED;
IPA_PM_DBG_STATE(client->hdl, client->name,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index e3a3821..1c8715a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -758,6 +758,57 @@ int ipa3_qmi_filter_request_ex_send(
resp.resp.error, "ipa_install_filter");
}
+/* sending ul-filter-install-request to modem*/
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ IPAWANDBG("IPACM pass %u rules to Q6\n",
+ req->firewall_rules_list_len);
+
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(
+ &(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+ req,
+ sizeof(struct
+ ipa_configure_ul_firewall_rules_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+ MAX_NUM_QMI_RULE_CACHE;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
+
+ req_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+ req_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+ memset(&resp, 0,
+ sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+ req,
+ sizeof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
int ipa3_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
{
@@ -967,6 +1018,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
void *ind_cb_priv)
{
struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
struct msg_desc qmi_ind_desc;
int rc = 0;
@@ -995,6 +1047,36 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
IPA_UPSTEAM_MODEM);
}
+
+ if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) {
+ memset(&qmi_ul_firewall_ind, 0, sizeof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+ qmi_ind_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01;
+ qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01;
+ qmi_ind_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei;
+
+ rc = qmi_kernel_decode(
+ &qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len);
+ if (rc < 0) {
+ IPAWANERR("Error decoding msg_id %d\n", msg_id);
+ return;
+ }
+
+ IPAWANDBG("UL firewall rules install indication on Q6");
+ if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+ IPAWANDBG(" : Success\n");
+ IPAWANDBG
+ ("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id);
+ } else if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){
+ IPAWANERR(": Failure\n");
+ } else {
+ IPAWANERR(": Unexpected Result");
+ }
+ }
}
static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
@@ -1446,6 +1528,74 @@ int ipa3_qmi_stop_data_qouta(void)
resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
}
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+ req_desc.ei_array =
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+ req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+ resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+ resp->resp.error,
+ "struct ipa_get_stats_per_client_req_msg_v01");
+}
+
void ipa3_qmi_init(void)
{
mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d3a4ba0..3351a33 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -32,54 +32,62 @@
#define IPAWANDBG(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANDBG_LOW(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANERR(fmt, args...) \
do { \
- pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANINFO(fmt, args...) \
do { \
- pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
extern struct ipa3_qmi_context *ipa3_qmi_ctx;
struct ipa3_qmi_context {
-struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
-u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
-int num_ipa_install_fltr_rule_req_msg;
-struct ipa_install_fltr_rule_req_msg_v01
+ struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+ u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+ int num_ipa_install_fltr_rule_req_msg;
+ struct ipa_install_fltr_rule_req_msg_v01
ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_install_fltr_rule_req_ex_msg;
-struct ipa_install_fltr_rule_req_ex_msg_v01
+ int num_ipa_install_fltr_rule_req_ex_msg;
+ struct ipa_install_fltr_rule_req_ex_msg_v01
ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_fltr_installed_notif_req_msg;
-struct ipa_fltr_installed_notif_req_msg_v01
+ int num_ipa_fltr_installed_notif_req_msg;
+ struct ipa_fltr_installed_notif_req_msg_v01
ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-bool modem_cfg_emb_pipe_flt;
+ int num_ipa_configure_ul_firewall_rules_req_msg;
+ struct ipa_configure_ul_firewall_rules_req_msg_v01
+ ipa_configure_ul_firewall_rules_req_msg_cache
+ [MAX_NUM_QMI_RULE_CACHE];
+ bool modem_cfg_emb_pipe_flt;
};
struct ipa3_rmnet_mux_val {
@@ -95,16 +103,24 @@ extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
extern struct elem_info
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+
extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
@@ -112,14 +128,44 @@ extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct
+ elem_info ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
/**
* struct ipa3_rmnet_context - IPA rmnet context
@@ -148,6 +194,9 @@ int ipa3_qmi_filter_request_send(
int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
/* sending filter-installed-notify-request to modem*/
int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
*req);
@@ -194,6 +243,16 @@ int rmnet_ipa3_query_tethering_stats_all(
struct wan_ioctl_query_tether_stats_all *data);
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data);
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -210,6 +269,13 @@ void ipa3_q6_handshake_complete(bool ssr_bootup);
int ipa3_wwan_set_modem_perf_profile(int throughput);
int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp);
void ipa3_qmi_init(void);
@@ -231,6 +297,12 @@ static inline int ipa3_qmi_filter_request_send(
return -EPERM;
}
+static inline int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
static inline int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
{
@@ -328,16 +400,28 @@ static inline int ipa3_qmi_stop_data_qouta(void)
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+static inline int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
{
return -EPERM;
}
static inline void ipa3_qmi_init(void)
{
+
}
static inline void ipa3_qmi_cleanup(void)
{
+
}
#endif /* CONFIG_RMNET_IPA3 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index d2d4158..703acd7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -16,6 +16,8 @@
#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_qmi_service.h"
+
/* Type Definitions */
static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
{
@@ -1756,6 +1758,36 @@ struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
rule_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,432 @@ struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_rule_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ul_firewall_rule_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ is_success),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ ipa_enable_per_client_stats_req_msg_v01,
+ enable_per_client_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_per_client_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01,
+ .elem_size =
+ sizeof(struct ipa_per_client_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list),
+ .ei_array =
+ ipa3_per_client_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+ .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list),
+ .ei_array =
+ ipa3_ul_firewall_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x2,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_ul_firewall_config_result_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+ result),
+ .ei_array =
+ ipa3_ul_firewall_config_result_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 8d7b107..fc76604 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -21,6 +21,8 @@
#define IPA_RT_STATUS_OF_DEL_FAILED (-1)
#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+#define IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC 5
+
#define IPA_RT_GET_RULE_TYPE(__entry) \
( \
((__entry)->rule.hashable) ? \
@@ -57,15 +59,15 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
gen_params.ipt = ip;
gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
if (gen_params.dst_pipe_idx == -1) {
- IPAERR("Wrong destination pipe specified in RT rule\n");
- WARN_ON(1);
+ IPAERR_RL("Wrong destination pipe specified in RT rule\n");
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
- IPAERR("No RT rule on IPA_client_producer pipe.\n");
- IPAERR("pipe_idx: %d dst_pipe: %d\n",
+ IPAERR_RL("No RT rule on IPA_client_producer pipe.\n");
+ IPAERR_RL("pipe_idx: %d dst_pipe: %d\n",
gen_params.dst_pipe_idx, entry->rule.dst);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
@@ -143,14 +145,14 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
tbl_mem.size = tbl->sz[rlt] -
ipahal_get_hw_tbl_hdr_width();
if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
- IPAERR("fail to alloc sys tbl of size %d\n",
+ IPAERR_RL("fail to alloc sys tbl of size %d\n",
tbl_mem.size);
goto err;
}
if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
hdr, tbl->idx - apps_start_idx, true)) {
- IPAERR("fail to wrt sys tbl addr to hdr\n");
+ IPAERR_RL("fail to wrt sys tbl addr to hdr\n");
goto hdr_update_fail;
}
@@ -164,7 +166,7 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
res = ipa_generate_rt_hw_rule(ip, entry,
tbl_mem_buf);
if (res) {
- IPAERR("failed to gen HW RT rule\n");
+ IPAERR_RL("failed to gen HW RT rule\n");
goto hdr_update_fail;
}
tbl_mem_buf += entry->hw_len;
@@ -181,7 +183,7 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
/* update the hdr at the right index */
if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
tbl->idx - apps_start_idx, true)) {
- IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ IPAERR_RL("fail to wrt lcl tbl ofst to hdr\n");
goto hdr_update_fail;
}
@@ -193,7 +195,7 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
res = ipa_generate_rt_hw_rule(ip, entry,
body_i);
if (res) {
- IPAERR("failed to gen HW RT rule\n");
+ IPAERR_RL("failed to gen HW RT rule\n");
goto err;
}
body_i += entry->hw_len;
@@ -294,7 +296,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
res = ipa_generate_rt_hw_rule(ip, entry, NULL);
if (res) {
- IPAERR("failed to calculate HW RT rule size\n");
+ IPAERR_RL("failed to calculate HW RT rule size\n");
return -EPERM;
}
@@ -309,8 +311,8 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
if ((tbl->sz[IPA_RULE_HASHABLE] +
tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
- WARN_ON(1);
- IPAERR("rt tbl %s is with zero total size\n", tbl->name);
+ WARN_ON_RATELIMIT_IPA(1);
+ IPAERR_RL("rt tbl %s is with zero total size\n", tbl->name);
}
hdr_width = ipahal_get_hw_tbl_hdr_width();
@@ -432,10 +434,11 @@ static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
*/
int __ipa_commit_rt_v3(enum ipa_ip_type ip)
{
- struct ipa3_desc desc[5];
+ struct ipa3_desc desc[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC];
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
- struct ipahal_imm_cmd_pyld *cmd_pyld[5];
+ struct ipahal_imm_cmd_pyld
+ *cmd_pyld[IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC];
int num_cmd = 0;
struct ipahal_fltrt_alloc_imgs_params alloc_params;
u32 num_modem_rt_index;
@@ -557,10 +560,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
goto fail_size_valid;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
num_cmd++;
mem_cmd.is_read = false;
@@ -575,10 +575,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
num_cmd++;
mem_cmd.is_read = false;
@@ -593,13 +590,17 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
num_cmd++;
if (lcl_nhash) {
+ if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) {
+ IPAERR("number of commands is out of range: IP = %d\n",
+ ip);
+ rc = -ENOBUFS;
+ goto fail_imm_cmd_construct;
+ }
+
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -613,13 +614,17 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
num_cmd++;
}
if (lcl_hash) {
+ if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) {
+ IPAERR("number of commands is out of range: IP = %d\n",
+ ip);
+ rc = -ENOBUFS;
+ goto fail_imm_cmd_construct;
+ }
+
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
@@ -633,10 +638,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
ip);
goto fail_imm_cmd_construct;
}
- desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
- desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
- desc[num_cmd].len = cmd_pyld[num_cmd]->len;
- desc[num_cmd].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
num_cmd++;
}
@@ -817,8 +819,8 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
entry->id = id;
@@ -857,7 +859,7 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
else {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EPERM;
}
@@ -890,14 +892,14 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
struct ipa3_hdr_proc_ctx_entry **proc_ctx)
{
if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
- IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+ IPAERR_RL("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
return -EPERM;
}
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ IPAERR_RL("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
@@ -905,7 +907,7 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
if ((*proc_ctx == NULL) ||
((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
return -EPERM;
}
}
@@ -916,7 +918,8 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
const struct ipa_rt_rule *rule,
struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
- struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+ u16 rule_id)
{
int id;
@@ -931,11 +934,16 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
(*(entry))->proc_ctx = proc_ctx;
- id = ipa3_alloc_rule_id(tbl->rule_ids);
- if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
- goto alloc_rule_id_fail;
+ if (rule_id) {
+ id = rule_id;
+ (*(entry))->rule_id_valid = 1;
+ } else {
+ id = ipa3_alloc_rule_id(tbl->rule_ids);
+ if (id < 0) {
+ IPAERR_RL("failed to allocate rule id\n");
+ WARN_ON_RATELIMIT_IPA(1);
+ goto alloc_rule_id_fail;
+ }
}
(*(entry))->rule_id = id;
@@ -959,8 +967,8 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
entry->proc_ctx->ref_cnt++;
id = ipa3_id_alloc(entry);
if (id < 0) {
- IPAERR("failed to add to tree\n");
- WARN_ON(1);
+ IPAERR_RL("failed to add to tree\n");
+ WARN_ON_RATELIMIT_IPA(1);
goto ipa_insert_failed;
}
IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
@@ -982,7 +990,8 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
}
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
- const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+ u16 rule_id)
{
struct ipa3_rt_tbl *tbl;
struct ipa3_rt_entry *entry;
@@ -1010,7 +1019,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+ rule_id))
goto error;
if (at_rear)
@@ -1041,7 +1051,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
goto error;
list_add(&entry->link, &((*add_after_entry)->link));
@@ -1086,8 +1096,54 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].rt_rule_hdl)) {
- IPAERR_RL("failed to add rt rule %d\n", i);
+ &rules->rules[i].rt_rule_hdl,
+ 0)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl,
+ rules->rules[i].rule_id)) {
+ IPAERR("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1137,8 +1193,8 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
goto bail;
}
- if (tbl->rule_cnt <= 0) {
- IPAERR_RL("tbl->rule_cnt <= 0");
+ if (!tbl->rule_cnt) {
+ IPAERR_RL("tbl->rule_cnt == 0");
ret = -EINVAL;
goto bail;
}
@@ -1235,7 +1291,9 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
entry->tbl->idx, entry->tbl->rule_cnt,
entry->rule_id, entry->tbl->ref_cnt);
- idr_remove(entry->tbl->rule_ids, entry->rule_id);
+ /* if rule id was allocated from idr, remove it */
+ if (!entry->rule_id_valid)
+ idr_remove(entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
IPAERR_RL("fail to del RT tbl\n");
@@ -1375,7 +1433,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
list_for_each_entry_safe(rule, rule_next,
&tbl->head_rt_rule_list, link) {
if (ipa3_id_find(rule->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
@@ -1403,7 +1461,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
}
if (ipa3_id_find(tbl->id) == NULL) {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
mutex_unlock(&ipa3_ctx->lock);
return -EFAULT;
}
@@ -1462,7 +1520,7 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1514,7 +1572,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
else {
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
result = -EINVAL;
goto ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index b8928da..941e489 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -620,8 +620,9 @@ static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
unsigned long iova, size_t len)
{
IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
- &pa, iova, len);
- wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+ &pa, iova, len);
+ wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res),
+ GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
wdi_res[res_idx].nents = 1;
@@ -647,7 +648,8 @@ static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
return;
}
- wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+ wdi_res[res_idx].res = kcalloc(sgt->nents,
+ sizeof(*wdi_res[res_idx].res),
GFP_KERNEL);
if (!wdi_res[res_idx].res)
BUG();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index ae05880..065a099 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1113,12 +1113,6 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
{ 31, 31, 8, 8, IPA_EE_AP } },
/* IPA_4_0 */
- [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = {
- true, IPA_v4_0_GROUP_UL_DL,
- true,
- IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
- QMB_MASTER_SELECT_DDR,
- { 7, 9, 8, 16, IPA_EE_AP } },
[IPA_4_0][IPA_CLIENT_USB_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
@@ -1348,13 +1342,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 3, 0, 16, 32, IPA_EE_Q6 } },
+ { 6, 2, 12, 24, IPA_EE_Q6 } },
[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD] = {
true, IPA_v4_0_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 6, 2, 12, 24, IPA_EE_Q6 } },
+ { 3, 0, 16, 32, IPA_EE_Q6 } },
[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD] = {
true, IPA_v4_0_MHI_GROUP_PCIE,
false,
@@ -2787,7 +2781,7 @@ int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
ep_hdr->hdr_ofst_pkt_size_valid,
ep_hdr->hdr_additional_const_len);
- IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x",
+ IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
ep_hdr->hdr_ofst_metadata,
ep_hdr->hdr_ofst_metadata_valid,
ep_hdr->hdr_len);
@@ -2946,7 +2940,7 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
- IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d",
+ IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
clnt_hdl,
ep_mode->mode,
ipa3_get_mode_type_str(ep_mode->mode),
@@ -3908,13 +3902,10 @@ int ipa3_tag_process(struct ipa3_desc desc[],
res = -ENOMEM;
goto fail_free_tag_desc;
}
- tag_desc[desc_idx].opcode = cmd_pyld->opcode;
- tag_desc[desc_idx].pyld = cmd_pyld->data;
- tag_desc[desc_idx].len = cmd_pyld->len;
- tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
tag_desc[desc_idx].user1 = cmd_pyld;
- desc_idx++;
+ ++desc_idx;
/* IP_PACKET_INIT IC for tag status to be sent to apps */
pktinit_cmd.destination_pipe_index =
@@ -3926,13 +3917,10 @@ int ipa3_tag_process(struct ipa3_desc desc[],
res = -ENOMEM;
goto fail_free_desc;
}
- tag_desc[desc_idx].opcode = cmd_pyld->opcode;
- tag_desc[desc_idx].pyld = cmd_pyld->data;
- tag_desc[desc_idx].len = cmd_pyld->len;
- tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
tag_desc[desc_idx].user1 = cmd_pyld;
- desc_idx++;
+ ++desc_idx;
/* status IC */
status.tag = IPA_COOKIE;
@@ -3943,13 +3931,10 @@ int ipa3_tag_process(struct ipa3_desc desc[],
res = -ENOMEM;
goto fail_free_desc;
}
- tag_desc[desc_idx].opcode = cmd_pyld->opcode;
- tag_desc[desc_idx].pyld = cmd_pyld->data;
- tag_desc[desc_idx].len = cmd_pyld->len;
- tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&tag_desc[desc_idx], cmd_pyld);
tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
tag_desc[desc_idx].user1 = cmd_pyld;
- desc_idx++;
+ ++desc_idx;
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp) {
@@ -3972,6 +3957,12 @@ int ipa3_tag_process(struct ipa3_desc desc[],
memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+ if (desc_idx >= IPA_TAG_MAX_DESC) {
+ IPAERR("number of commands is out of range\n");
+ res = -ENOBUFS;
+ goto fail_free_skb;
+ }
+
tag_desc[desc_idx].pyld = dummy_skb->data;
tag_desc[desc_idx].len = dummy_skb->len;
tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
@@ -3984,7 +3975,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
if (res) {
IPAERR("failed to send TAG packets %d\n", res);
res = -ENOMEM;
- goto fail_free_comp;
+ goto fail_free_skb;
}
kfree(tag_desc);
tag_desc = NULL;
@@ -4012,6 +4003,8 @@ int ipa3_tag_process(struct ipa3_desc desc[],
return 0;
+fail_free_skb:
+ kfree_skb(dummy_skb);
fail_free_comp:
kfree(comp);
fail_free_desc:
@@ -4082,19 +4075,16 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
goto fail_alloc_reg_write_agg_close;
}
- desc[desc_idx].opcode = cmd_pyld->opcode;
- desc[desc_idx].pyld = cmd_pyld->data;
- desc[desc_idx].len = cmd_pyld->len;
- desc[desc_idx].type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc[desc_idx], cmd_pyld);
desc[desc_idx].callback = ipa3_tag_destroy_imm;
desc[desc_idx].user1 = cmd_pyld;
- desc_idx++;
+ ++desc_idx;
}
return desc_idx;
fail_alloc_reg_write_agg_close:
- for (i = 0; i < desc_idx; i++)
+ for (i = 0; i < desc_idx; ++i)
if (desc[desc_idx].callback)
desc[desc_idx].callback(desc[desc_idx].user1,
desc[desc_idx].user2);
@@ -4198,7 +4188,9 @@ void ipa3_proxy_clk_unvote(void)
mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
if (ipa3_ctx->q6_proxy_clk_vote_valid) {
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
- ipa3_ctx->q6_proxy_clk_vote_valid = false;
+ ipa3_ctx->q6_proxy_clk_vote_cnt--;
+ if (ipa3_ctx->q6_proxy_clk_vote_cnt == 0)
+ ipa3_ctx->q6_proxy_clk_vote_valid = false;
}
mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
}
@@ -4214,8 +4206,10 @@ void ipa3_proxy_clk_vote(void)
return;
mutex_lock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
- if (!ipa3_ctx->q6_proxy_clk_vote_valid) {
+ if (!ipa3_ctx->q6_proxy_clk_vote_valid ||
+ (ipa3_ctx->q6_proxy_clk_vote_cnt > 0)) {
IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+ ipa3_ctx->q6_proxy_clk_vote_cnt++;
ipa3_ctx->q6_proxy_clk_vote_valid = true;
}
mutex_unlock(&ipa3_ctx->q6_proxy_clk_vote_mutex);
@@ -4377,10 +4371,17 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
api_ctrl->ipa_commit_flt = ipa3_commit_flt;
api_ctrl->ipa_reset_flt = ipa3_reset_flt;
- api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
+ api_ctrl->ipa_allocate_nat_device = ipa3_allocate_nat_device;
+ api_ctrl->ipa_allocate_nat_table = ipa3_allocate_nat_table;
+ api_ctrl->ipa_allocate_ipv6ct_table = ipa3_allocate_ipv6ct_table;
api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
+ api_ctrl->ipa_ipv6ct_init_cmd = ipa3_ipv6ct_init_cmd;
api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
+ api_ctrl->ipa_table_dma_cmd = ipa3_table_dma_cmd;
api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
+ api_ctrl->ipa_del_nat_table = ipa3_del_nat_table;
+ api_ctrl->ipa_del_ipv6ct_table = ipa3_del_ipv6ct_table;
+ api_ctrl->ipa_nat_mdfy_pdn = ipa3_nat_mdfy_pdn;
api_ctrl->ipa_send_msg = ipa3_send_msg;
api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
@@ -4501,6 +4502,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_disconn_wdi3_pipes = ipa3_disconn_wdi3_pipes;
api_ctrl->ipa_enable_wdi3_pipes = ipa3_enable_wdi3_pipes;
api_ctrl->ipa_disable_wdi3_pipes = ipa3_disable_wdi3_pipes;
+ api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
return 0;
}
@@ -4996,12 +4998,9 @@ void ipa3_free_dma_task_for_gsi(void)
*/
int ipa3_inject_dma_task_for_gsi(void)
{
- struct ipa3_desc desc = {0};
+ struct ipa3_desc desc;
- desc.opcode = ipa3_ctx->dma_task_info.cmd_pyld->opcode;
- desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data;
- desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len;
- desc.type = IPA_IMM_CMD_DESC;
+ ipa3_init_imm_cmd_desc(&desc, ipa3_ctx->dma_task_info.cmd_pyld);
IPADBG("sending 1B packet to IPA\n");
if (ipa3_send_cmd_timeout(1, &desc,
@@ -5324,3 +5323,14 @@ void ipa3_enable_dcd(void)
ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG,
&idle_indication_cfg);
}
+
+void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
+ struct ipahal_imm_cmd_pyld *cmd_pyld)
+{
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cmd_pyld->opcode;
+ desc->pyld = cmd_pyld->data;
+ desc->len = cmd_pyld->len;
+ desc->type = IPA_IMM_CMD_DESC;
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
index 67e491b..869ee7e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_IPA3) += ipa_hal.o
-ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o ipahal_nat.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index a8d5342..d015b22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -16,7 +16,7 @@
#include "ipahal_reg_i.h"
#include "ipahal_fltrt_i.h"
#include "ipahal_hw_stats_i.h"
-
+#include "ipahal_nat_i.h"
struct ipahal_context *ipahal_ctx;
@@ -35,6 +35,7 @@ static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
__stringify(IPA_IMM_CMD_TABLE_DMA),
+ __stringify(IPA_IMM_CMD_IP_V6_CT_INIT)
};
static const char *ipahal_pkt_status_exception_to_str
@@ -352,8 +353,8 @@ static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
{
struct ipahal_imm_cmd_pyld *pyld;
struct ipa_imm_cmd_hw_nat_dma *data;
- struct ipahal_imm_cmd_nat_dma *nat_params =
- (struct ipahal_imm_cmd_nat_dma *)params;
+ struct ipahal_imm_cmd_table_dma *nat_params =
+ (struct ipahal_imm_cmd_table_dma *)params;
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
if (unlikely(!pyld)) {
@@ -519,24 +520,55 @@ static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
pyld->len = sizeof(*data);
data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
- data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
+ data->ipv4_rules_addr = nat4_params->table_init.base_table_addr;
data->ipv4_expansion_rules_addr =
- nat4_params->ipv4_expansion_rules_addr;
+ nat4_params->table_init.expansion_table_addr;
data->index_table_addr = nat4_params->index_table_addr;
data->index_table_expansion_addr =
nat4_params->index_table_expansion_addr;
- data->table_index = nat4_params->table_index;
+ data->table_index = nat4_params->table_init.table_index;
data->ipv4_rules_addr_type =
- nat4_params->ipv4_rules_addr_shared ? 1 : 0;
+ nat4_params->table_init.base_table_addr_shared ? 1 : 0;
data->ipv4_expansion_rules_addr_type =
- nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
+ nat4_params->table_init.expansion_table_addr_shared ? 1 : 0;
data->index_table_addr_type =
nat4_params->index_table_addr_shared ? 1 : 0;
data->index_table_expansion_addr_type =
nat4_params->index_table_expansion_addr_shared ? 1 : 0;
- data->size_base_tables = nat4_params->size_base_tables;
- data->size_expansion_tables = nat4_params->size_expansion_tables;
- data->public_ip_addr = nat4_params->public_ip_addr;
+ data->size_base_tables = nat4_params->table_init.size_base_table;
+ data->size_expansion_tables =
+ nat4_params->table_init.size_expansion_table;
+ data->public_addr_info = nat4_params->public_addr_info;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_ct_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v6_ct_init *data;
+ struct ipahal_imm_cmd_ip_v6_ct_init *ipv6ct_params =
+ (struct ipahal_imm_cmd_ip_v6_ct_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld))
+ return pyld;
+ pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v6_ct_init *)pyld->data;
+
+ data->table_addr = ipv6ct_params->table_init.base_table_addr;
+ data->expansion_table_addr =
+ ipv6ct_params->table_init.expansion_table_addr;
+ data->table_index = ipv6ct_params->table_init.table_index;
+ data->table_addr_type =
+ ipv6ct_params->table_init.base_table_addr_shared ? 1 : 0;
+ data->expansion_table_addr_type =
+ ipv6ct_params->table_init.expansion_table_addr_shared ? 1 : 0;
+ data->size_base_table = ipv6ct_params->table_init.size_base_table;
+ data->size_expansion_table =
+ ipv6ct_params->table_init.size_expansion_table;
return pyld;
}
@@ -685,6 +717,9 @@ static struct ipahal_imm_cmd_obj
[IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
19},
+ [IPA_HW_v4_0][IPA_IMM_CMD_IP_V6_CT_INIT] = {
+ ipa_imm_cmd_construct_ip_v6_ct_init,
+ 23}
};
/*
@@ -1526,13 +1561,21 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
if (ipahal_hw_stats_init(ipa_hw_type)) {
IPAHAL_ERR("failed to init ipahal hw stats\n");
result = -EFAULT;
- goto bail_free_ctx;
+ goto bail_free_fltrt;
+ }
+
+ if (ipahal_nat_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal NAT\n");
+ result = -EFAULT;
+ goto bail_free_fltrt;
}
ipahal_debugfs_init();
return 0;
+bail_free_fltrt:
+ ipahal_fltrt_destroy();
bail_free_ctx:
kfree(ipahal_ctx);
ipahal_ctx = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 56b884b..0c2697c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -37,6 +37,7 @@ enum ipahal_imm_cmd_name {
IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
IPA_IMM_CMD_DMA_TASK_32B_ADDR,
IPA_IMM_CMD_TABLE_DMA,
+ IPA_IMM_CMD_IP_V6_CT_INIT,
IPA_IMM_CMD_MAX,
};
@@ -46,19 +47,19 @@ enum ipahal_imm_cmd_name {
* struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
* Inits IPv4 filter block.
* @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
* be copied to
- * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v4_filter_init {
u64 hash_rules_addr;
+ u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
- u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
@@ -67,79 +68,98 @@ struct ipahal_imm_cmd_ip_v4_filter_init {
* struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
* Inits IPv6 filter block.
* @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
* be copied to
- * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v6_filter_init {
u64 hash_rules_addr;
+ u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
- u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
+ * struct ipahal_imm_cmd_nat_ipv6ct_init_common - NAT/IPv6CT table init command
+ * common part
+ * @base_table_addr: Address in sys/shared mem where base table start
+ * @expansion_table_addr: Address in sys/shared mem where expansion table
+ * starts. Entries that result in hash collision are located in this table.
+ * @base_table_addr_shared: base_table_addr in shared mem (if not, then sys)
+ * @expansion_table_addr_shared: expansion_rules_addr in
+ * shared mem (if not, then sys)
+ * @size_base_table: Num of entries in the base table
+ * @size_expansion_table: Num of entries in the expansion table
+ * @table_index: For future support of multiple tables
+ */
+struct ipahal_imm_cmd_nat_ipv6ct_init_common {
+ u64 base_table_addr;
+ u64 expansion_table_addr;
+ bool base_table_addr_shared;
+ bool expansion_table_addr_shared;
+ u16 size_base_table;
+ u16 size_expansion_table;
+ u8 table_index;
+};
+
+/*
* struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
* Inits IPv4 NAT block. Initiate NAT table with it dimensions, location
- * cache address abd itger related parameters.
- * @table_index: For future support of multiple NAT tables
- * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
- * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
- * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expantion NAT
- * table starts. IPv4 NAT rules that result in NAT collision are located
- * in this table.
- * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
- * shared mem (if not, then sys)
+ * cache address and other related parameters.
+ * @table_init: table initialization parameters
* @index_table_addr: Addr in sys/shared mem where index table, which points
* to NAT table starts
- * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
* @index_table_expansion_addr: Addr in sys/shared mem where expansion index
* table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
* @index_table_expansion_addr_shared: index_table_expansion_addr in
* shared mem (if not, then sys)
- * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
- * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion
- * idx tbl (each)
- * @public_ip_addr: public IP address
+ * @public_addr_info: Public IP addresses info suitable to the IPA H/W version
+ * IPA H/W >= 4.0 - PDN config table offset in SMEM
+ * IPA H/W < 4.0 - The public IP address
*/
struct ipahal_imm_cmd_ip_v4_nat_init {
- u8 table_index;
- u64 ipv4_rules_addr;
- bool ipv4_rules_addr_shared;
- u64 ipv4_expansion_rules_addr;
- bool ipv4_expansion_rules_addr_shared;
+ struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
u64 index_table_addr;
- bool index_table_addr_shared;
u64 index_table_expansion_addr;
+ bool index_table_addr_shared;
bool index_table_expansion_addr_shared;
- u16 size_base_tables;
- u16 size_expansion_tables;
- u32 public_ip_addr;
+ u32 public_addr_info;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT cmd payload
+ * Inits IPv6CT block. Initiate IPv6CT table with it dimensions, location
+ * cache address and other related parameters.
+ * @table_init: table initialization parameters
+ */
+struct ipahal_imm_cmd_ip_v6_ct_init {
+ struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
};
/*
* struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
* Inits IPv4 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
* be copied to
- * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v4_routing_init {
u64 hash_rules_addr;
+ u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
- u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
@@ -148,19 +168,19 @@ struct ipahal_imm_cmd_ip_v4_routing_init {
* struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
* Inits IPv6 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
* be copied to
- * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v6_routing_init {
u64 hash_rules_addr;
+ u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
- u64 nhash_rules_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
@@ -189,36 +209,20 @@ struct ipahal_imm_cmd_hdr_init_system {
};
/*
- * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
- * Perform DMA operation on NAT related mem addressess. Copy data into
- * different locations within NAT associated tbls. (For add/remove NAT rules)
- * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
- * @base_addr: Base addr to which the DMA operation should be performed.
- * @offset: offset in bytes from base addr to write 'data' to
- * @data: data to be written
- */
-struct ipahal_imm_cmd_nat_dma {
- u8 table_index;
- u8 base_addr;
- u32 offset;
- u16 data;
-};
-
-/*
* struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload
* Perform DMA operation on NAT and IPV6 connection tracking related mem
- * addresses. Copy data into different locations within IPV6CT and NAT
+ * addresses. Copy data into different locations within IPv6CT and NAT
* associated tbls. (For add/remove NAT rules)
- * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op.
- * @base_addr: Base addr to which the DMA operation should be performed.
* @offset: offset in bytes from base addr to write 'data' to
* @data: data to be written
+ * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
*/
struct ipahal_imm_cmd_table_dma {
- u8 table_index;
- u8 base_addr;
u32 offset;
u16 data;
+ u8 table_index;
+ u8 base_addr;
};
/*
@@ -275,6 +279,7 @@ struct ipahal_imm_cmd_register_write {
/*
* struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
* Perform mem copy into or out of the SW area of IPA local mem
+ * @system_addr: Address in system memory
* @size: Size in bytes of data to copy. Expected size is up to 2K bytes
* @local_addr: Address in IPA local memory
* @clear_after_read: Clear local memory at the end of a read operation allows
@@ -282,16 +287,15 @@ struct ipahal_imm_cmd_register_write {
* @is_read: Read operation from local memory? If not, then write.
* @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
* @pipeline_clear_option: options for pipeline clear waiting
- * @system_addr: Address in system memory
*/
struct ipahal_imm_cmd_dma_shared_mem {
+ u64 system_addr;
u32 size;
u32 local_addr;
bool clear_after_read;
bool is_read;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
- u64 system_addr;
};
/*
@@ -515,6 +519,7 @@ enum ipahal_pkt_status_nat_type {
* following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
* IPA_STATUS_SUSPENDED_PACKET.
* Other statuses types has different status packet structure.
+ * @tag_info: S/W defined value provided via immediate command
* @status_opcode: The Type of the status (Opcode).
* @exception: The first exception that took place.
* In case of exception, src endp and pkt len are always valid.
@@ -522,9 +527,6 @@ enum ipahal_pkt_status_nat_type {
* and processing it may passed at IPA. See enum ipahal_pkt_status_mask
* @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
* not include padding or checksum trailer len.
- * @endp_src_idx: Source end point index.
- * @endp_dest_idx: Destination end point index.
- * Not valid in case of exception
* @metadata: meta data value used by packet
* @flt_local: Filter table location flag: Does matching flt rule belongs to
* flt tbl that resides in lcl memory? (if not, then system mem)
@@ -535,57 +537,59 @@ enum ipahal_pkt_status_nat_type {
* specifies to retain header?
* @flt_miss: Filtering miss flag: Was their a filtering rule miss?
* In case of miss, all flt info to be ignored
- * @flt_rule_id: The ID of the matching filter rule (if no miss).
- * This info can be combined with endp_src_idx to locate the exact rule.
* @rt_local: Route table location flag: Does matching rt rule belongs to
* rt tbl that resides in lcl memory? (if not, then system mem)
* @rt_hash: Route hash hit flag: Does matching rt rule was in hash tbl?
* @ucp: UC Processing flag
- * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
* @rt_miss: Routing miss flag: Was their a routing rule miss?
- * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
- * can be combined with rt_tbl_idx to locate the exact rule.
* @nat_hit: NAT hit flag: Was their NAT hit?
- * @nat_entry_idx: Index of the NAT entry used of NAT processing
* @nat_type: Defines the type of the NAT operation:
- * @tag_info: S/W defined value provided via immediate command
- * @seq_num: Per source endp unique packet sequence number
* @time_of_day_ctr: running counter from IPA clock
* @hdr_local: Header table location flag: In header insertion, was the header
* taken from the table resides in local memory? (If no, then system mem)
- * @hdr_offset: Offset of used header in the header table
* @frag_hit: Frag hit flag: Was their frag rule hit in H/W frag table?
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ * This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ * can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @hdr_offset: Offset of used header in the header table
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ * Not valid in case of exception
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @seq_num: Per source endp unique packet sequence number
* @frag_rule: Frag rule index in H/W frag table in case of frag hit
*/
struct ipahal_pkt_status {
+ u64 tag_info;
enum ipahal_pkt_status_opcode status_opcode;
enum ipahal_pkt_status_exception exception;
u32 status_mask;
u32 pkt_len;
- u8 endp_src_idx;
- u8 endp_dest_idx;
u32 metadata;
bool flt_local;
bool flt_hash;
bool flt_global;
bool flt_ret_hdr;
bool flt_miss;
- u16 flt_rule_id;
bool rt_local;
bool rt_hash;
bool ucp;
- u8 rt_tbl_idx;
bool rt_miss;
- u16 rt_rule_id;
bool nat_hit;
- u16 nat_entry_idx;
enum ipahal_pkt_status_nat_type nat_type;
- u64 tag_info;
- u8 seq_num;
u32 time_of_day_ctr;
bool hdr_local;
- u16 hdr_offset;
bool frag_hit;
+ u16 flt_rule_id;
+ u16 rt_rule_id;
+ u16 nat_entry_idx;
+ u16 hdr_offset;
+ u8 endp_src_idx;
+ u8 endp_dest_idx;
+ u8 rt_tbl_idx;
+ u8 seq_num;
u8 frag_rule;
};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index d6dbc85..a677046 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -187,17 +187,17 @@ static int ipa_fltrt_rule_generation_err_check(
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC ||
attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
- IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+ IPAHAL_ERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
} else if (ipt == IPA_IP_v6) {
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+ IPAHAL_ERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
} else {
- IPAHAL_ERR("unsupported ip %d\n", ipt);
+ IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
return -EPERM;
}
@@ -236,7 +236,7 @@ static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
break;
default:
IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
};
@@ -294,8 +294,8 @@ static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
rule_hdr->u.hdr.action = 0x3;
break;
default:
- IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
- WARN_ON(1);
+ IPAHAL_ERR_RL("Invalid Rule Action %d\n", params->rule->action);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
ipa_assert_on(params->rt_tbl_idx & ~0x1F);
@@ -316,14 +316,14 @@ static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
if (params->rule->eq_attrib_type) {
if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
¶ms->rule->eq_attrib, &buf)) {
- IPAHAL_ERR("fail to generate hw rule from eq\n");
+ IPAHAL_ERR_RL("fail to generate hw rule from eq\n");
return -EPERM;
}
en_rule = params->rule->eq_attrib.rule_eq_bitmap;
} else {
if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
¶ms->rule->attrib, &buf, &en_rule)) {
- IPAHAL_ERR("fail to generate hw rule\n");
+ IPAHAL_ERR_RL("fail to generate hw rule\n");
return -EPERM;
}
}
@@ -343,7 +343,7 @@ static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
if (*hw_len == 0) {
*hw_len = buf - start;
} else if (*hw_len != (buf - start)) {
- IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ IPAHAL_ERR_RL("hw_len differs b/w passed=0x%x calc=%td\n",
*hw_len, (buf - start));
return -EPERM;
}
@@ -376,7 +376,7 @@ static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params,
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
return -EINVAL;
}
@@ -1381,7 +1381,7 @@ static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
if (!extra_wrd_buf) {
- IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ IPAHAL_ERR_RL("failed to allocate %d bytes\n", sz);
rc = -ENOMEM;
goto fail_extra_alloc;
}
@@ -1389,7 +1389,7 @@ static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
if (!rest_wrd_buf) {
- IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+ IPAHAL_ERR_RL("failed to allocate %d bytes\n", sz);
rc = -ENOMEM;
goto fail_rest_alloc;
}
@@ -1407,14 +1407,14 @@ static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
if (rc) {
- IPAHAL_ERR("rule generation err check failed\n");
+ IPAHAL_ERR_RL("rule generation err check failed\n");
goto fail_err_check;
}
if (ipt == IPA_IP_v4) {
if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
&extra_wrd_i, &rest_wrd_i)) {
- IPAHAL_ERR("failed to build ipv4 hw rule\n");
+ IPAHAL_ERR_RL("failed to build ipv4 hw rule\n");
rc = -EPERM;
goto fail_err_check;
}
@@ -1422,12 +1422,12 @@ static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
} else if (ipt == IPA_IP_v6) {
if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
&extra_wrd_i, &rest_wrd_i)) {
- IPAHAL_ERR("failed to build ipv6 hw rule\n");
+ IPAHAL_ERR_RL("failed to build ipv6 hw rule\n");
rc = -EPERM;
goto fail_err_check;
}
} else {
- IPAHAL_ERR("unsupported ip %d\n", ipt);
+ IPAHAL_ERR_RL("unsupported ip %d\n", ipt);
goto fail_err_check;
}
@@ -1514,7 +1514,7 @@ static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
* of equations that needs extra word param
*/
if (extra_bytes > 13) {
- IPAHAL_ERR("too much extra bytes\n");
+ IPAHAL_ERR_RL("too much extra bytes\n");
return -EPERM;
} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
/* two extra words */
@@ -2041,7 +2041,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2069,7 +2069,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2097,7 +2097,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2114,7 +2114,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2130,7 +2130,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2146,7 +2146,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2162,7 +2162,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
- IPAHAL_ERR("ran out of meq128 eq\n");
+ IPAHAL_ERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2180,7 +2180,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2213,7 +2213,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TCP_SYN) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2229,7 +2229,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ(
ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2271,7 +2271,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
- IPAHAL_ERR("ran out of meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2287,7 +2287,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2302,7 +2302,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2317,7 +2317,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
ihl_ofst_meq32)) {
- IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2342,7 +2342,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2358,7 +2358,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2374,11 +2374,11 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAHAL_ERR("bad src port range param\n");
+ IPAHAL_ERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2394,11 +2394,11 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAHAL_ERR("bad dst port range param\n");
+ IPAHAL_ERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2414,7 +2414,7 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP) {
if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
ihl_ofst_rng16)) {
- IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ IPAHAL_ERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
@@ -2713,7 +2713,7 @@ static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
rule->rule.action = rule_hdr->u.hdr.action;
}
@@ -2760,7 +2760,7 @@ static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
break;
default:
IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
- WARN_ON(1);
+ WARN_ON_RATELIMIT_IPA(1);
rule->rule.action = rule_hdr->u.hdr.action;
}
@@ -3221,7 +3221,7 @@ static int ipa_fltrt_alloc_init_tbl_hdr(
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!params) {
- IPAHAL_ERR("Input error: params=%p\n", params);
+ IPAHAL_ERR_RL("Input error: params=%p\n", params);
return -EINVAL;
}
@@ -3230,7 +3230,7 @@ static int ipa_fltrt_alloc_init_tbl_hdr(
params->nhash_hdr.size,
¶ms->nhash_hdr.phys_base, GFP_KERNEL);
if (!params->nhash_hdr.base) {
- IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
params->nhash_hdr.size);
goto nhash_alloc_fail;
}
@@ -3241,7 +3241,7 @@ static int ipa_fltrt_alloc_init_tbl_hdr(
params->hash_hdr.size, ¶ms->hash_hdr.phys_base,
GFP_KERNEL);
if (!params->hash_hdr.base) {
- IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ IPAHAL_ERR_RL("fail to alloc DMA buff of size %d\n",
params->hash_hdr.size);
goto hash_alloc_fail;
}
@@ -3374,21 +3374,21 @@ int ipahal_fltrt_allocate_hw_tbl_imgs(
/* Input validation */
if (!params) {
- IPAHAL_ERR("Input err: no params\n");
+ IPAHAL_ERR_RL("Input err: no params\n");
return -EINVAL;
}
if (params->ipt >= IPA_IP_MAX) {
- IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", params->ipt);
return -EINVAL;
}
if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
- IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+ IPAHAL_ERR_RL("fail to alloc and init tbl hdr\n");
return -ENOMEM;
}
if (ipa_fltrt_alloc_lcl_bdy(params)) {
- IPAHAL_ERR("fail to alloc tbl bodies\n");
+ IPAHAL_ERR_RL("fail to alloc tbl bodies\n");
goto bdy_alloc_fail;
}
@@ -3649,12 +3649,12 @@ int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
IPAHAL_DBG_LOW("Entry\n");
if (ipt >= IPA_IP_MAX) {
- IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+ IPAHAL_ERR_RL("Input err: Invalid ip type %d\n", ipt);
return -EINVAL;
}
if (!attrib || !eq_atrb) {
- IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+ IPAHAL_ERR_RL("Input err: attrib=%p eq_atrb=%p\n",
attrib, eq_atrb);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 5eb1aef..8f78d56 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -46,6 +46,16 @@
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
+#define IPAHAL_ERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+ } while (0)
+
#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
@@ -125,10 +135,10 @@ struct ipa_imm_cmd_hw_ip_v6_filter_init {
* struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
* in H/W format.
* Inits IPv4 NAT block. Initiate NAT table with it dimensions, location
- * cache address abd itger related parameters.
+ * cache address and other related parameters.
* @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
- * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expantion NAT
- * table starts. IPv4 NAT rules that result in NAT collision are located
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ * table starts. IPv4 NAT rules that result in hash collision are located
* in this table.
* @index_table_addr: Addr in sys/shared mem where index table, which points
* to NAT table starts
@@ -143,11 +153,12 @@ struct ipa_imm_cmd_hw_ip_v6_filter_init {
* @index_table_expansion_addr_type: index_table_expansion_addr in
* sys or shared mem
* @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
- * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
* idx tbl (each)
* @rsvd2: reserved
- * @public_ip_addr: public IP address. for IPAv4 this is the PDN config table
- * offset in SMEM
+ * @public_addr_info: Public IP addresses info suitable to the IPA H/W version
+ * IPA H/W >= 4.0 - PDN config table offset in SMEM
+ * IPA H/W < 4.0 - The public IP address
*/
struct ipa_imm_cmd_hw_ip_v4_nat_init {
u64 ipv4_rules_addr:64;
@@ -163,7 +174,38 @@ struct ipa_imm_cmd_hw_ip_v4_nat_init {
u64 size_base_tables:12;
u64 size_expansion_tables:10;
u64 rsvd2:2;
- u64 public_ip_addr:32;
+ u64 public_addr_info:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT command payload
+ * in H/W format.
+ * Inits IPv6CT block. Initializes the IPv6CT table with its dimensions, location
+ * cache address and other related parameters.
+ * @table_addr: Address in sys/shared mem where IPv6CT rules start
+ * @expansion_table_addr: Address in sys/shared mem where IPv6CT expansion
+ * table starts. IPv6CT rules that result in hash collision are located
+ * in this table.
+ * @table_index: For future support of multiple IPv6CT tables
+ * @rsvd1: reserved
+ * @table_addr_type: table_addr in sys or shared mem
+ * @expansion_table_addr_type: expansion_table_addr in sys or shared mem
+ * @rsvd2: reserved
+ * @size_base_table: Number of entries in IPv6CT table
+ * @size_expansion_table: Number of entries in IPv6CT expansion table
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_ip_v6_ct_init {
+ u64 table_addr:64;
+ u64 expansion_table_addr:64;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 table_addr_type:1;
+ u64 expansion_table_addr_type:1;
+ u64 rsvd2:2;
+ u64 size_base_table:12;
+ u64 size_expansion_table:10;
+ u64 rsvd3:34;
};
/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
new file mode 100644
index 0000000..d335ba6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
@@ -0,0 +1,360 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal_nat.h"
+#include "ipahal_nat_i.h"
+#include "ipahal_i.h"
+
+#define IPA_64_LOW_32_MASK (0xFFFFFFFF)
+#define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL)
+
+static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = {
+ __stringify(IPAHAL_NAT_IPV4),
+ __stringify(IPAHAL_NAT_IPV4_INDEX),
+ __stringify(IPAHAL_NAT_IPV4_PDN),
+ __stringify(IPAHAL_NAT_IPV6CT)
+};
+
+static size_t ipa_nat_ipv4_entry_size_v_3_0(void)
+{
+ return sizeof(struct ipa_nat_hw_ipv4_entry);
+}
+
+static size_t ipa_nat_ipv4_index_entry_size_v_3_0(void)
+{
+ return sizeof(struct ipa_nat_hw_indx_entry);
+}
+
+static size_t ipa_nat_ipv4_pdn_entry_size_v_4_0(void)
+{
+ return sizeof(struct ipa_nat_hw_pdn_entry);
+}
+
+static size_t ipa_nat_ipv6ct_entry_size_v_4_0(void)
+{
+ return sizeof(struct ipa_nat_hw_ipv6ct_entry);
+}
+
+static bool ipa_nat_ipv4_is_entry_zeroed_v_3_0(const void *entry)
+{
+ struct ipa_nat_hw_ipv4_entry zero_entry = { 0 };
+
+ return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
+}
+
+static bool ipa_nat_ipv4_is_index_entry_zeroed_v_3_0(const void *entry)
+{
+ struct ipa_nat_hw_indx_entry zero_entry = { 0 };
+
+ return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
+}
+
+static bool ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0(const void *entry)
+{
+ struct ipa_nat_hw_pdn_entry zero_entry = { 0 };
+
+ return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
+}
+
+static bool ipa_nat_ipv6ct_is_entry_zeroed_v_4_0(const void *entry)
+{
+ struct ipa_nat_hw_ipv6ct_entry zero_entry = { 0 };
+
+ return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
+}
+
+static int ipa_nat_ipv4_stringify_entry_v_3_0(const void *entry,
+ char *buff, size_t buff_size)
+{
+ const struct ipa_nat_hw_ipv4_entry *nat_entry =
+ (const struct ipa_nat_hw_ipv4_entry *)entry;
+
+ return scnprintf(buff, buff_size,
+ "\t\tPrivate_IP=%pI4h Target_IP=%pI4h\n"
+ "\t\tNext_Index=%d Public_Port=%d\n"
+ "\t\tPrivate_Port=%d Target_Port=%d\n"
+ "\t\tIP_CKSM_delta=0x%x Enable=%s Redirect=%s\n"
+ "\t\tTime_stamp=0x%x Proto=%d\n"
+ "\t\tPrev_Index=%d Indx_tbl_entry=%d\n"
+ "\t\tTCP_UDP_cksum_delta=0x%x\n",
+ &nat_entry->private_ip, &nat_entry->target_ip,
+ nat_entry->next_index, nat_entry->public_port,
+ nat_entry->private_port, nat_entry->target_port,
+ nat_entry->ip_chksum,
+ (nat_entry->enable) ? "true" : "false",
+ (nat_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
+ nat_entry->time_stamp, nat_entry->protocol,
+ nat_entry->prev_index, nat_entry->indx_tbl_entry,
+ nat_entry->tcp_udp_chksum);
+}
+
+static int ipa_nat_ipv4_stringify_entry_v_4_0(const void *entry,
+ char *buff, size_t buff_size)
+{
+ int length;
+ const struct ipa_nat_hw_ipv4_entry *nat_entry =
+ (const struct ipa_nat_hw_ipv4_entry *)entry;
+
+ length = ipa_nat_ipv4_stringify_entry_v_3_0(entry, buff, buff_size);
+
+ length += scnprintf(buff + length, buff_size - length,
+ "\t\tPDN_Index=%d\n", nat_entry->pdn_index);
+
+ return length;
+}
+
+static int ipa_nat_ipv4_index_stringify_entry_v_3_0(const void *entry,
+ char *buff, size_t buff_size)
+{
+ const struct ipa_nat_hw_indx_entry *index_entry =
+ (const struct ipa_nat_hw_indx_entry *)entry;
+
+ return scnprintf(buff, buff_size,
+ "\t\tTable_Entry=%d Next_Index=%d\n",
+ index_entry->tbl_entry, index_entry->next_index);
+}
+
+static int ipa_nat_ipv4_pdn_stringify_entry_v_4_0(const void *entry,
+ char *buff, size_t buff_size)
+{
+ const struct ipa_nat_hw_pdn_entry *pdn_entry =
+ (const struct ipa_nat_hw_pdn_entry *)entry;
+
+ return scnprintf(buff, buff_size,
+ "ip=%pI4h src_metadata=0x%X, dst_metadata=0x%X\n",
+ &pdn_entry->public_ip,
+ pdn_entry->src_metadata, pdn_entry->dst_metadata);
+}
+
+static inline int ipa_nat_ipv6_stringify_addr(char *buff, size_t buff_size,
+ const char *msg, u64 lsb, u64 msb)
+{
+ struct in6_addr addr;
+
+ addr.s6_addr32[0] = cpu_to_be32((msb & IPA_64_HIGH_32_MASK) >> 32);
+ addr.s6_addr32[1] = cpu_to_be32(msb & IPA_64_LOW_32_MASK);
+ addr.s6_addr32[2] = cpu_to_be32((lsb & IPA_64_HIGH_32_MASK) >> 32);
+ addr.s6_addr32[3] = cpu_to_be32(lsb & IPA_64_LOW_32_MASK);
+
+ return scnprintf(buff, buff_size,
+ "\t\t%s_IPv6_Addr=%pI6c\n", msg, &addr);
+}
+
+static int ipa_nat_ipv6ct_stringify_entry_v_4_0(const void *entry,
+ char *buff, size_t buff_size)
+{
+ int length = 0;
+ const struct ipa_nat_hw_ipv6ct_entry *ipv6ct_entry =
+ (const struct ipa_nat_hw_ipv6ct_entry *)entry;
+
+ length += ipa_nat_ipv6_stringify_addr(
+ buff + length,
+ buff_size - length,
+ "Src",
+ ipv6ct_entry->src_ipv6_lsb,
+ ipv6ct_entry->src_ipv6_msb);
+
+ length += ipa_nat_ipv6_stringify_addr(
+ buff + length,
+ buff_size - length,
+ "Dest",
+ ipv6ct_entry->dest_ipv6_lsb,
+ ipv6ct_entry->dest_ipv6_msb);
+
+ length += scnprintf(buff + length, buff_size - length,
+ "\t\tEnable=%s Redirect=%s Time_Stamp=0x%x Proto=%d\n"
+ "\t\tNext_Index=%d Dest_Port=%d Src_Port=%d\n"
+ "\t\tDirection Settings: Out=%s In=%s\n"
+ "\t\tPrev_Index=%d\n",
+ (ipv6ct_entry->enable) ? "true" : "false",
+ (ipv6ct_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
+ ipv6ct_entry->time_stamp,
+ ipv6ct_entry->protocol,
+ ipv6ct_entry->next_index,
+ ipv6ct_entry->dest_port,
+ ipv6ct_entry->src_port,
+ (ipv6ct_entry->out_allowed) ? "Allow" : "Deny",
+ (ipv6ct_entry->in_allowed) ? "Allow" : "Deny",
+ ipv6ct_entry->prev_index);
+
+ return length;
+}
+
+/*
+ * struct ipahal_nat_obj - H/W information for specific IPA version
+ * @entry_size - CB to get the size of the entry
+ * @is_entry_zeroed - CB to determine whether an entry is definitely zero
+ * @stringify_entry - CB to create string that represents an entry
+ */
+struct ipahal_nat_obj {
+ size_t (*entry_size)(void);
+ bool (*is_entry_zeroed)(const void *entry);
+ int (*stringify_entry)(const void *entry, char *buff, size_t buff_size);
+};
+
+/*
+ * This table contains the info regard each NAT type for IPAv3 and later.
+ * Information like: get entry size and stringify entry functions.
+ * All the information on all the NAT types on IPAv3 are statically
+ * defined below. If information is missing regard some NAT type on some
+ * IPA version, the init function will fill it with the information from the
+ * previous IPA version.
+ * Information is considered missing if all of the fields are 0
+ */
+static struct ipahal_nat_obj ipahal_nat_objs[IPA_HW_MAX][IPA_NAT_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0][IPAHAL_NAT_IPV4] = {
+ ipa_nat_ipv4_entry_size_v_3_0,
+ ipa_nat_ipv4_is_entry_zeroed_v_3_0,
+ ipa_nat_ipv4_stringify_entry_v_3_0
+ },
+ [IPA_HW_v3_0][IPAHAL_NAT_IPV4_INDEX] = {
+ ipa_nat_ipv4_index_entry_size_v_3_0,
+ ipa_nat_ipv4_is_index_entry_zeroed_v_3_0,
+ ipa_nat_ipv4_index_stringify_entry_v_3_0
+ },
+
+ /* IPAv4 */
+ [IPA_HW_v4_0][IPAHAL_NAT_IPV4] = {
+ ipa_nat_ipv4_entry_size_v_3_0,
+ ipa_nat_ipv4_is_entry_zeroed_v_3_0,
+ ipa_nat_ipv4_stringify_entry_v_4_0
+ },
+ [IPA_HW_v4_0][IPAHAL_NAT_IPV4_PDN] = {
+ ipa_nat_ipv4_pdn_entry_size_v_4_0,
+ ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0,
+ ipa_nat_ipv4_pdn_stringify_entry_v_4_0
+ },
+ [IPA_HW_v4_0][IPAHAL_NAT_IPV6CT] = {
+ ipa_nat_ipv6ct_entry_size_v_4_0,
+ ipa_nat_ipv6ct_is_entry_zeroed_v_4_0,
+ ipa_nat_ipv6ct_stringify_entry_v_4_0
+ }
+};
+
+static void ipahal_nat_check_obj(struct ipahal_nat_obj *obj,
+ int nat_type, int ver)
+{
+ WARN(obj->entry_size == NULL, "%s missing entry_size for version %d\n",
+ ipahal_nat_type_str(nat_type), ver);
+ WARN(obj->is_entry_zeroed == NULL,
+ "%s missing is_entry_zeroed for version %d\n",
+ ipahal_nat_type_str(nat_type), ver);
+ WARN(obj->stringify_entry == NULL,
+ "%s missing stringify_entry for version %d\n",
+ ipahal_nat_type_str(nat_type), ver);
+}
+
+/*
+ * ipahal_nat_init() - Build the NAT information table
+ * See ipahal_nat_objs[][] comments
+ */
+int ipahal_nat_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_nat_obj zero_obj, *next_obj;
+
+ IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+
+ if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+ IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+ return -EINVAL;
+ }
+
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; ++i) {
+ for (j = 0; j < IPA_NAT_MAX; ++j) {
+ next_obj = &ipahal_nat_objs[i + 1][j];
+ if (!memcmp(next_obj, &zero_obj, sizeof(*next_obj))) {
+ memcpy(next_obj, &ipahal_nat_objs[i][j],
+ sizeof(*next_obj));
+ } else {
+ ipahal_nat_check_obj(next_obj, j, i + 1);
+ }
+ }
+ }
+
+ return 0;
+}
+
+const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type)
+{
+ if (nat_type < 0 || nat_type >= IPA_NAT_MAX) {
+ IPAHAL_ERR("requested NAT type %d is invalid\n", nat_type);
+ return "Invalid NAT type";
+ }
+
+ return ipahal_nat_type_to_str[nat_type];
+}
+
+int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size)
+{
+ if (WARN(entry_size == NULL, "entry_size is NULL\n"))
+ return -EINVAL;
+ if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+ "requested NAT type %d is invalid\n", nat_type))
+ return -EINVAL;
+
+ IPAHAL_DBG("Get the entry size for NAT type=%s\n",
+ ipahal_nat_type_str(nat_type));
+ *entry_size = ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].
+ entry_size();
+ IPAHAL_DBG("The entry size is %zu\n", *entry_size);
+
+ return 0;
+}
+
+int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry,
+ bool *entry_zeroed)
+{
+ if (WARN(entry == NULL || entry_zeroed == NULL,
+ "NULL pointer received\n"))
+ return -EINVAL;
+ if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+ "requested NAT type %d is invalid\n", nat_type))
+ return -EINVAL;
+
+ IPAHAL_DBG("Determine whether the entry is zeroed for NAT type=%s\n",
+ ipahal_nat_type_str(nat_type));
+ *entry_zeroed = ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].
+ is_entry_zeroed(entry);
+ IPAHAL_DBG("The entry is %szeroed\n", (*entry_zeroed) ? "" : "not ");
+
+ return 0;
+}
+
+int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry,
+ char *buff, size_t buff_size)
+{
+ int result;
+
+ if (WARN(entry == NULL || buff == NULL, "NULL pointer received\n"))
+ return -EINVAL;
+ if (WARN(!buff_size, "The output buff size is zero\n"))
+ return -EINVAL;
+ if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
+ "requested NAT type %d is invalid\n", nat_type))
+ return -EINVAL;
+
+ IPAHAL_DBG("Create the string for the entry of NAT type=%s\n",
+ ipahal_nat_type_str(nat_type));
+ result = ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].
+ stringify_entry(entry, buff, buff_size);
+ IPAHAL_DBG("The string successfully created with length %d\n",
+ result);
+
+ return result;
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h
new file mode 100644
index 0000000..f99c1a0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_NAT_H_
+#define _IPAHAL_NAT_H_
+
+/*
+ * NAT types
+ *
+ * NOTE: Any change to this enum requires a corresponding change
+ * to the ipahal_nat_to_str array as well.
+ */
+enum ipahal_nat_type {
+ IPAHAL_NAT_IPV4,
+ IPAHAL_NAT_IPV4_INDEX,
+ IPAHAL_NAT_IPV4_PDN,
+ IPAHAL_NAT_IPV6CT,
+ IPA_NAT_MAX
+};
+
+/* NAT Function APIs */
+
+/*
+ * ipahal_nat_type_str() - returns a string that represents the NAT type
+ * @nat_type: [in] NAT type
+ */
+const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type);
+
+/*
+ * ipahal_nat_entry_size() - Gets the size of HW NAT entry
+ * @nat_type: [in] The type of the NAT entry
+ * @entry_size: [out] The size of the HW NAT entry
+ */
+int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size);
+
+/*
+ * ipahal_nat_is_entry_zeroed() - Determines whether HW NAT entry is
+ * definitely zero
+ * @nat_type: [in] The type of the NAT entry
+ * @entry: [in] The NAT entry
+ * @entry_zeroed: [out] True if the received entry is definitely zero
+ */
+int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry,
+ bool *entry_zeroed);
+
+/*
+ * ipahal_nat_stringify_entry() - Creates a string for HW NAT entry
+ * @nat_type: [in] The type of the NAT entry
+ * @entry: [in] The NAT entry
+ * @buff: [out] Output buffer for the result string
+ * @buff_size: [in] The size of the output buffer
+ * @return the number of characters written into buff not including
+ * the trailing '\0'
+ */
+int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry,
+ char *buff, size_t buff_size);
+
+#endif /* _IPAHAL_NAT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h
new file mode 100644
index 0000000..83bd0f5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat_i.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_NAT_I_H_
+#define _IPAHAL_NAT_I_H_
+
+#include <linux/msm_ipa.h>
+
+/* ----------------------- IPv4 NAT Table Entry -------------------------
+ *
+ * -----------------------------------------------------------------------
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * -----------------------------------------------------------------------
+ * | Target IP(4B) | Private IP(4B) |
+ * -----------------------------------------------------------------------
+ * |Target Port(2B) |Private Port(2B)| Public Port(2B) | Next Index(2B) |
+ * -----------------------------------------------------------------------
+ * |Proto| TimeStamp(3B) | Flags(2B) |IP check sum Diff|
+ * |(1B) | |EN|Redirect|Resv | (2B) |
+ * -----------------------------------------------------------------------
+ * |TCP/UDP checksum| PDN info(2B) | SW Specific Parameters(4B) |
+ * | diff (2B) |Info|Resv |index table entry| prev index |
+ * -----------------------------------------------------------------------
+ */
+struct ipa_nat_hw_ipv4_entry {
+	/* An IP address can't be a bit-field, because its address is used */
+ u32 private_ip;
+ u32 target_ip;
+
+ u32 next_index : 16;
+ u32 public_port : 16;
+ u32 private_port : 16;
+ u32 target_port : 16;
+ u32 ip_chksum : 16;
+
+ u32 rsvd1 : 14;
+ u32 redirect : 1;
+ u32 enable : 1;
+
+ u32 time_stamp : 24;
+ u32 protocol : 8;
+
+ u32 prev_index : 16;
+ u32 indx_tbl_entry : 16;
+
+ u32 rsvd2 : 12;
+ u32 pdn_index : 4; /* IPA 4.0 and greater */
+
+ u32 tcp_udp_chksum : 16;
+};
+
+/*--- IPV4 NAT Index Table Entry --
+ *---------------------------------
+ *| 3 | 2 | 1 | 0 |
+ *---------------------------------
+ *|next index(2B) |table entry(2B)|
+ *---------------------------------
+ */
+struct ipa_nat_hw_indx_entry {
+ u16 tbl_entry;
+ u16 next_index;
+};
+
+/**
+ * struct ipa_nat_hw_pdn_entry - IPA PDN config table entry
+ * @public_ip: the PDN's public ip
+ * @src_metadata: the PDN's metadata to be replaced for source NAT
+ * @dst_metadata: the PDN's metadata to be replaced for destination NAT
+ * @resrvd: reserved field
+ * ---------------------------------
+ * | 3 | 2 | 1 | 0 |
+ * ---------------------------------
+ * | public_ip (4B) |
+ * ---------------------------------
+ * | src_metadata (4B) |
+ * ---------------------------------
+ * | dst_metadata (4B) |
+ * ---------------------------------
+ * | resrvd (4B) |
+ * ---------------------------------
+ */
+struct ipa_nat_hw_pdn_entry {
+ u32 public_ip;
+ u32 src_metadata;
+ u32 dst_metadata;
+ u32 resrvd;
+};
+
+/*------------------------- IPV6CT Table Entry ------------------------------
+ *-----------------------------------------------------------------------------
+ *| 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ *-----------------------------------------------------------------------------
+ *| Outbound Src IPv6 Address (8 LSB Bytes) |
+ *-----------------------------------------------------------------------------
+ *| Outbound Src IPv6 Address (8 MSB Bytes) |
+ *-----------------------------------------------------------------------------
+ *| Outbound Dest IPv6 Address (8 LSB Bytes) |
+ *-----------------------------------------------------------------------------
+ *| Outbound Dest IPv6 Address (8 MSB Bytes) |
+ *-----------------------------------------------------------------------------
+ *|Protocol| TimeStamp (3B) | Flags (2B) |Reserved (2B) |
+ *| (1B) | |Enable|Redirect|Resv | |
+ *-----------------------------------------------------------------------------
+ *|Reserved|Direction(1B)|Src Port(2B)| Dest Port (2B) |Next Index(2B)|
+ *| (1B) |IN|OUT|Resv | | | |
+ *-----------------------------------------------------------------------------
+ *| SW Specific Parameters(4B) | Reserved (4B) |
+ *| Prev Index (2B) |Reserved(2B)| |
+ *-----------------------------------------------------------------------------
+ *| Reserved (8B) |
+ *-----------------------------------------------------------------------------
+ */
+struct ipa_nat_hw_ipv6ct_entry {
+	/* An IP address can't be a bit-field, because its address is used */
+ u64 src_ipv6_lsb;
+ u64 src_ipv6_msb;
+ u64 dest_ipv6_lsb;
+ u64 dest_ipv6_msb;
+
+ u64 rsvd1 : 30;
+ u64 redirect : 1;
+ u64 enable : 1;
+
+ u64 time_stamp : 24;
+ u64 protocol : 8;
+
+ u64 next_index : 16;
+ u64 dest_port : 16;
+ u64 src_port : 16;
+ u64 rsvd2 : 6;
+ u64 out_allowed : 1;
+ u64 in_allowed : 1;
+ u64 rsvd3 : 8;
+
+ u64 rsvd4 : 48;
+ u64 prev_index : 16;
+
+ u64 rsvd5 : 64;
+};
+
+int ipahal_nat_init(enum ipa_hw_type ipa_hw_type);
+
+#endif /* _IPAHAL_NAT_I_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 74f5bbd..1d8eb13 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1910,6 +1910,8 @@ void ipahal_get_aggr_force_close_valmask(int ep_idx,
return;
}
+ memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index e93210d..66d4b10 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -68,6 +68,9 @@
#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type);
+static void rmnet_ipa_get_stats_and_update(void);
+
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
static void ipa3_wwan_msg_free_cb(void*, u32, u32);
@@ -145,6 +148,10 @@ struct rmnet_ipa3_context {
u32 pm_hdl;
u32 q6_pm_hdl;
u32 q6_teth_pm_hdl;
+ struct mutex per_client_stats_guard;
+ struct ipa_tether_device_info
+ tether_device
+ [IPACM_MAX_CLIENT_DEVICE_TYPES];
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1189,7 +1196,11 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
static void ipa3_wwan_tx_timeout(struct net_device *dev)
{
- IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+ if (atomic_read(&wwan_ptr->outstanding_pkts) != 0)
+ IPAWANERR("[%s] data stall in UL, %d outstanding\n",
+ dev->name, atomic_read(&wwan_ptr->outstanding_pkts));
}
/**
@@ -1946,12 +1957,6 @@ int ipa3_wwan_set_modem_perf_profile(int throughput)
struct ipa_rm_perf_profile profile;
int ret;
- ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl, throughput);
- if (ret)
- return ret;
- return ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_teth_pm_hdl,
- throughput);
-
if (ipa3_ctx->use_ipa_pm) {
ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl,
throughput);
@@ -2741,9 +2746,11 @@ static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
}
if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
- type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+ type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS &&
+ type != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+ type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
IPAWANERR("Wrong type given. buff %p type %d\n",
- buff, type);
+ buff, type);
}
kfree(buff);
}
@@ -3501,8 +3508,488 @@ void ipa3_q6_handshake_complete(bool ssr_bootup)
}
}
+static inline bool rmnet_ipa3_check_any_client_inited
+(
+ enum ipacm_per_client_device_type device_type
+)
+{
+ int i = 0;
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx != -1 &&
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].inited) {
+ IPAWANERR("Found client index: %d which is inited\n",
+ i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline int rmnet_ipa3_get_lan_client_info
+(
+ enum ipacm_per_client_device_type device_type,
+ uint8_t mac[]
+)
+{
+ int i = 0;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (memcmp(
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].mac,
+ mac,
+ IPA_MAC_ADDR_SIZE) == 0) {
+ IPAWANDBG("Matched client index: %d\n", i);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static inline int rmnet_ipa3_delete_lan_client_info
+(
+ enum ipacm_per_client_device_type device_type,
+ int lan_clnt_idx
+)
+{
+ struct ipa_lan_client *lan_client = NULL;
+ int i;
+
+ /* Check if the request is to clean up all clients. */
+ if (lan_clnt_idx == 0xffffffff) {
+ /* Reset the complete device info. */
+ memset(&rmnet_ipa3_ctx->tether_device[device_type], 0,
+ sizeof(struct ipa_tether_device_info));
+ rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1;
+ for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++)
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx = -1;
+ } else {
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[lan_clnt_idx];
+ /* Reset the client info before sending the message. */
+ memset(lan_client, 0, sizeof(struct ipa_lan_client));
+ lan_client->client_idx = -1;
+
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_set_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_LAN_CLIENT_INFO.
+ * It is used to store LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_set_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ if (data->client_init) {
+ /* check if the client is already inited. */
+ if (rmnet_ipa3_ctx->tether_device[data->device_type]
+ .lan_client[data->client_idx].inited) {
+ IPAWANERR("Client already inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE);
+
+ lan_client->client_idx = data->client_idx;
+
+ /* Update the Source pipe. */
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe =
+ ipa3_get_ep_mapping(data->ul_src_pipe);
+
+ /* Update the header length if not set. */
+ if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len)
+ rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len =
+ data->hdr_len;
+
+ lan_client->inited = true;
+
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++;
+
+ IPAWANDBG("Set the lan client info: %d, %d, %d\n",
+ lan_client->client_idx,
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe,
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+/* rmnet_ipa3_clear_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_CLEAR_LAN_CLIENT_INFO.
+ * It is used to clear LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_clear_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ if (!data->client_init) {
+ /* check if the client is already de-inited. */
+ if (!lan_client->inited) {
+ IPAWANERR("Client already de-inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client->inited = false;
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+
+/* rmnet_ipa3_send_lan_client_msg() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG.
+ * It is used to send LAN client information to IPACM.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_send_lan_client_msg(
+ struct wan_ioctl_send_lan_client_msg *data)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+ struct ipa_lan_client_msg *lan_client;
+
+ /* Notify IPACM to reset the client index. */
+ lan_client = kzalloc(sizeof(struct ipa_lan_client_msg),
+ GFP_KERNEL);
+ if (!lan_client) {
+ IPAWANERR("Can't allocate memory for tether_info\n");
+ return -ENOMEM;
+ }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ memcpy(lan_client, &data->lan_client,
+ sizeof(struct ipa_lan_client_msg));
+ msg_meta.msg_type = data->client_event;
+ msg_meta.msg_len = sizeof(struct ipa_lan_client_msg);
+
+ rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(lan_client);
+ return rc;
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_enable_per_client_stats() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS.
+ * It is used to indicate Q6 to start capturing per client stats.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_enable_per_client_stats(
+ bool *data)
+{
+ struct ipa_enable_per_client_stats_req_msg_v01 *req;
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp;
+ int rc;
+
+ req =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ resp =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0,
+ sizeof(struct ipa_enable_per_client_stats_req_msg_v01));
+ memset(resp, 0,
+ sizeof(struct ipa_enable_per_client_stats_resp_msg_v01));
+
+ if (*data)
+ req->enable_per_client_stats = 1;
+ else
+ req->enable_per_client_stats = 0;
+
+ rc = ipa3_qmi_enable_per_client_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't enable per client stats\n");
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data)
+{
+ struct ipa_get_stats_per_client_req_msg_v01 *req;
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp;
+ int rc, lan_clnt_idx, lan_clnt_idx1, i;
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->client_info[0].mac[0],
+ data->client_info[0].mac[1],
+ data->client_info[0].mac[2],
+ data->client_info[0].mac[3],
+ data->client_info[0].mac[4],
+ data->client_info[0].mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if num_clients is valid. */
+ if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS &&
+ data->num_clients != 1) {
+ IPAWANERR("Invalid number of clients: %d\n", data->num_clients);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ if (data->num_clients == 1) {
+ /* Check if the client info is valid.*/
+ lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
+ data->device_type,
+ data->client_info[0].mac);
+ if (lan_clnt_idx1 < 0) {
+ IPAWANERR("Client info not available return.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[lan_clnt_idx1];
+		/*
+		 * Check if the disconnect flag is set and
+		 * whether this client's info has been cleared.
+		 */
+ if (data->disconnect_clnt &&
+ lan_client->inited) {
+ IPAWANERR("Client not inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
+
+ } else {
+ /* Max number of clients. */
+		/* Check if the disconnect flag is set and
+		 * whether all clients' info has been cleared.
+		 */
+ if (data->disconnect_clnt &&
+ rmnet_ipa3_check_any_client_inited(data->device_type)) {
+ IPAWANERR("CLient not inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
+ lan_clnt_idx1 = 0xffffffff;
+ }
+
+ req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -ENOMEM;
+ }
+ resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01));
+
+ if (data->reset_stats) {
+ req->reset_stats_valid = true;
+ req->reset_stats = true;
+ IPAWANDBG("fetch and reset the client stats\n");
+ }
+
+ req->client_id = lan_clnt_idx1;
+ req->src_pipe_id =
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe;
+
+ IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id,
+ req->src_pipe_id);
+
+ rc = ipa3_qmi_get_per_client_packet_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't get per client stats\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ if (resp->per_client_stats_list_valid) {
+ for (i = 0; i < resp->per_client_stats_list_len
+ && i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv4_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv4_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv4_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv4_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv4_bytes;
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv6_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv6_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv6_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv6_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv6_bytes;
+
+ IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->client_info[i].ipv4_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv4_rx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_rx_bytes);
+
+ /* Get the lan client index. */
+ lan_clnt_idx = resp->per_client_stats_list[i].client_id;
+ /* Check if lan_clnt_idx is valid. */
+ if (lan_clnt_idx < 0 ||
+ lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) {
+ IPAWANERR("Lan client index not valid.\n");
+ mutex_unlock(
+ &rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ ipa_assert();
+ return -EINVAL;
+ }
+ memcpy(data->client_info[i].mac,
+ rmnet_ipa3_ctx->
+ tether_device[data->device_type].
+ lan_client[lan_clnt_idx].mac,
+ IPA_MAC_ADDR_SIZE);
+ }
+ }
+
+ if (data->disconnect_clnt) {
+ rmnet_ipa3_delete_lan_client_info(data->device_type,
+ lan_clnt_idx1);
+ }
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
static int __init ipa3_wwan_init(void)
{
+ int i, j;
rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
if (!rmnet_ipa3_ctx) {
IPAWANERR("no memory\n");
@@ -3514,6 +4001,14 @@ static int __init ipa3_wwan_init(void)
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard);
+ /* Reset the Lan Stats. */
+ for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) {
+ rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1;
+ for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++)
+ rmnet_ipa3_ctx->tether_device[i].
+ lan_client[j].client_idx = -1;
+ }
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
@@ -3536,6 +4031,7 @@ static void __exit ipa3_wwan_cleanup(void)
ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 2e43abf..0f85e12 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -53,6 +53,15 @@
#define WAN_IOC_NOTIFY_WAN_STATE32 _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_NOTIFY_WAN_STATE, \
compat_uptr_t)
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+ compat_uptr_t)
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+ compat_uptr_t)
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+ compat_uptr_t)
#endif
static unsigned int dev_num = 1;
@@ -128,6 +137,33 @@ static long ipa3_wan_ioctl(struct file *filp,
}
break;
+ case WAN_IOC_ADD_UL_FLT_RULE:
+ IPAWANDBG("device %s got WAN_IOC_UL_ADD_FLT_RULE :>>>\n",
+ DRIVER_NAME);
+ pyld_sz =
+ sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_qmi_ul_filter_request_send(
+ (struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+ param)) {
+ IPAWANDBG("IPACM->Q6 add ul filter rule failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case WAN_IOC_ADD_FLT_RULE_INDEX:
IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
DRIVER_NAME);
@@ -339,7 +375,115 @@ static long ipa3_wan_ioctl(struct file *filp,
retval = -EFAULT;
break;
}
+
break;
+ case WAN_IOC_ENABLE_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(bool);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_enable_per_client_stats(
+ (bool *)param)) {
+ IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+ case WAN_IOC_QUERY_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ retval = rmnet_ipa3_query_per_client_stats(
+ (struct wan_ioctl_query_per_client_stats *)param);
+ if (retval) {
+ IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n");
+ break;
+ }
+
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_set_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_CLEAR_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_clear_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+
+ case WAN_IOC_SEND_LAN_CLIENT_MSG:
+ IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_send_lan_client_msg(
+ (struct wan_ioctl_send_lan_client_msg *)
+ param)) {
+ IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
default:
retval = -ENOTTY;
}
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 78cdc50..98861de 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -1359,11 +1359,11 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
p_events[event_ring_index].wp,
&(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
- event_ring_index + IPA_MHI_GSI_ER_START, 0));
+ event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
iowrite32(p_events[event_ring_index].wp,
test_mhi_ctx->gsi_mmio +
GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
- event_ring_index + IPA_MHI_GSI_ER_START, 0));
+ event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
for (i = 0; i < buf_array_size; i++) {
/* calculate virtual pointer for current WP and RP */
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index e76ff14..f64e9de 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -126,6 +126,7 @@ struct msm11ad_ctx {
struct cpumask boost_cpu;
bool keep_radio_on_during_sleep;
+ int features;
};
static LIST_HEAD(dev_list);
@@ -1085,6 +1086,10 @@ static int msm_11ad_probe(struct platform_device *pdev)
ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
"qcom,keep-radio-on-during-sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
+ if (!ctx->bus_scale) {
+ dev_err(ctx->dev, "Unable to read bus-scaling from DT\n");
+ return -EINVAL;
+ }
ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
if (ctx->smmu_s1_en) {
@@ -1113,7 +1118,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
rc = msm_11ad_init_vregs(ctx);
if (rc) {
dev_err(ctx->dev, "msm_11ad_init_vregs failed: %d\n", rc);
- return rc;
+ goto out_bus_scale;
}
rc = msm_11ad_enable_vregs(ctx);
if (rc) {
@@ -1172,6 +1177,18 @@ static int msm_11ad_probe(struct platform_device *pdev)
}
ctx->pcidev = pcidev;
+ rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+ pcidev, NULL, 0);
+ if (rc) {
+ dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed:%d\n",
+ rc);
+ goto out_rc;
+ }
+
+ pci_set_power_state(pcidev, PCI_D0);
+
+ pci_restore_state(ctx->pcidev);
+
/* Read current state */
rc = pci_read_config_dword(pcidev,
PCIE20_CAP_LINKCTRLSTATUS, &val);
@@ -1179,7 +1196,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
dev_err(ctx->dev,
"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
rc);
- goto out_rc;
+ goto out_suspend;
}
ctx->l1_enabled_in_enum = val & PCI_EXP_LNKCTL_ASPM_L1;
@@ -1192,7 +1209,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
if (rc) {
dev_err(ctx->dev,
"failed to disable L1, rc %d\n", rc);
- goto out_rc;
+ goto out_suspend;
}
}
@@ -1212,7 +1229,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
rc = msm_11ad_ssr_init(ctx);
if (rc) {
dev_err(ctx->dev, "msm_11ad_ssr_init failed: %d\n", rc);
- goto out_rc;
+ goto out_suspend;
}
msm_11ad_init_cpu_boost(ctx);
@@ -1234,6 +1251,9 @@ static int msm_11ad_probe(struct platform_device *pdev)
msm_11ad_suspend_power_off(ctx);
return 0;
+out_suspend:
+ msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+ pcidev, NULL, 0);
out_rc:
if (ctx->gpio_en >= 0)
gpio_direction_output(ctx->gpio_en, 0);
@@ -1247,6 +1267,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
msm_11ad_release_clocks(ctx);
msm_11ad_disable_vregs(ctx);
msm_11ad_release_vregs(ctx);
+out_bus_scale:
+ msm_bus_cl_clear_pdata(ctx->bus_scale);
return rc;
}
@@ -1261,7 +1283,6 @@ static int msm_11ad_remove(struct platform_device *pdev)
ctx->pcidev);
kfree(ctx->pristine_state);
- msm_bus_cl_clear_pdata(ctx->bus_scale);
pci_dev_put(ctx->pcidev);
if (ctx->gpio_en >= 0) {
gpio_direction_output(ctx->gpio_en, 0);
@@ -1422,6 +1443,7 @@ static int msm_11ad_notify_crash(struct msm11ad_ctx *ctx)
dev_info(ctx->dev, "SSR requested\n");
(void)msm_11ad_ssr_copy_ramdump(ctx);
ctx->recovery_in_progress = true;
+ subsys_set_crash_status(ctx->subsys, CRASH_STATUS_ERR_FATAL);
rc = subsystem_restart_dev(ctx->subsys);
if (rc) {
dev_err(ctx->dev,
@@ -1444,9 +1466,19 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
break;
case WIL_PLATFORM_EVT_PRE_RESET:
/*
- * TODO: Enable rf_clk3 clock before resetting the device to
- * ensure stable ref clock during the device reset
+ * Enable rf_clk3 clock before resetting the device to ensure
+ * stable ref clock during the device reset
*/
+ if (ctx->features &
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
+ rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+ if (rc) {
+ dev_err(ctx->dev,
+ "failed to enable clk, rc %d\n", rc);
+ break;
+ }
+ }
+
/* Re-enable L1 in case it was enabled in enumeration */
if (ctx->l1_enabled_in_enum) {
rc = msm_11ad_ctrl_aspm_l1(ctx, true);
@@ -1457,9 +1489,12 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
break;
case WIL_PLATFORM_EVT_FW_RDY:
/*
- * TODO: Disable rf_clk3 clock after the device is up to allow
+ * Disable rf_clk3 clock after the device is up to allow
* the device to control it via its GPIO for power saving
*/
+ if (ctx->features &
+ BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
+ msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
break;
default:
pr_debug("%s: Unhandled event %d\n", __func__, evt);
@@ -1469,14 +1504,28 @@ static int ops_notify(void *handle, enum wil_platform_event evt)
return rc;
}
-static bool ops_keep_radio_on_during_sleep(void *handle)
+static int ops_get_capa(void *handle)
{
struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+ int capa;
pr_debug("%s: keep radio on during sleep is %s\n", __func__,
ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
- return ctx->keep_radio_on_during_sleep;
+ capa = (ctx->keep_radio_on_during_sleep ?
+ BIT(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND) : 0) |
+ BIT(WIL_PLATFORM_CAPA_T_PWR_ON_0) |
+ BIT(WIL_PLATFORM_CAPA_EXT_CLK);
+
+ return capa;
+}
+
+static void ops_set_features(void *handle, int features)
+{
+ struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+ pr_debug("%s: features 0x%x\n", __func__, features);
+ ctx->features = features;
}
void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
@@ -1518,7 +1567,8 @@ void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
ops->resume = ops_resume;
ops->uninit = ops_uninit;
ops->notify = ops_notify;
- ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
+ ops->get_capa = ops_get_capa;
+ ops->set_features = ops_set_features;
return ctx;
}
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 73bf935..24b8e2c 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -147,6 +147,12 @@ static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
int ret = 0;
int state;
+ if (!ext_disp->ops) {
+ pr_err("codec not registered, skip notification\n");
+ ret = -EPERM;
+ goto end;
+ }
+
state = ext_disp->audio_sdev.state;
ret = extcon_set_state_sync(&ext_disp->audio_sdev,
ext_disp->current_disp, !!new_state);
@@ -155,7 +161,7 @@ static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
ext_disp->audio_sdev.state == state ?
"is same" : "switched to",
ext_disp->audio_sdev.state);
-
+end:
return ret;
}
@@ -218,15 +224,10 @@ static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
goto end;
}
- if (!ext_disp->ops) {
- pr_err("codec ops not registered\n");
- ret = -EINVAL;
- goto end;
- }
-
if (state == EXT_DISPLAY_CABLE_CONNECT) {
/* connect codec with interface */
- *ext_disp->ops = data->codec_ops;
+ if (ext_disp->ops)
+ *ext_disp->ops = data->codec_ops;
/* update pdev for interface to use */
ext_disp->ext_disp_data.intf_pdev = data->pdev;
@@ -285,6 +286,28 @@ static int msm_ext_disp_audio_notify(struct platform_device *pdev,
return ret;
}
+static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
+{
+ int ret;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ if (!ext_disp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ ret = msm_ext_disp_get_intf_data(ext_disp,
+ ext_disp->current_disp, &data);
+ if (ret) {
+ pr_err("%s not found\n",
+ msm_ext_disp_name(ext_disp->current_disp));
+ return;
+ }
+
+ *ext_disp->ops = data->codec_ops;
+ data->codec_ops.ready(ext_disp->pdev);
+}
+
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
@@ -334,6 +357,8 @@ int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
end:
mutex_unlock(&ext_disp->lock);
+ if (ext_disp->current_disp != EXT_DISPLAY_TYPE_MAX)
+ msm_ext_disp_ready_for_display(ext_disp);
return ret;
}
@@ -341,6 +366,8 @@ EXPORT_SYMBOL(msm_ext_disp_register_audio_codec);
static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
{
+ struct msm_ext_disp_audio_codec_ops *ops;
+
if (!init_data) {
pr_err("Invalid init_data\n");
return -EINVAL;
@@ -351,9 +378,15 @@ static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
return -EINVAL;
}
- if (!init_data->codec_ops.get_audio_edid_blk ||
- !init_data->codec_ops.cable_status ||
- !init_data->codec_ops.audio_info_setup) {
+ ops = &init_data->codec_ops;
+
+ if (!ops->audio_info_setup ||
+ !ops->get_audio_edid_blk ||
+ !ops->cable_status ||
+ !ops->get_intf_id ||
+ !ops->teardown_done ||
+ !ops->acknowledge ||
+ !ops->ready) {
pr_err("Invalid codec operation pointers\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 423c8f1..94736d4 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/of.h>
@@ -575,13 +576,6 @@ void se_config_packing(void __iomem *base, int bpw,
}
EXPORT_SYMBOL(se_config_packing);
-static void se_geni_clks_off(struct se_geni_rsc *rsc)
-{
- clk_disable_unprepare(rsc->se_clk);
- clk_disable_unprepare(rsc->s_ahb_clk);
- clk_disable_unprepare(rsc->m_ahb_clk);
-}
-
static bool geni_se_check_bus_bw(struct geni_se_device *geni_se_dev)
{
int i;
@@ -641,6 +635,37 @@ static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
}
/**
+ * se_geni_clks_off() - Turn off clocks associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+ int ret = 0;
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!rsc || !rsc->wrapper_dev))
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+ if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+ return -ENODEV;
+
+ clk_disable_unprepare(rsc->se_clk);
+ clk_disable_unprepare(rsc->s_ahb_clk);
+ clk_disable_unprepare(rsc->m_ahb_clk);
+
+ ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ if (ret)
+ GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
+ "%s: Error %d during bus_bw_update\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL(se_geni_clks_off);
+
+/**
* se_geni_resources_off() - Turn off resources associated with the serial
* engine
* @rsc: Handle to resources associated with the serial engine.
@@ -665,37 +690,14 @@ int se_geni_resources_off(struct se_geni_rsc *rsc)
"%s: Error %d pinctrl_select_state\n", __func__, ret);
return ret;
}
- se_geni_clks_off(rsc);
- ret = geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ ret = se_geni_clks_off(rsc);
if (ret)
GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
- "%s: Error %d during bus_bw_update\n", __func__, ret);
+ "%s: Error %d turning off clocks\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL(se_geni_resources_off);
-static int se_geni_clks_on(struct se_geni_rsc *rsc)
-{
- int ret;
-
- ret = clk_prepare_enable(rsc->m_ahb_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(rsc->s_ahb_clk);
- if (ret) {
- clk_disable_unprepare(rsc->m_ahb_clk);
- return ret;
- }
-
- ret = clk_prepare_enable(rsc->se_clk);
- if (ret) {
- clk_disable_unprepare(rsc->s_ahb_clk);
- clk_disable_unprepare(rsc->m_ahb_clk);
- }
- return ret;
-}
-
static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
struct se_geni_rsc *rsc)
{
@@ -733,13 +735,13 @@ static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
}
/**
- * se_geni_resources_on() - Turn on resources associated with the serial
- * engine
+ * se_geni_clks_on() - Turn on clocks associated with the serial
+ * engine
* @rsc: Handle to resources associated with the serial engine.
*
* Return: 0 on success, standard Linux error codes on failure/error.
*/
-int se_geni_resources_on(struct se_geni_rsc *rsc)
+int se_geni_clks_on(struct se_geni_rsc *rsc)
{
int ret = 0;
struct geni_se_device *geni_se_dev;
@@ -758,11 +760,52 @@ int se_geni_resources_on(struct se_geni_rsc *rsc)
return ret;
}
+ ret = clk_prepare_enable(rsc->m_ahb_clk);
+ if (ret)
+ goto clks_on_err1;
+
+ ret = clk_prepare_enable(rsc->s_ahb_clk);
+ if (ret)
+ goto clks_on_err2;
+
+ ret = clk_prepare_enable(rsc->se_clk);
+ if (ret)
+ goto clks_on_err3;
+ return 0;
+
+clks_on_err3:
+ clk_disable_unprepare(rsc->s_ahb_clk);
+clks_on_err2:
+ clk_disable_unprepare(rsc->m_ahb_clk);
+clks_on_err1:
+ geni_se_rmv_ab_ib(geni_se_dev, rsc);
+ return ret;
+}
+EXPORT_SYMBOL(se_geni_clks_on);
+
+/**
+ * se_geni_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_resources_on(struct se_geni_rsc *rsc)
+{
+ int ret = 0;
+ struct geni_se_device *geni_se_dev;
+
+ if (unlikely(!rsc || !rsc->wrapper_dev))
+ return -EINVAL;
+
+ geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+ if (unlikely(!geni_se_dev))
+ return -EPROBE_DEFER;
+
ret = se_geni_clks_on(rsc);
if (ret) {
GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
"%s: Error %d during clks_on\n", __func__, ret);
- geni_se_rmv_ab_ib(geni_se_dev, rsc);
return ret;
}
@@ -771,7 +814,6 @@ int se_geni_resources_on(struct se_geni_rsc *rsc)
GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
"%s: Error %d pinctrl_select_state\n", __func__, ret);
se_geni_clks_off(rsc);
- geni_se_rmv_ab_ib(geni_se_dev, rsc);
}
return ret;
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 96ffda4..454cb2e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -248,7 +248,7 @@ static int hp_wmi_display_state(void)
int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -258,7 +258,7 @@ static int hp_wmi_hddtemp_state(void)
int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -268,7 +268,7 @@ static int hp_wmi_als_state(void)
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -279,7 +279,7 @@ static int hp_wmi_dock_state(void)
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state & 0x1;
}
@@ -290,7 +290,7 @@ static int hp_wmi_tablet_state(void)
int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return ret;
+ return ret < 0 ? ret : -EINVAL;
return (state & 0x4) ? 1 : 0;
}
@@ -323,7 +323,7 @@ static int __init hp_wmi_enable_hotkeys(void)
int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
sizeof(value), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -336,7 +336,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
&query, sizeof(query), 0);
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return 0;
}
@@ -428,7 +428,7 @@ static int hp_wmi_post_code_state(void)
int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return state;
}
@@ -494,7 +494,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -515,7 +515,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr,
ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
@@ -572,10 +572,12 @@ static void hp_wmi_notify(u32 value, void *context)
switch (event_id) {
case HPWMI_DOCK_EVENT:
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
@@ -644,6 +646,7 @@ static int __init hp_wmi_input_setup(void)
{
acpi_status status;
int err;
+ int val;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
@@ -654,17 +657,26 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
- __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Dock */
+ val = hp_wmi_dock_state();
+ if (!(val < 0)) {
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ }
+
+ /* Tablet mode */
+ val = hp_wmi_tablet_state();
+ if (!(val < 0)) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
- input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
@@ -950,10 +962,12 @@ static int hp_wmi_resume_handler(struct device *device)
* changed.
*/
if (hp_wmi_input_dev) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
}
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b8..5c768c4 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = {
{ "msic_thermal", 1 },
{ }
};
+MODULE_DEVICE_TABLE(platform, therm_id_table);
static struct platform_driver mid_thermal_driver = {
.driver = {
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde69..f62f9df 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -169,8 +169,10 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
break;
}
- if (ret < 0)
+ if (ret < 0) {
dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
+ return ret;
+ }
return val;
}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index b929d8b..785cf23 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -319,6 +319,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(pd_voltage_max),
POWER_SUPPLY_ATTR(pd_voltage_min),
POWER_SUPPLY_ATTR(sdp_current_max),
+ POWER_SUPPLY_ATTR(connector_type),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 7f9a797..4b900e2 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -61,6 +61,7 @@ struct pl_data {
struct delayed_work status_change_work;
struct work_struct pl_disable_forever_work;
struct work_struct pl_taper_work;
+ struct delayed_work pl_awake_work;
bool taper_work_running;
struct power_supply *main_psy;
struct power_supply *pl_psy;
@@ -568,6 +569,14 @@ static void pl_disable_forever_work(struct work_struct *work)
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
+static void pl_awake_work(struct work_struct *work)
+{
+ struct pl_data *chip = container_of(work,
+ struct pl_data, pl_awake_work.work);
+
+ vote(chip->pl_awake_votable, PL_VOTER, false, 0);
+}
+
static bool is_main_available(struct pl_data *chip)
{
if (chip->main_psy)
@@ -595,6 +604,10 @@ static int pl_disable_vote_callback(struct votable *votable,
total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
+ /* keep system awake to talk to slave charger through i2c */
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ vote(chip->pl_awake_votable, PL_VOTER, true, 0);
+
/* enable parallel charging */
rc = power_supply_get_property(chip->pl_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
@@ -698,6 +711,10 @@ static int pl_disable_vote_callback(struct votable *votable,
}
rerun_election(chip->fv_votable);
+
+ cancel_delayed_work_sync(&chip->pl_awake_work);
+ schedule_delayed_work(&chip->pl_awake_work,
+ msecs_to_jiffies(5000));
}
pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
@@ -1075,6 +1092,7 @@ int qcom_batt_init(void)
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
INIT_WORK(&chip->pl_taper_work, pl_taper_work);
INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+ INIT_DELAYED_WORK(&chip->pl_awake_work, pl_awake_work);
rc = pl_register_notifier(chip);
if (rc < 0) {
@@ -1128,6 +1146,7 @@ void qcom_batt_deinit(void)
cancel_delayed_work_sync(&chip->status_change_work);
cancel_work_sync(&chip->pl_taper_work);
cancel_work_sync(&chip->pl_disable_forever_work);
+ cancel_delayed_work_sync(&chip->pl_awake_work);
power_supply_unreg_notifier(&chip->nb);
destroy_votable(chip->pl_enable_votable_indirect);
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 7c10e63..99120f4 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -57,6 +57,8 @@
/* Battery missing irq votable reasons */
#define BATT_MISS_IRQ_VOTER "fg_batt_miss_irq"
+#define ESR_FCC_VOTER "fg_esr_fcc"
+
#define DEBUG_PRINT_BUFFER_SIZE 64
/* 3 byte address + 1 space character */
#define ADDR_LEN 4
@@ -403,6 +405,7 @@ struct fg_chip {
struct votable *awake_votable;
struct votable *delta_bsoc_irq_en_votable;
struct votable *batt_miss_irq_en_votable;
+ struct votable *pl_disable_votable;
struct fg_sram_param *sp;
struct fg_dma_address *addr_map;
struct fg_alg_flag *alg_flags;
@@ -455,11 +458,11 @@ struct fg_chip {
bool qnovo_enable;
struct completion soc_update;
struct completion soc_ready;
- struct completion mem_grant;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct delayed_work ttf_work;
struct delayed_work sram_dump_work;
+ struct delayed_work pl_enable_work;
};
/* Debugfs data structures are below */
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index c1b5adc..d9b5ad7 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -746,15 +746,12 @@ int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
return rc;
}
-#define MEM_GRANT_WAIT_MS 200
+#define MEM_GNT_WAIT_TIME_US 10000
+#define MEM_GNT_RETRIES 20
static int fg_direct_mem_request(struct fg_chip *chip, bool request)
{
- int rc, ret;
+ int rc, ret, i = 0;
u8 val, mask;
- bool tried_again = false;
-
- if (request)
- reinit_completion(&chip->mem_grant);
mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
val = request ? MEM_ACCESS_REQ_BIT : 0;
@@ -769,7 +766,7 @@ static int fg_direct_mem_request(struct fg_chip *chip, bool request)
rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
if (rc < 0) {
pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
- return rc;
+ goto release;
}
if (request)
@@ -780,43 +777,39 @@ static int fg_direct_mem_request(struct fg_chip *chip, bool request)
if (!request)
return 0;
-wait:
- ret = wait_for_completion_interruptible_timeout(
- &chip->mem_grant, msecs_to_jiffies(MEM_GRANT_WAIT_MS));
- /* If we were interrupted wait again one more time. */
- if (ret <= 0) {
- if ((ret == -ERESTARTSYS || ret == 0) && !tried_again) {
- pr_debug("trying again, ret=%d\n", ret);
- tried_again = true;
- goto wait;
- } else {
- pr_err("wait for mem_grant timed out ret=%d\n",
- ret);
- fg_dump_regs(chip);
+ while (i < MEM_GNT_RETRIES) {
+ rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &val, 1);
+ if (rc < 0) {
+ pr_err("Error in reading MEM_IF_INT_RT_STS, rc=%d\n",
+ rc);
+ goto release;
}
+
+ if (val & MEM_GNT_BIT)
+ return 0;
+
+ usleep_range(MEM_GNT_WAIT_TIME_US, MEM_GNT_WAIT_TIME_US + 1);
+ i++;
}
- if (ret <= 0) {
- val = 0;
- mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
- rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask,
- val);
- if (rc < 0) {
- pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n",
- rc);
- return rc;
- }
+ rc = -ETIMEDOUT;
+ pr_err("wait for mem_grant timed out, val=0x%x\n", val);
+ fg_dump_regs(chip);
- mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
- rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask,
- val);
- if (rc < 0) {
- pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n",
- rc);
- return rc;
- }
+release:
+ val = 0;
+ mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+ ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask, val);
+ if (ret < 0) {
+ pr_err("failed to configure mem_if_mem_intf_cfg ret=%d\n", ret);
+ return ret;
+ }
- return -ETIMEDOUT;
+ mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+ ret = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
+ if (ret < 0) {
+ pr_err("failed to configure mem_if_mem_arb_cfg ret:%d\n", ret);
+ return ret;
}
return rc;
@@ -847,8 +840,8 @@ static int fg_get_dma_address(struct fg_chip *chip, u16 sram_addr, u8 offset,
static int fg_get_partition_count(struct fg_chip *chip, u16 sram_addr, int len,
int *count)
{
- int i, num = 0;
- u16 end_addr, last_addr = 0;
+ int i, start_partn = 0, end_partn = 0;
+ u16 end_addr = 0;
end_addr = sram_addr + len / BYTES_PER_SRAM_WORD;
if (!(len % BYTES_PER_SRAM_WORD))
@@ -860,24 +853,24 @@ static int fg_get_partition_count(struct fg_chip *chip, u16 sram_addr, int len,
}
for (i = 0; i < NUM_PARTITIONS; i++) {
- pr_debug("address: %d last_addr: %d\n", sram_addr, last_addr);
if (sram_addr >= chip->addr_map[i].partition_start
- && sram_addr <= chip->addr_map[i].partition_end
- && last_addr < end_addr) {
- num++;
- last_addr = chip->addr_map[i].partition_end;
- sram_addr = chip->addr_map[i+1].partition_start;
- }
+ && sram_addr <= chip->addr_map[i].partition_end)
+ start_partn = i + 1;
+
+ if (end_addr >= chip->addr_map[i].partition_start
+ && end_addr <= chip->addr_map[i].partition_end)
+ end_partn = i + 1;
}
- if (num > 0) {
- *count = num;
- return 0;
+ if (!start_partn || !end_partn) {
+ pr_err("Couldn't find number of partitions for address %d\n",
+ sram_addr);
+ return -ENXIO;
}
- pr_err("Couldn't find number of partitions for address %d\n",
- sram_addr);
- return -ENXIO;
+ *count = (end_partn - start_partn) + 1;
+
+ return 0;
}
static int fg_get_partition_avail_bytes(struct fg_chip *chip, u16 sram_addr,
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 2a47442..8c53b2e 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -762,7 +762,19 @@ static int fg_get_msoc(struct fg_chip *chip, int *msoc)
if (rc < 0)
return rc;
- *msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /*
+ * To have better endpoints for 0 and 100, it is good to tune the
+ * calculation discarding values 0 and 255 while rounding off. Rest
+ * of the values 1-254 will be scaled to 1-99. DIV_ROUND_UP will not
+ * be suitable here as it rounds up any value higher than 252 to 100.
+ */
+ if (*msoc == FULL_SOC_RAW)
+ *msoc = 100;
+ else if (*msoc == 0)
+ *msoc = 0;
+ else
+ *msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
return 0;
}
@@ -833,7 +845,7 @@ static bool is_debug_batt_id(struct fg_chip *chip)
{
int debug_batt_id[2], rc;
- if (!chip->batt_id_ohms)
+ if (chip->batt_id_ohms < 0)
return false;
rc = fg_get_debug_batt_id(chip, debug_batt_id);
@@ -869,7 +881,7 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
return 0;
}
- if (chip->battery_missing) {
+ if (chip->battery_missing || !chip->soc_reporting_ready) {
*val = BATT_MISS_SOC;
return 0;
}
@@ -2555,6 +2567,11 @@ static void status_change_work(struct work_struct *work)
goto out;
}
+ if (!chip->soc_reporting_ready) {
+ fg_dbg(chip, FG_STATUS, "Profile load is not complete yet\n");
+ goto out;
+ }
+
rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
&prop);
if (rc < 0) {
@@ -2618,7 +2635,7 @@ static void status_change_work(struct work_struct *work)
fg_ttf_update(chip);
chip->prev_charge_status = chip->charge_status;
out:
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+ fg_dbg(chip, FG_STATUS, "charge_status:%d charge_type:%d charge_done:%d\n",
chip->charge_status, chip->charge_type, chip->charge_done);
pm_relax(chip->dev);
}
@@ -2721,6 +2738,49 @@ static bool is_profile_load_required(struct fg_chip *chip)
return true;
}
+static void fg_update_batt_profile(struct fg_chip *chip)
+{
+ int rc, offset;
+ u8 val;
+
+ rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+ SW_CONFIG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * If the RCONN had not been updated, no need to update battery
+ * profile. Else, update the battery profile so that the profile
+ * modified by bootloader or HLOS matches with the profile read
+ * from device tree.
+ */
+
+ if (!(val & RCONN_CONFIG_BIT))
+ return;
+
+ rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+ ESR_RSLOW_CHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+ offset = (ESR_RSLOW_CHG_WORD - PROFILE_LOAD_WORD) * 4
+ + ESR_RSLOW_CHG_OFFSET;
+ chip->batt_profile[offset] = val;
+
+ rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+ ESR_RSLOW_DISCHG_OFFSET, &val, 1, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+ return;
+ }
+ offset = (ESR_RSLOW_DISCHG_WORD - PROFILE_LOAD_WORD) * 4
+ + ESR_RSLOW_DISCHG_OFFSET;
+ chip->batt_profile[offset] = val;
+}
+
static void clear_battery_profile(struct fg_chip *chip)
{
u8 val = 0;
@@ -2778,6 +2838,16 @@ static int __fg_restart(struct fg_chip *chip)
return rc;
}
+static void pl_enable_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ pl_enable_work.work);
+
+ vote(chip->pl_disable_votable, ESR_FCC_VOTER, false, 0);
+ vote(chip->awake_votable, ESR_FCC_VOTER, false, 0);
+}
+
static void profile_load_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
@@ -2804,6 +2874,8 @@ static void profile_load_work(struct work_struct *work)
if (!chip->profile_available)
goto out;
+ fg_update_batt_profile(chip);
+
if (!is_profile_load_required(chip))
goto done;
@@ -2865,13 +2937,23 @@ static void profile_load_work(struct work_struct *work)
rc);
}
+ rc = fg_rconn_config(chip);
+ if (rc < 0)
+ pr_err("Error in configuring Rconn, rc=%d\n", rc);
+
batt_psy_initialized(chip);
fg_notify_charger(chip);
chip->profile_loaded = true;
fg_dbg(chip, FG_STATUS, "profile loaded successfully");
out:
chip->soc_reporting_ready = true;
+ vote(chip->awake_votable, ESR_FCC_VOTER, true, 0);
+ schedule_delayed_work(&chip->pl_enable_work, msecs_to_jiffies(5000));
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+ if (!work_pending(&chip->status_change_work)) {
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->status_change_work);
+ }
}
static void sram_dump_work(struct work_struct *work)
@@ -4059,12 +4141,6 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
- rc = fg_rconn_config(chip);
- if (rc < 0) {
- pr_err("Error in configuring Rconn, rc=%d\n", rc);
- return rc;
- }
-
fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
chip->dt.esr_tight_flt_upct, buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
@@ -4160,25 +4236,6 @@ static int fg_adjust_timebase(struct fg_chip *chip)
/* INTERRUPT HANDLERS STAY HERE */
-static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
-{
- struct fg_chip *chip = data;
- u8 status;
- int rc;
-
- rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
- if (rc < 0) {
- pr_err("failed to read addr=0x%04x, rc=%d\n",
- MEM_IF_INT_RT_STS(chip), rc);
- return IRQ_HANDLED;
- }
-
- fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
- complete_all(&chip->mem_grant);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
@@ -4237,6 +4294,9 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
chip->profile_available = false;
chip->profile_loaded = false;
chip->soc_reporting_ready = false;
+ chip->batt_id_ohms = -EINVAL;
+ cancel_delayed_work_sync(&chip->pl_enable_work);
+ vote(chip->pl_disable_votable, ESR_FCC_VOTER, true, 0);
return IRQ_HANDLED;
}
@@ -4463,7 +4523,7 @@ static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
.name = "dma-grant",
- .handler = fg_dma_grant_irq_handler,
+ .handler = fg_dummy_irq_handler,
.wakeable = true,
},
[MEM_XCP_IRQ] = {
@@ -5060,6 +5120,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->prev_charge_status = -EINVAL;
chip->ki_coeff_full_soc = -EINVAL;
chip->online_status = -EINVAL;
+ chip->batt_id_ohms = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -5091,6 +5152,12 @@ static int fg_gen3_probe(struct platform_device *pdev)
}
}
+ chip->pl_disable_votable = find_votable("PL_DISABLE");
+ if (chip->pl_disable_votable == NULL) {
+ rc = -EPROBE_DEFER;
+ goto exit;
+ }
+
chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
chip);
if (IS_ERR(chip->awake_votable)) {
@@ -5133,8 +5200,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->qnovo_esr_ctrl_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
- init_completion(&chip->mem_grant);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
+ INIT_DELAYED_WORK(&chip->pl_enable_work, pl_enable_work);
INIT_WORK(&chip->status_change_work, status_change_work);
INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
@@ -5148,23 +5215,6 @@ static int fg_gen3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- rc = fg_register_interrupts(chip);
- if (rc < 0) {
- dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
- rc);
- goto exit;
- }
-
- /* Keep SOC_UPDATE irq disabled until we require it */
- if (fg_irqs[SOC_UPDATE_IRQ].irq)
- disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
-
- /* Keep BSOC_DELTA_IRQ disabled until we require it */
- vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
-
- /* Keep BATT_MISSING_IRQ disabled until we require it */
- vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
-
rc = fg_hw_init(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
@@ -5192,6 +5242,23 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
+ rc = fg_register_interrupts(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+ rc);
+ goto exit;
+ }
+
+ /* Keep SOC_UPDATE irq disabled until we require it */
+ if (fg_irqs[SOC_UPDATE_IRQ].irq)
+ disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+ /* Keep BSOC_DELTA_IRQ disabled until we require it */
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+ /* Keep BATT_MISSING_IRQ disabled until we require it */
+ vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
@@ -5207,8 +5274,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
rc = fg_get_battery_temp(chip, &batt_temp);
if (!rc) {
- pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
- msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
+ pr_info("battery SOC:%d voltage: %duV temp: %d\n",
+ msoc, volt_uv, batt_temp);
rc = fg_esr_filter_config(chip, batt_temp);
if (rc < 0)
pr_err("Error in configuring ESR filter rc:%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index 17b9c1d3..a12b0ad 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -549,6 +549,7 @@ struct fg_trans {
struct fg_chip *chip;
struct fg_log_buffer *log; /* log buffer */
u8 *data; /* fg data that is read */
+ struct mutex memif_dfs_lock; /* Prevent thread concurrency */
};
struct fg_dbgfs {
@@ -5725,6 +5726,7 @@ static int fg_memif_data_open(struct inode *inode, struct file *file)
trans->addr = dbgfs_data.addr;
trans->chip = dbgfs_data.chip;
trans->offset = trans->addr;
+ mutex_init(&trans->memif_dfs_lock);
file->private_data = trans;
return 0;
@@ -5736,6 +5738,7 @@ static int fg_memif_dfs_close(struct inode *inode, struct file *file)
if (trans && trans->log && trans->data) {
file->private_data = NULL;
+ mutex_destroy(&trans->memif_dfs_lock);
kfree(trans->log);
kfree(trans->data);
kfree(trans);
@@ -5893,10 +5896,13 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
size_t ret;
size_t len;
+ mutex_lock(&trans->memif_dfs_lock);
/* Is the the log buffer empty */
if (log->rpos >= log->wpos) {
- if (get_log_data(trans) <= 0)
- return 0;
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
}
len = min(count, log->wpos - log->rpos);
@@ -5904,7 +5910,8 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
ret = copy_to_user(buf, &log->data[log->rpos], len);
if (ret == len) {
pr_err("error copy sram register values to user\n");
- return -EFAULT;
+ len = -EFAULT;
+ goto unlock_mutex;
}
/* 'ret' is the number of bytes not copied */
@@ -5912,6 +5919,9 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
*ppos += len;
log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
return len;
}
@@ -5932,15 +5942,20 @@ static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
int cnt = 0;
u8 *values;
size_t ret = 0;
+ char *kbuf;
+ u32 offset;
struct fg_trans *trans = file->private_data;
- u32 offset = trans->offset;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ offset = trans->offset;
/* Make a copy of the user data */
- char *kbuf = kmalloc(count + 1, GFP_KERNEL);
-
- if (!kbuf)
- return -ENOMEM;
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
ret = copy_from_user(kbuf, buf, count);
if (ret == count) {
@@ -5991,6 +6006,8 @@ static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
free_buf:
kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
return ret;
}
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 1ab0357..ea78ddd3 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -313,8 +313,6 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.auto_recharge_soc = of_property_read_bool(node,
"qcom,auto-recharge-soc");
- chg->micro_usb_mode = of_property_read_bool(node, "qcom,micro-usb");
-
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
chg->suspend_input_on_debug_batt = of_property_read_bool(node,
@@ -356,6 +354,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONNECTOR_TYPE,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -378,9 +377,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
if (!val->intval)
break;
- if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
- chg->micro_usb_mode) &&
- chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+ || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+ && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
val->intval = 0;
else
val->intval = 1;
@@ -409,7 +408,7 @@ static int smb2_usb_get_prop(struct power_supply *psy,
val->intval = chg->real_charger_type;
break;
case POWER_SUPPLY_PROP_TYPEC_MODE:
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
val->intval = POWER_SUPPLY_TYPEC_NONE;
else if (chip->bad_part)
val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
@@ -417,13 +416,13 @@ static int smb2_usb_get_prop(struct power_supply *psy,
val->intval = chg->typec_mode;
break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
else
rc = smblib_get_prop_typec_power_role(chg, val);
break;
case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
val->intval = 0;
else
rc = smblib_get_prop_typec_cc_orientation(chg, val);
@@ -471,6 +470,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
val->intval = get_client_vote(chg->usb_icl_votable,
USB_PSY_VOTER);
break;
+ case POWER_SUPPLY_PROP_CONNECTOR_TYPE:
+ val->intval = chg->connector_type;
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -609,9 +611,9 @@ static int smb2_usb_port_get_prop(struct power_supply *psy,
if (!val->intval)
break;
- if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
- chg->micro_usb_mode) &&
- chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+ || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
+ && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
val->intval = 1;
else
val->intval = 0;
@@ -1268,7 +1270,7 @@ static int smb2_init_vconn_regulator(struct smb2 *chip)
struct regulator_config cfg = {};
int rc = 0;
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
return 0;
chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
@@ -1563,9 +1565,9 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
true, 0);
vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
- chg->micro_usb_mode, 0);
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
- chg->micro_usb_mode, 0);
+ (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
/*
* AICL configuration:
@@ -1595,7 +1597,17 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
- if (chg->micro_usb_mode)
+ /* Check USB connector type (typeC/microUSB) */
+ rc = smblib_read(chg, RID_CC_CONTROL_7_0_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read RID_CC_CONTROL_7_0 rc=%d\n",
+ rc);
+ return rc;
+ }
+ chg->connector_type = (val & EN_MICRO_USB_MODE_BIT) ?
+ POWER_SUPPLY_CONNECTOR_MICRO_USB
+ : POWER_SUPPLY_CONNECTOR_TYPEC;
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
rc = smb2_disable_typec(chg);
else
rc = smb2_configure_typec(chg);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ddc8701..0012a92 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -979,8 +979,8 @@ int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua)
u8 load_cfg;
bool override;
- if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
- || chg->micro_usb_mode)
+ if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+ || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB))
&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
rc = get_sdp_current(chg, icl_ua);
if (rc < 0) {
@@ -2054,6 +2054,18 @@ static int smblib_dm_pulse(struct smb_charger *chg)
return rc;
}
+static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
int smblib_dp_dm(struct smb_charger *chg, int val)
{
int target_icl_ua, rc = 0;
@@ -2105,6 +2117,21 @@ int smblib_dp_dm(struct smb_charger *chg, int val)
smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
target_icl_ua, chg->usb_icl_delta_ua);
break;
+ case POWER_SUPPLY_DP_DM_FORCE_5V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_5V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_9V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_9V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 9V\n");
+ break;
+ case POWER_SUPPLY_DP_DM_FORCE_12V:
+ rc = smblib_force_vbus_voltage(chg, FORCE_12V_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 12V\n");
+ break;
case POWER_SUPPLY_DP_DM_ICL_UP:
default:
break;
@@ -2512,23 +2539,16 @@ int smblib_get_prop_die_health(struct smb_charger *chg,
return rc;
}
- /* TEMP_RANGE bits are mutually exclusive */
- switch (stat & TEMP_RANGE_MASK) {
- case TEMP_BELOW_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_COOL;
- break;
- case TEMP_WITHIN_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_WARM;
- break;
- case TEMP_ABOVE_RANGE_BIT:
- val->intval = POWER_SUPPLY_HEALTH_HOT;
- break;
- case ALERT_LEVEL_BIT:
+ if (stat & ALERT_LEVEL_BIT)
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
+ else if (stat & TEMP_ABOVE_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_HOT;
+ else if (stat & TEMP_WITHIN_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_WARM;
+ else if (stat & TEMP_BELOW_RANGE_BIT)
+ val->intval = POWER_SUPPLY_HEALTH_COOL;
+ else
val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
return 0;
}
@@ -3404,7 +3424,7 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
}
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
smblib_micro_usb_plugin(chg, vbus_rising);
power_supply_changed(chg->usb_psy);
@@ -3566,16 +3586,6 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
/* the APSD done handler will set the USB supply type */
apsd_result = smblib_get_apsd_result(chg);
- if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
- if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
- /* force HVDCP2 to 9V if INOV is disabled */
- rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
- FORCE_9V_BIT, FORCE_9V_BIT);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't force 9V HVDCP rc=%d\n", rc);
- }
- }
smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
apsd_result->name);
@@ -3723,7 +3733,7 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
switch (apsd_result->bit) {
case SDP_CHARGER_BIT:
case CDP_CHARGER_BIT:
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
extcon_set_cable_state_(chg->extcon, EXTCON_USB,
true);
/* if not DCP then no hvdcp timeout happens. Enable pd here */
@@ -3765,7 +3775,8 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
}
smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
- if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT)
+ if ((chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+ && (stat & APSD_DTC_STATUS_DONE_BIT)
&& !chg->uusb_apsd_rerun_done) {
/*
* Force re-run APSD to handle slow insertion related
@@ -4262,7 +4273,7 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- if (chg->micro_usb_mode) {
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) {
cancel_delayed_work_sync(&chg->uusb_otg_work);
vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
@@ -4674,7 +4685,7 @@ static void smblib_vconn_oc_work(struct work_struct *work)
int rc, i;
u8 stat;
- if (chg->micro_usb_mode)
+ if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
return;
smblib_err(chg, "over-current detected on VCONN\n");
@@ -5054,7 +5065,7 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
- rc = qcom_step_chg_init(chg->step_chg_enabled,
+ rc = qcom_step_chg_init(chg->dev, chg->step_chg_enabled,
chg->sw_jeita_enabled);
if (rc < 0) {
smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 1046b27..351a0e9 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -324,7 +324,7 @@ struct smb_charger {
bool sw_jeita_enabled;
bool is_hdc;
bool chg_done;
- bool micro_usb_mode;
+ bool connector_type;
bool otg_en;
bool vconn_en;
bool suspend_input_on_debug_batt;
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index e1603b6..59f2466 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -69,6 +69,12 @@
#define MAX_HICCUP_DUETO_BATDIS_MASK GENMASK(5, 2)
#define HICCUP_TIMEOUT_CFG_MASK GENMASK(1, 0)
+#define BATIF_CFG_SMISC_BATID_REG (BATIF_BASE + 0x73)
+#define CFG_SMISC_RBIAS_EXT_CTRL_BIT BIT(2)
+
+#define BATIF_ENG_SCMISC_SPARE1_REG (BATIF_BASE + 0xC2)
+#define EXT_BIAS_PIN_BIT BIT(2)
+
#define TEMP_COMP_STATUS_REG (MISC_BASE + 0x07)
#define SKIN_TEMP_RST_HOT_BIT BIT(6)
#define SKIN_TEMP_UB_HOT_BIT BIT(5)
@@ -95,6 +101,14 @@
#define BARK_WDOG_TIMEOUT_MASK GENMASK(3, 2)
#define BITE_WDOG_TIMEOUT_MASK GENMASK(1, 0)
+#define MISC_THERMREG_SRC_CFG_REG (MISC_BASE + 0x70)
+#define BYP_THERM_CHG_CURR_ADJUST_BIT BIT(2)
+#define THERMREG_SKIN_CMP_SRC_EN_BIT BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT BIT(0)
+
+#define MISC_CHGR_TRIM_OPTIONS_REG (MISC_BASE + 0x55)
+#define CMD_RBIAS_EN_BIT BIT(2)
+
struct smb_chg_param {
const char *name;
u16 reg;
@@ -137,11 +151,16 @@ struct smb_iio {
struct iio_channel *temp_max_chan;
};
+struct smb_dt_props {
+ bool disable_ctm;
+};
+
struct smb1355 {
struct device *dev;
char *name;
struct regmap *regmap;
+ struct smb_dt_props dt;
struct smb_params param;
struct smb_iio iio;
@@ -289,6 +308,22 @@ static int smb1355_determine_initial_status(struct smb1355 *chip)
return 0;
}
+static int smb1355_parse_dt(struct smb1355 *chip)
+{
+ struct device_node *node = chip->dev->of_node;
+ int rc = 0;
+
+ if (!node) {
+ pr_err("device tree node missing\n");
+ return -EINVAL;
+ }
+
+ chip->dt.disable_ctm =
+ of_property_read_bool(node, "qcom,disable-ctm");
+
+ return rc;
+}
+
/*****************************
* PARALLEL PSY REGISTRATION *
*****************************/
@@ -582,6 +617,92 @@ static int smb1355_init_parallel_psy(struct smb1355 *chip)
* HARDWARE INITIALIZATION *
***************************/
+static int smb1355_tskin_sensor_config(struct smb1355 *chip)
+{
+ int rc;
+
+ if (chip->dt.disable_ctm) {
+ /*
+ * the TSKIN sensor with external resistor needs a bias,
+ * disable it here.
+ */
+ rc = smb1355_masked_write(chip, BATIF_ENG_SCMISC_SPARE1_REG,
+ EXT_BIAS_PIN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't enable ext bias pin path rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smb1355_masked_write(chip, BATIF_CFG_SMISC_BATID_REG,
+ CFG_SMISC_RBIAS_EXT_CTRL_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't set BATIF_CFG_SMISC_BATID rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smb1355_masked_write(chip, MISC_CHGR_TRIM_OPTIONS_REG,
+ CMD_RBIAS_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't set MISC_CHGR_TRIM_OPTIONS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* disable skin temperature comparator source */
+ rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_SKIN_CMP_SRC_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't set Skin temp comparator src rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ /*
+ * the TSKIN sensor with external resistor needs a bias,
+ * enable it here.
+ */
+ rc = smb1355_masked_write(chip, BATIF_ENG_SCMISC_SPARE1_REG,
+ EXT_BIAS_PIN_BIT, EXT_BIAS_PIN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable ext bias pin path rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smb1355_masked_write(chip, BATIF_CFG_SMISC_BATID_REG,
+ CFG_SMISC_RBIAS_EXT_CTRL_BIT,
+ CFG_SMISC_RBIAS_EXT_CTRL_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set BATIF_CFG_SMISC_BATID rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smb1355_masked_write(chip, MISC_CHGR_TRIM_OPTIONS_REG,
+ CMD_RBIAS_EN_BIT,
+ CMD_RBIAS_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set MISC_CHGR_TRIM_OPTIONS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Enable skin temperature comparator source */
+ rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_SKIN_CMP_SRC_EN_BIT,
+ THERMREG_SKIN_CMP_SRC_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set Skin temp comparator src rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static int smb1355_init_hw(struct smb1355 *chip)
{
int rc;
@@ -624,7 +745,7 @@ static int smb1355_init_hw(struct smb1355 *chip)
HICCUP_TIMEOUT_CFG_MASK | MAX_HICCUP_DUETO_BATDIS_MASK,
0);
if (rc < 0) {
- pr_err("Couldn't enable parallel current sensing rc=%d\n",
+ pr_err("Couldn't set HICCUP interval rc=%d\n",
rc);
return rc;
}
@@ -647,6 +768,25 @@ static int smb1355_init_hw(struct smb1355 *chip)
return rc;
}
+ /*
+ * Disable thermal Die temperature comparator source and hw mitigation
+ * for skin/die
+ */
+ rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT,
+ BYP_THERM_CHG_CURR_ADJUST_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set Skin temperature comparator src rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smb1355_tskin_sensor_config(chip);
+ if (rc < 0) {
+ pr_err("Couldn't configure tskin regs rc=%d\n", rc);
+ return rc;
+ }
+
return 0;
}
@@ -780,6 +920,12 @@ static int smb1355_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
+ rc = smb1355_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ goto cleanup;
+ }
+
rc = smb1355_init_hw(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index c759314..a75cbbb 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -13,6 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_batterydata.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/pmic-voter.h>
@@ -56,19 +58,32 @@ struct jeita_fv_cfg {
};
struct step_chg_info {
+ struct device *dev;
ktime_t step_last_update_time;
ktime_t jeita_last_update_time;
bool step_chg_enable;
bool sw_jeita_enable;
+ bool config_is_read;
+ bool step_chg_cfg_valid;
+ bool sw_jeita_cfg_valid;
+ bool soc_based_step_chg;
+ bool batt_missing;
int jeita_fcc_index;
int jeita_fv_index;
int step_index;
+ int get_config_retry_count;
+
+ struct step_chg_cfg *step_chg_config;
+ struct jeita_fcc_cfg *jeita_fcc_config;
+ struct jeita_fv_cfg *jeita_fv_config;
struct votable *fcc_votable;
struct votable *fv_votable;
struct wakeup_source *step_chg_ws;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
struct delayed_work status_change_work;
+ struct delayed_work get_config_work;
struct notifier_block nb;
};
@@ -76,69 +91,10 @@ static struct step_chg_info *the_chip;
#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
-/*
- * Step Charging Configuration
- * Update the table based on the battery profile
- * Supports VBATT and SOC based source
- * range data must be in increasing ranges and shouldn't overlap
- */
-static struct step_chg_cfg step_chg_config = {
- .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
- .prop_name = "VBATT",
- .hysteresis = 100000, /* 100mV */
- .fcc_cfg = {
- /* VBAT_LOW VBAT_HIGH FCC */
- {3600000, 4000000, 3000000},
- {4001000, 4200000, 2800000},
- {4201000, 4400000, 2000000},
- },
- /*
- * SOC STEP-CHG configuration example.
- *
- * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
- * .prop_name = "SOC",
- * .fcc_cfg = {
- * //SOC_LOW SOC_HIGH FCC
- * {20, 70, 3000000},
- * {70, 90, 2750000},
- * {90, 100, 2500000},
- * },
- */
-};
-
-/*
- * Jeita Charging Configuration
- * Update the table based on the battery profile
- * Please ensure that the TEMP ranges are programmed in the hw so that
- * an interrupt is issued and a consequent psy changed will cause us to
- * react immediately.
- * range data must be in increasing ranges and shouldn't overlap.
- * Gaps are okay
- */
-static struct jeita_fcc_cfg jeita_fcc_config = {
- .psy_prop = POWER_SUPPLY_PROP_TEMP,
- .prop_name = "BATT_TEMP",
- .hysteresis = 10, /* 1degC hysteresis */
- .fcc_cfg = {
- /* TEMP_LOW TEMP_HIGH FCC */
- {0, 100, 600000},
- {101, 200, 2000000},
- {201, 450, 3450000},
- {451, 550, 600000},
- },
-};
-
-static struct jeita_fv_cfg jeita_fv_config = {
- .psy_prop = POWER_SUPPLY_PROP_TEMP,
- .prop_name = "BATT_TEMP",
- .hysteresis = 10, /* 1degC hysteresis */
- .fv_cfg = {
- /* TEMP_LOW TEMP_HIGH FCC */
- {0, 100, 4200000},
- {101, 450, 4350000},
- {451, 550, 4200000},
- },
-};
+#define BATT_HOT_DECIDEGREE_MAX 600
+#define GET_CONFIG_DELAY_MS 2000
+#define GET_CONFIG_RETRY_COUNT 50
+#define WAIT_BATT_ID_READY_MS 200
static bool is_batt_available(struct step_chg_info *chip)
{
@@ -151,6 +107,240 @@ static bool is_batt_available(struct step_chg_info *chip)
return true;
}
+static bool is_bms_available(struct step_chg_info *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static int read_range_data_from_node(struct device_node *node,
+ const char *prop_str, struct range_data *ranges,
+ u32 max_threshold, u32 max_value)
+{
+ int rc = 0, i, length, per_tuple_length, tuples;
+
+ rc = of_property_count_elems_of_size(node, prop_str, sizeof(u32));
+ if (rc < 0) {
+ pr_err("Count %s failed, rc=%d\n", prop_str, rc);
+ return rc;
+ }
+
+ length = rc;
+ per_tuple_length = sizeof(struct range_data) / sizeof(u32);
+ if (length % per_tuple_length) {
+ pr_err("%s length (%d) should be multiple of %d\n",
+ prop_str, length, per_tuple_length);
+ return -EINVAL;
+ }
+ tuples = length / per_tuple_length;
+
+ if (tuples > MAX_STEP_CHG_ENTRIES) {
+ pr_err("too many entries(%d), only %d allowed\n",
+ tuples, MAX_STEP_CHG_ENTRIES);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(node, prop_str,
+ (u32 *)ranges, length);
+ if (rc) {
+ pr_err("Read %s failed, rc=%d", prop_str, rc);
+ return rc;
+ }
+
+ for (i = 0; i < tuples; i++) {
+ if (ranges[i].low_threshold >
+ ranges[i].high_threshold) {
+ pr_err("%s thresholds should be in ascendant ranges\n",
+ prop_str);
+ rc = -EINVAL;
+ goto clean;
+ }
+
+ if (i != 0) {
+ if (ranges[i - 1].high_threshold >
+ ranges[i].low_threshold) {
+ pr_err("%s thresholds should be in ascendant ranges\n",
+ prop_str);
+ rc = -EINVAL;
+ goto clean;
+ }
+ }
+
+ if (ranges[i].low_threshold > max_threshold)
+ ranges[i].low_threshold = max_threshold;
+ if (ranges[i].high_threshold > max_threshold)
+ ranges[i].high_threshold = max_threshold;
+ if (ranges[i].value > max_value)
+ ranges[i].value = max_value;
+ }
+
+ return rc;
+clean:
+ memset(ranges, 0, tuples * sizeof(struct range_data));
+ return rc;
+}
+
+static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip)
+{
+ struct device_node *batt_node, *profile_node;
+ u32 max_fv_uv, max_fcc_ma;
+ const char *batt_type_str;
+ const __be32 *handle;
+ int batt_id_ohms, rc;
+ union power_supply_propval prop = {0, };
+
+ handle = of_get_property(chip->dev->of_node,
+ "qcom,battery-data", NULL);
+ if (!handle) {
+ pr_debug("ignore getting sw-jeita/step charging settings from profile\n");
+ return 0;
+ }
+
+ batt_node = of_find_node_by_phandle(be32_to_cpup(handle));
+ if (!batt_node) {
+ pr_err("Get battery data node failed\n");
+ return -EINVAL;
+ }
+
+ if (!is_bms_available(chip))
+ return -ENODEV;
+
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ batt_id_ohms = prop.intval;
+
+ /* bms_psy has not yet read the batt_id */
+ if (batt_id_ohms < 0)
+ return -EBUSY;
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ batt_id_ohms / 1000, NULL);
+ if (IS_ERR(profile_node))
+ return PTR_ERR(profile_node);
+
+ if (!profile_node) {
+ pr_err("Couldn't find profile\n");
+ return -ENODATA;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc < 0) {
+ pr_err("battery type unavailable, rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("battery: %s detected, getting sw-jeita/step charging settings\n",
+ batt_type_str);
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_fv_uv);
+ if (rc < 0) {
+ pr_err("max-voltage_uv reading failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+ &max_fcc_ma);
+ if (rc < 0) {
+ pr_err("max-fastchg-current-ma reading failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->soc_based_step_chg =
+ of_property_read_bool(profile_node, "qcom,soc-based-step-chg");
+ if (chip->soc_based_step_chg) {
+ chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ chip->step_chg_config->prop_name = "SOC";
+ chip->step_chg_config->hysteresis = 0;
+ }
+
+ chip->step_chg_cfg_valid = true;
+ rc = read_range_data_from_node(profile_node,
+ "qcom,step-chg-ranges",
+ chip->step_chg_config->fcc_cfg,
+ chip->soc_based_step_chg ? 100 : max_fv_uv,
+ max_fcc_ma * 1000);
+ if (rc < 0) {
+ pr_debug("Read qcom,step-chg-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->step_chg_cfg_valid = false;
+ }
+
+ chip->sw_jeita_cfg_valid = true;
+ rc = read_range_data_from_node(profile_node,
+ "qcom,jeita-fcc-ranges",
+ chip->jeita_fcc_config->fcc_cfg,
+ BATT_HOT_DECIDEGREE_MAX, max_fcc_ma * 1000);
+ if (rc < 0) {
+ pr_debug("Read qcom,jeita-fcc-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->sw_jeita_cfg_valid = false;
+ }
+
+ rc = read_range_data_from_node(profile_node,
+ "qcom,jeita-fv-ranges",
+ chip->jeita_fv_config->fv_cfg,
+ BATT_HOT_DECIDEGREE_MAX, max_fv_uv);
+ if (rc < 0) {
+ pr_debug("Read qcom,jeita-fv-ranges failed from battery profile, rc=%d\n",
+ rc);
+ chip->sw_jeita_cfg_valid = false;
+ }
+
+ return rc;
+}
+
+static void get_config_work(struct work_struct *work)
+{
+ struct step_chg_info *chip = container_of(work,
+ struct step_chg_info, get_config_work.work);
+ int i, rc;
+
+ chip->config_is_read = false;
+ rc = get_step_chg_jeita_setting_from_profile(chip);
+
+ if (rc < 0) {
+ if (rc == -ENODEV || rc == -EBUSY) {
+ if (chip->get_config_retry_count++
+ < GET_CONFIG_RETRY_COUNT) {
+ pr_debug("bms_psy is not ready, retry: %d\n",
+ chip->get_config_retry_count);
+ goto reschedule;
+ }
+ }
+ }
+
+ chip->config_is_read = true;
+
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ pr_debug("step-chg-cfg: %duV(SoC) ~ %duV(SoC), %duA\n",
+ chip->step_chg_config->fcc_cfg[i].low_threshold,
+ chip->step_chg_config->fcc_cfg[i].high_threshold,
+ chip->step_chg_config->fcc_cfg[i].value);
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ pr_debug("jeita-fcc-cfg: %ddecidegree ~ %ddecidegre, %duA\n",
+ chip->jeita_fcc_config->fcc_cfg[i].low_threshold,
+ chip->jeita_fcc_config->fcc_cfg[i].high_threshold,
+ chip->jeita_fcc_config->fcc_cfg[i].value);
+ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+ pr_debug("jeita-fv-cfg: %ddecidegree ~ %ddecidegre, %duV\n",
+ chip->jeita_fv_config->fv_cfg[i].low_threshold,
+ chip->jeita_fv_config->fv_cfg[i].high_threshold,
+ chip->jeita_fv_config->fv_cfg[i].value);
+
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(GET_CONFIG_DELAY_MS));
+
+}
+
static int get_val(struct range_data *range, int hysteresis, int current_index,
int threshold,
int *new_index, int *val)
@@ -220,21 +410,22 @@ static int handle_step_chg_config(struct step_chg_info *chip)
else
chip->step_chg_enable = pval.intval;
- if (!chip->step_chg_enable) {
+ if (!chip->step_chg_enable || !chip->step_chg_cfg_valid) {
if (chip->fcc_votable)
vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
goto update_time;
}
rc = power_supply_get_property(chip->batt_psy,
- step_chg_config.psy_prop, &pval);
+ chip->step_chg_config->psy_prop, &pval);
if (rc < 0) {
pr_err("Couldn't read %s property rc=%d\n",
- step_chg_config.prop_name, rc);
+ chip->step_chg_config->prop_name, rc);
return rc;
}
- rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+ rc = get_val(chip->step_chg_config->fcc_cfg,
+ chip->step_chg_config->hysteresis,
chip->step_index,
pval.intval,
&chip->step_index,
@@ -254,7 +445,7 @@ static int handle_step_chg_config(struct step_chg_info *chip)
vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
pr_debug("%s = %d Step-FCC = %duA\n",
- step_chg_config.prop_name, pval.intval, fcc_ua);
+ chip->step_chg_config->prop_name, pval.intval, fcc_ua);
update_time:
chip->step_last_update_time = ktime_get();
@@ -278,7 +469,7 @@ static int handle_jeita(struct step_chg_info *chip)
else
chip->sw_jeita_enable = pval.intval;
- if (!chip->sw_jeita_enable) {
+ if (!chip->sw_jeita_enable || !chip->sw_jeita_cfg_valid) {
if (chip->fcc_votable)
vote(chip->fcc_votable, JEITA_VOTER, false, 0);
if (chip->fv_votable)
@@ -291,14 +482,15 @@ static int handle_jeita(struct step_chg_info *chip)
goto reschedule;
rc = power_supply_get_property(chip->batt_psy,
- jeita_fcc_config.psy_prop, &pval);
+ chip->jeita_fcc_config->psy_prop, &pval);
if (rc < 0) {
pr_err("Couldn't read %s property rc=%d\n",
- step_chg_config.prop_name, rc);
+ chip->jeita_fcc_config->prop_name, rc);
return rc;
}
- rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+ rc = get_val(chip->jeita_fcc_config->fcc_cfg,
+ chip->jeita_fcc_config->hysteresis,
chip->jeita_fcc_index,
pval.intval,
&chip->jeita_fcc_index,
@@ -318,7 +510,8 @@ static int handle_jeita(struct step_chg_info *chip)
vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
- rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+ rc = get_val(chip->jeita_fv_config->fv_cfg,
+ chip->jeita_fv_config->hysteresis,
chip->jeita_fv_index,
pval.intval,
&chip->jeita_fv_index,
@@ -337,7 +530,7 @@ static int handle_jeita(struct step_chg_info *chip)
vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
pr_debug("%s = %d FCC = %duA FV = %duV\n",
- step_chg_config.prop_name, pval.intval, fcc_ua, fv_uv);
+ chip->jeita_fcc_config->prop_name, pval.intval, fcc_ua, fv_uv);
update_time:
chip->jeita_last_update_time = ktime_get();
@@ -348,6 +541,39 @@ static int handle_jeita(struct step_chg_info *chip)
return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
}
+static int handle_battery_insertion(struct step_chg_info *chip)
+{
+ int rc;
+ union power_supply_propval pval = {0, };
+
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ if (rc < 0) {
+ pr_err("Get battery present status failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->batt_missing != (!pval.intval)) {
+ chip->batt_missing = !pval.intval;
+ pr_debug("battery %s detected\n",
+ chip->batt_missing ? "removal" : "insertion");
+ if (chip->batt_missing) {
+ chip->step_chg_cfg_valid = false;
+ chip->sw_jeita_cfg_valid = false;
+ chip->get_config_retry_count = 0;
+ } else {
+ /*
+ * Get config for the new inserted battery, delay
+ * to make sure BMS has read out the batt_id.
+ */
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(WAIT_BATT_ID_READY_MS));
+ }
+ }
+
+ return rc;
+}
+
static void status_change_work(struct work_struct *work)
{
struct step_chg_info *chip = container_of(work,
@@ -360,6 +586,7 @@ static void status_change_work(struct work_struct *work)
if (!is_batt_available(chip))
return;
+ handle_battery_insertion(chip);
/* skip elapsed_us debounce for handling battery temperature */
rc = handle_jeita(chip);
if (rc > 0)
@@ -395,6 +622,13 @@ static int step_chg_notifier_call(struct notifier_block *nb,
schedule_delayed_work(&chip->status_change_work, 0);
}
+ if ((strcmp(psy->desc->name, "bms") == 0)) {
+ if (chip->bms_psy == NULL)
+ chip->bms_psy = psy;
+ if (!chip->config_is_read)
+ schedule_delayed_work(&chip->get_config_work, 0);
+ }
+
return NOTIFY_OK;
}
@@ -412,7 +646,8 @@ static int step_chg_register_notifier(struct step_chg_info *chip)
return 0;
}
-int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
+int qcom_step_chg_init(struct device *dev,
+ bool step_chg_enable, bool sw_jeita_enable)
{
int rc;
struct step_chg_info *chip;
@@ -422,48 +657,46 @@ int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
return -EINVAL;
}
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
- if (!chip->step_chg_ws) {
- rc = -EINVAL;
- goto cleanup;
- }
+ if (!chip->step_chg_ws)
+ return -EINVAL;
+ chip->dev = dev;
chip->step_chg_enable = step_chg_enable;
chip->sw_jeita_enable = sw_jeita_enable;
-
chip->step_index = -EINVAL;
chip->jeita_fcc_index = -EINVAL;
chip->jeita_fv_index = -EINVAL;
- if (step_chg_enable && (!step_chg_config.psy_prop ||
- !step_chg_config.prop_name)) {
- /* fail if step-chg configuration is invalid */
- pr_err("Step-chg configuration not defined - fail\n");
- rc = -ENODATA;
- goto release_wakeup_source;
- }
+ chip->step_chg_config = devm_kzalloc(dev,
+ sizeof(struct step_chg_cfg), GFP_KERNEL);
+ if (!chip->step_chg_config)
+ return -ENOMEM;
- if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
- !jeita_fcc_config.prop_name)) {
- /* fail if step-chg configuration is invalid */
- pr_err("Jeita TEMP configuration not defined - fail\n");
- rc = -ENODATA;
- goto release_wakeup_source;
- }
+ chip->step_chg_config->psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
+ chip->step_chg_config->prop_name = "VBATT";
+ chip->step_chg_config->hysteresis = 100000;
- if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
- !jeita_fv_config.prop_name)) {
- /* fail if step-chg configuration is invalid */
- pr_err("Jeita TEMP configuration not defined - fail\n");
- rc = -ENODATA;
- goto release_wakeup_source;
- }
+ chip->jeita_fcc_config = devm_kzalloc(dev,
+ sizeof(struct jeita_fcc_cfg), GFP_KERNEL);
+ chip->jeita_fv_config = devm_kzalloc(dev,
+ sizeof(struct jeita_fv_cfg), GFP_KERNEL);
+ if (!chip->jeita_fcc_config || !chip->jeita_fv_config)
+ return -ENOMEM;
+
+ chip->jeita_fcc_config->psy_prop = POWER_SUPPLY_PROP_TEMP;
+ chip->jeita_fcc_config->prop_name = "BATT_TEMP";
+ chip->jeita_fcc_config->hysteresis = 10;
+ chip->jeita_fv_config->psy_prop = POWER_SUPPLY_PROP_TEMP;
+ chip->jeita_fv_config->prop_name = "BATT_TEMP";
+ chip->jeita_fv_config->hysteresis = 10;
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->get_config_work, get_config_work);
rc = step_chg_register_notifier(chip);
if (rc < 0) {
@@ -471,18 +704,15 @@ int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
goto release_wakeup_source;
}
- the_chip = chip;
+ schedule_delayed_work(&chip->get_config_work,
+ msecs_to_jiffies(GET_CONFIG_DELAY_MS));
- if (step_chg_enable)
- pr_info("Step charging enabled. Using %s source\n",
- step_chg_config.prop_name);
+ the_chip = chip;
return 0;
release_wakeup_source:
wakeup_source_unregister(chip->step_chg_ws);
-cleanup:
- kfree(chip);
return rc;
}
@@ -494,8 +724,8 @@ void qcom_step_chg_deinit(void)
return;
cancel_delayed_work_sync(&chip->status_change_work);
+ cancel_delayed_work_sync(&chip->get_config_work);
power_supply_unreg_notifier(&chip->nb);
wakeup_source_unregister(chip->step_chg_ws);
the_chip = NULL;
- kfree(chip);
}
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index 5bb2b99..2404b86 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -12,6 +12,7 @@
#ifndef __STEP_CHG_H__
#define __STEP_CHG_H__
-int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable);
+int qcom_step_chg_init(struct device *dev,
+ bool step_chg_enable, bool sw_jeita_enable);
void qcom_step_chg_deinit(void);
#endif /* __STEP_CHG_H__ */
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9d19b9a..315a4be 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,8 +37,8 @@
#include "tsi721.h"
#ifdef DEBUG
-u32 dbg_level;
-module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+u32 tsi_dbg_level;
+module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5941437..957eadc 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -40,11 +40,11 @@ enum {
};
#ifdef DEBUG
-extern u32 dbg_level;
+extern u32 tsi_dbg_level;
#define tsi_debug(level, dev, fmt, arg...) \
do { \
- if (DBG_##level & dbg_level) \
+ if (DBG_##level & tsi_dbg_level) \
dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
} while (0)
#else
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d75f157..e463117 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4197,6 +4197,10 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
const struct regulator_ops *ops;
mode_t mode;
+ /* Check if debugfs directory already exists */
+ if (rdev->debugfs)
+ return;
+
/* Avoid duplicate debugfs directory names */
if (parent && rname == rdev->desc->name) {
snprintf(name, sizeof(name), "%s-%s", dev_name(parent),
@@ -4221,6 +4225,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
regulator = regulator_get(NULL, rdev_get_name(rdev));
if (IS_ERR(regulator)) {
+ debugfs_remove_recursive(rdev->debugfs);
rdev_err(rdev, "regulator get failed, ret=%ld\n",
PTR_ERR(regulator));
return;
@@ -4291,6 +4296,8 @@ static int regulator_register_resolve_supply(struct device *dev, void *data)
if (regulator_resolve_supply(rdev))
rdev_dbg(rdev, "unable to resolve supply\n");
+ else
+ rdev_init_debugfs(rdev);
return 0;
}
@@ -4905,6 +4912,16 @@ static int __init regulator_init_complete(void)
if (of_have_populated_dt())
has_full_constraints = true;
+ /*
+ * Regulators may had failed to resolve their input supplies
+ * when were registered, either because the input supply was
+ * not registered yet or because its parent device was not
+ * bound yet. So attempt to resolve the input supplies for
+ * pending regulators before trying to disable unused ones.
+ */
+ class_for_each_device(®ulator_class, NULL, NULL,
+ regulator_register_resolve_supply);
+
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index a315e46..778f482 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -949,6 +949,8 @@ void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg);
void cprh_adjust_voltages_for_mem_acc(struct cpr3_regulator *vreg);
int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
int *fuse_volt_adjust);
+int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+ int fuse_count);
#else
@@ -1133,6 +1135,12 @@ static inline int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
return 0;
}
+static inline int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+ int fuse_count)
+{
+ return -EPERM;
+}
+
#endif /* CONFIG_REGULATOR_CPR3 */
#endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
index 3035155..39ee3c5 100644
--- a/drivers/regulator/cpr3-util.c
+++ b/drivers/regulator/cpr3-util.c
@@ -562,32 +562,41 @@ int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
return -EINVAL;
}
- rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
- &max_fuse_combos);
- if (rc) {
- cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
- rc);
- return rc;
- }
-
/*
- * Sanity check against arbitrarily large value to avoid excessive
- * memory allocation.
+ * Check if CPR3 regulator's fuse_combos_supported element is already
+ * populated by fuse-combo-map logic. If not populated, then parse the
+ * qcom,cpr-fuse-combos property.
*/
- if (max_fuse_combos > 100 || max_fuse_combos == 0) {
- cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
- max_fuse_combos);
- return -EINVAL;
- }
+ if (vreg->fuse_combos_supported)
+ max_fuse_combos = vreg->fuse_combos_supported;
+ else {
+ rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
+ &max_fuse_combos);
+ if (rc) {
+ cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
+ rc);
+ return rc;
+ }
- if (vreg->fuse_combo >= max_fuse_combos) {
- cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
- max_fuse_combos - 1, vreg->fuse_combo);
- BUG_ON(1);
- return -EINVAL;
- }
+ /*
+ * Sanity check against arbitrarily large value to avoid
+ * excessive memory allocation.
+ */
+ if (max_fuse_combos > 100 || max_fuse_combos == 0) {
+ cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
+ max_fuse_combos);
+ return -EINVAL;
+ }
- vreg->fuse_combos_supported = max_fuse_combos;
+ if (vreg->fuse_combo >= max_fuse_combos) {
+ cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
+ max_fuse_combos - 1, vreg->fuse_combo);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ vreg->fuse_combos_supported = max_fuse_combos;
+ }
of_property_read_u32(node, "qcom,cpr-speed-bins", &max_speed_bins);
@@ -2414,3 +2423,76 @@ int cpr3_adjust_target_quotients(struct cpr3_regulator *vreg,
kfree(ro_scale);
return rc;
}
+
+/**
+ * cpr3_parse_fuse_combo_map() - parse fuse combo map data for a CPR3 regulator
+ * from device tree.
+ * @vreg: Pointer to the CPR3 regulator
+ * @fuse_val: Array of selection fuse parameter values
+ * @fuse_count: Number of selection fuse parameters used in fuse combo
+ * map
+ *
+ * This function reads the qcom,cpr-fuse-combo-map device tree property and
+ * populates the fuse_combo element of CPR3 regulator with the row number of
+ * fuse combo map data that matches with the data in fuse_val input array.
+ *
+ * Return: 0 on success, -ENODEV if qcom,cpr-fuse-combo-map property is not
+ * specified in device node, other errno on failure
+ */
+int cpr3_parse_fuse_combo_map(struct cpr3_regulator *vreg, u64 *fuse_val,
+ int fuse_count)
+{
+ struct device_node *node = vreg->of_node;
+ int i, j, len, num_fuse_combos, row_size, rc = 0;
+ u32 *tmp;
+
+ if (!of_find_property(node, "qcom,cpr-fuse-combo-map", &len)) {
+ /* property not specified */
+ return -ENODEV;
+ }
+
+ row_size = fuse_count * 2;
+ if (len == 0 || len % (sizeof(u32) * row_size)) {
+ cpr3_err(vreg, "qcom,cpr-fuse-combo-map length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ num_fuse_combos = len / (sizeof(u32) * row_size);
+ vreg->fuse_combos_supported = num_fuse_combos;
+
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node, "qcom,cpr-fuse-combo-map",
+ tmp, num_fuse_combos * row_size);
+ if (rc) {
+ cpr3_err(vreg, "could not read qcom,cpr-fuse-combo-map, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ for (i = 0; i < num_fuse_combos; i++) {
+ for (j = 0; j < fuse_count; j++) {
+ if (tmp[i * row_size + j * 2] > fuse_val[j]
+ || tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+ break;
+ }
+ if (j == fuse_count) {
+ vreg->fuse_combo = i;
+ break;
+ }
+ }
+
+ if (i >= num_fuse_combos) {
+ cpr3_err(vreg, "No matching CPR fuse combo found!\n");
+ WARN_ON(1);
+ rc = -EINVAL;
+ goto done;
+ }
+
+done:
+ kfree(tmp);
+ return rc;
+}
diff --git a/drivers/regulator/cpr4-apss-regulator.c b/drivers/regulator/cpr4-apss-regulator.c
index cfc09ba..a9602cb 100644
--- a/drivers/regulator/cpr4-apss-regulator.c
+++ b/drivers/regulator/cpr4-apss-regulator.c
@@ -51,9 +51,13 @@
* @speed_bin: Application processor speed bin fuse parameter value for
* the given chip
* @cpr_fusing_rev: CPR fusing revision fuse parameter value
+ * @foundry_id: Foundry identifier fuse parameter value for the given
+ * chip
* @boost_cfg: CPR boost configuration fuse parameter value
* @boost_voltage: CPR boost voltage fuse parameter value (raw, not
* converted to a voltage)
+ * @aging_init_quot_diff: Initial quotient difference between CPR aging
+ * min and max sensors measured at time of manufacturing
*
* This struct holds the values for all of the fuses read from memory.
*/
@@ -64,9 +68,11 @@ struct cpr4_msm8953_apss_fuses {
u64 quot_offset[MSM8953_APSS_FUSE_CORNERS];
u64 speed_bin;
u64 cpr_fusing_rev;
+ u64 foundry_id;
u64 boost_cfg;
u64 boost_voltage;
u64 misc;
+ u64 aging_init_quot_diff;
};
/*
@@ -146,6 +152,11 @@ static const struct cpr3_fuse_param msm8953_apss_speed_bin_param[] = {
{},
};
+static const struct cpr3_fuse_param msm8953_apss_foundry_id_param[] = {
+ {37, 40, 42},
+ {},
+};
+
static const struct cpr3_fuse_param msm8953_cpr_boost_fuse_cfg_param[] = {
{36, 43, 45},
{},
@@ -161,6 +172,21 @@ static const struct cpr3_fuse_param msm8953_misc_fuse_volt_adj_param[] = {
{},
};
+static const struct cpr3_fuse_param msm8953_apss_aging_init_quot_diff_param[]
+= {
+ {72, 0, 7},
+ {},
+};
+
+/*
+ * The maximum number of fuse combinations possible for the selected fuse
+ * parameters in fuse combo map logic.
+ * Here, possible speed-bin values = 8, fuse revision values = 8, and foundry
+ * identifier values = 8. Total number of combinations = 512 (i.e., 8 * 8 * 8)
+ */
+#define CPR4_MSM8953_APSS_FUSE_COMBO_MAP_MAX_COUNT 512
+
+
/*
* The number of possible values for misc fuse is
* 2^(#bits defined for misc fuse)
@@ -206,6 +232,14 @@ static const int msm8953_apss_fuse_ref_volt
*/
static bool boost_fuse[MAX_BOOST_CONFIG_FUSE_VALUE] = {0, 1, 1, 1, 1, 1, 1, 1};
+/* CPR Aging parameters for msm8953 */
+#define MSM8953_APSS_AGING_INIT_QUOT_DIFF_SCALE 1
+#define MSM8953_APSS_AGING_INIT_QUOT_DIFF_SIZE 8
+#define MSM8953_APSS_AGING_SENSOR_ID 6
+
+/* Use a very high value for max aging margin to be applied */
+#define MSM8953_APSS_AGING_MAX_AGE_MARGIN_QUOT (-1000)
+
/**
* cpr4_msm8953_apss_read_fuse_data() - load APSS specific fuse parameter values
* @vreg: Pointer to the CPR3 regulator
@@ -243,6 +277,14 @@ static int cpr4_msm8953_apss_read_fuse_data(struct cpr3_regulator *vreg)
}
cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+ rc = cpr3_read_fuse_param(base, msm8953_apss_foundry_id_param,
+ &fuse->foundry_id);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read foundry id fuse, rc=%d\n", rc);
+ return rc;
+ }
+ cpr3_info(vreg, "foundry id = %llu\n", fuse->foundry_id);
+
rc = cpr3_read_fuse_param(base, msm8953_misc_fuse_volt_adj_param,
&fuse->misc);
if (rc) {
@@ -257,6 +299,14 @@ static int cpr4_msm8953_apss_read_fuse_data(struct cpr3_regulator *vreg)
return -EINVAL;
}
+ rc = cpr3_read_fuse_param(base, msm8953_apss_aging_init_quot_diff_param,
+ &fuse->aging_init_quot_diff);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
for (i = 0; i < MSM8953_APSS_FUSE_CORNERS; i++) {
rc = cpr3_read_fuse_param(base,
msm8953_apss_init_voltage_param[i],
@@ -1120,6 +1170,58 @@ static int cpr4_apss_parse_boost_properties(struct cpr3_regulator *vreg)
return rc;
}
+/*
+ * Constants which define the selection fuse parameters used in fuse combo map
+ * logic.
+ */
+enum cpr4_msm8953_apss_fuse_combo_parameters {
+ MSM8953_APSS_SPEED_BIN = 0,
+ MSM8953_APSS_CPR_FUSE_REV,
+ MSM8953_APSS_FOUNDRY_ID,
+ MSM8953_APSS_FUSE_COMBO_PARAM_COUNT,
+};
+
+/**
+ * cpr4_parse_fuse_combo_map() - parse APSS fuse combo map data from device tree
+ * properties of the CPR3 regulator's device node
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_parse_fuse_combo_map(struct cpr3_regulator *vreg)
+{
+ struct cpr4_msm8953_apss_fuses *fuse = vreg->platform_fuses;
+ u64 *fuse_val;
+ int rc;
+
+ fuse_val = kcalloc(MSM8953_APSS_FUSE_COMBO_PARAM_COUNT,
+ sizeof(*fuse_val), GFP_KERNEL);
+ if (!fuse_val)
+ return -ENOMEM;
+
+ fuse_val[MSM8953_APSS_SPEED_BIN] = fuse->speed_bin;
+ fuse_val[MSM8953_APSS_CPR_FUSE_REV] = fuse->cpr_fusing_rev;
+ fuse_val[MSM8953_APSS_FOUNDRY_ID] = fuse->foundry_id;
+ rc = cpr3_parse_fuse_combo_map(vreg, fuse_val,
+ MSM8953_APSS_FUSE_COMBO_PARAM_COUNT);
+ if (rc == -ENODEV) {
+ cpr3_debug(vreg, "using legacy fuse combo logic, rc=%d\n",
+ rc);
+ rc = 0;
+ } else if (rc < 0) {
+ cpr3_err(vreg, "error reading fuse combo map data, rc=%d\n",
+ rc);
+ } else if (vreg->fuse_combo >=
+ CPR4_MSM8953_APSS_FUSE_COMBO_MAP_MAX_COUNT) {
+ cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+ vreg->fuse_combo);
+ rc = -EINVAL;
+ }
+
+ kfree(fuse_val);
+ return rc;
+}
+
/**
* cpr4_apss_init_regulator() - perform all steps necessary to initialize the
* configuration data for a CPR3 regulator
@@ -1140,6 +1242,13 @@ static int cpr4_apss_init_regulator(struct cpr3_regulator *vreg)
fuse = vreg->platform_fuses;
+ rc = cpr4_parse_fuse_combo_map(vreg);
+ if (rc) {
+ cpr3_err(vreg, "error while parsing fuse combo map, rc=%d\n",
+ rc);
+ return rc;
+ }
+
rc = cpr4_apss_parse_corner_data(vreg);
if (rc) {
cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
@@ -1212,6 +1321,81 @@ static int cpr4_apss_init_regulator(struct cpr3_regulator *vreg)
}
/**
+ * cpr4_apss_init_aging() - perform APSS CPR4 controller specific
+ * aging initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_apss_init_aging(struct cpr3_controller *ctrl)
+{
+ struct cpr4_msm8953_apss_fuses *fuse = NULL;
+ struct cpr3_regulator *vreg = NULL;
+ u32 aging_ro_scale;
+ int i, j, rc;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ if (ctrl->thread[i].vreg[j].aging_allowed) {
+ ctrl->aging_required = true;
+ vreg = &ctrl->thread[i].vreg[j];
+ fuse = vreg->platform_fuses;
+ break;
+ }
+ }
+ }
+
+ if (!ctrl->aging_required || !fuse)
+ return 0;
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+ 1, &aging_ro_scale);
+ if (rc)
+ return rc;
+
+ if (aging_ro_scale == 0) {
+ cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+ aging_ro_scale);
+ return -EINVAL;
+ }
+
+ ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+ ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+ ctrl->aging_sensor_count = 1;
+ ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+ if (!ctrl->aging_sensor)
+ return -ENOMEM;
+
+ ctrl->aging_sensor->sensor_id = MSM8953_APSS_AGING_SENSOR_ID;
+ ctrl->aging_sensor->ro_scale = aging_ro_scale;
+
+ ctrl->aging_sensor->init_quot_diff
+ = cpr3_convert_open_loop_voltage_fuse(0,
+ MSM8953_APSS_AGING_INIT_QUOT_DIFF_SCALE,
+ fuse->aging_init_quot_diff,
+ MSM8953_APSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+ if (ctrl->aging_sensor->init_quot_diff == 0) {
+ /*
+ * Initial quotient difference value '0' has a special meaning
+ * in MSM8953 fusing scheme. Use max age margin quotient
+ * difference to consider full aging margin of 15 mV.
+ */
+ ctrl->aging_sensor->init_quot_diff
+ = MSM8953_APSS_AGING_MAX_AGE_MARGIN_QUOT;
+ cpr3_debug(ctrl, "Init quotient diff = 0, use max age margin quotient\n");
+ }
+
+ cpr3_info(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+ ctrl->aging_sensor->sensor_id,
+ ctrl->aging_sensor->init_quot_diff,
+ ctrl->aging_sensor->ro_scale);
+
+ return 0;
+}
+
+/**
* cpr4_apss_init_controller() - perform APSS CPR4 controller specific
* initializations
* @ctrl: Pointer to the CPR3 controller
@@ -1390,6 +1574,13 @@ static int cpr4_apss_regulator_probe(struct platform_device *pdev)
}
}
+ rc = cpr4_apss_init_aging(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+ rc);
+ return rc;
+ }
+
platform_set_drvdata(pdev, ctrl);
return cpr3_regulator_register(pdev, ctrl);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 13d53a3..589167e 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -491,7 +491,10 @@ static const struct i2c_device_id fan53555_id[] = {
.name = "fan53555",
.driver_data = FAN53555_VENDOR_FAIRCHILD
}, {
- .name = "syr82x",
+ .name = "syr827",
+ .driver_data = FAN53555_VENDOR_SILERGY
+ }, {
+ .name = "syr828",
.driver_data = FAN53555_VENDOR_SILERGY
}, {
.name = "hl7509",
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
index 4c03dec..e22a259 100644
--- a/drivers/regulator/mem-acc-regulator.c
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -108,6 +108,8 @@ struct mem_acc_regulator {
u32 *phys_reg_addr_list;
void __iomem **remap_reg_addr_list;
struct corner_acc_reg_config *corner_acc_reg_config;
+ u32 *override_acc_range_fuse_list;
+ int override_acc_range_fuse_num;
};
static DEFINE_MUTEX(mem_acc_memory_mutex);
@@ -549,9 +551,8 @@ static int mem_acc_custom_data_init(struct platform_device *pdev,
return 0;
}
-static int override_mem_acc_custom_data(struct platform_device *pdev,
- struct mem_acc_regulator *mem_acc_vreg,
- int mem_type)
+static int override_mem_acc_custom_data(struct mem_acc_regulator *mem_acc_vreg,
+ int mem_type)
{
char *custom_apc_data_str;
int len, rc = 0, i;
@@ -647,27 +648,48 @@ static int mem_acc_override_corner_map(struct mem_acc_regulator *mem_acc_vreg)
}
-static int mem_acc_find_override_map_match(struct platform_device *pdev,
- struct mem_acc_regulator *mem_acc_vreg)
+static void mem_acc_read_efuse_param(struct mem_acc_regulator *mem_acc_vreg,
+ u32 *fuse_sel, int *val)
{
- struct device_node *of_node = pdev->dev.of_node;
+ u64 fuse_bits;
+
+ fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+ fuse_sel[3]);
+ /*
+ * fuse_sel[1] = LSB position in row (shift)
+ * fuse_sel[2] = num of bits (mask)
+ */
+ *val = (fuse_bits >> fuse_sel[1]) & ((1 << fuse_sel[2]) - 1);
+}
+
+#define FUSE_TUPLE_SIZE 4
+static int mem_acc_parse_override_fuse_version_map(
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
int i, rc, tuple_size;
int len = 0;
u32 *tmp;
- char *prop_str = "qcom,override-fuse-version-map";
+ u32 fuse_sel[4];
+ char *prop_str;
- /* Specify default no match case. */
- mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
- mem_acc_vreg->override_map_count = 0;
-
- if (!of_find_property(of_node, prop_str, &len)) {
- /* No mapping present. */
- return 0;
+ prop_str = "qcom,override-acc-fuse-sel";
+ rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+ FUSE_TUPLE_SIZE);
+ if (rc < 0) {
+ pr_err("Read failed - %s rc=%d\n", prop_str, rc);
+ return rc;
}
+ mem_acc_read_efuse_param(mem_acc_vreg, fuse_sel,
+ &mem_acc_vreg->override_fuse_value);
+
+ prop_str = "qcom,override-fuse-version-map";
+ if (!of_find_property(of_node, prop_str, &len))
+ return -EINVAL;
+
tuple_size = 1;
mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
-
if (len == 0 || len % (sizeof(u32) * tuple_size)) {
pr_err("%s length=%d is invalid\n", prop_str, len);
return -EINVAL;
@@ -695,8 +717,9 @@ static int mem_acc_find_override_map_match(struct platform_device *pdev,
}
if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
- pr_debug("%s tuple match found: %d\n", prop_str,
- mem_acc_vreg->override_map_match);
+ pr_info("override_fuse_val=%d, %s tuple match found: %d\n",
+ mem_acc_vreg->override_fuse_value, prop_str,
+ mem_acc_vreg->override_map_match);
else
pr_err("%s tuple match not found\n", prop_str);
@@ -705,6 +728,121 @@ static int mem_acc_find_override_map_match(struct platform_device *pdev,
return rc;
}
+static int mem_acc_parse_override_fuse_version_range(
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ int i, j, rc, size, row_size;
+ int num_fuse_sel, len = 0;
+ u32 *tmp = NULL;
+ char *prop_str;
+ u32 *fuse_val, *fuse_sel;
+ char *buf = NULL;
+ int pos = 0, buflen;
+
+ prop_str = "qcom,override-acc-range-fuse-list";
+ if (!of_find_property(of_node, prop_str, &len)) {
+ pr_err("%s property is missing\n", prop_str);
+ return -EINVAL;
+ }
+
+ size = len / sizeof(u32);
+ if (len == 0 || (size % FUSE_TUPLE_SIZE)) {
+ pr_err("%s property length (%d) is invalid\n", prop_str, len);
+ return -EINVAL;
+ }
+
+ num_fuse_sel = size / FUSE_TUPLE_SIZE;
+ fuse_val = devm_kcalloc(mem_acc_vreg->dev, num_fuse_sel,
+ sizeof(*fuse_val), GFP_KERNEL);
+ if (!fuse_val)
+ return -ENOMEM;
+ mem_acc_vreg->override_acc_range_fuse_list = fuse_val;
+ mem_acc_vreg->override_acc_range_fuse_num = num_fuse_sel;
+
+ fuse_sel = kzalloc(len, GFP_KERNEL);
+ if (!fuse_sel) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+ size);
+ if (rc) {
+ pr_err("%s read failed, rc=%d\n", prop_str, rc);
+ goto done;
+ }
+
+ for (i = 0; i < num_fuse_sel; i++) {
+ mem_acc_read_efuse_param(mem_acc_vreg, &fuse_sel[i * 4],
+ &fuse_val[i]);
+ }
+
+ prop_str = "qcom,override-fuse-range-map";
+ if (!of_find_property(of_node, prop_str, &len))
+ goto done;
+
+ row_size = num_fuse_sel * 2;
+ mem_acc_vreg->override_map_count = len / (sizeof(u32) * row_size);
+
+ if (len == 0 || len % (sizeof(u32) * row_size)) {
+ pr_err("%s length=%d is invalid\n", prop_str, len);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = of_property_read_u32_array(of_node, prop_str, tmp,
+ mem_acc_vreg->override_map_count * row_size);
+ if (rc) {
+ pr_err("could not read %s rc=%d\n", prop_str, rc);
+ goto done;
+ }
+
+ for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+ for (j = 0; j < num_fuse_sel; j++) {
+ if (tmp[i * row_size + j * 2] > fuse_val[j]
+ || tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+ break;
+ }
+
+ if (j == num_fuse_sel) {
+ mem_acc_vreg->override_map_match = i;
+ break;
+ }
+ }
+
+ /*
+ * Log register and value mapping since they are useful for
+ * baseline MEM ACC logging.
+ */
+ buflen = num_fuse_sel * sizeof("fuse_selxxxx = XXXX ");
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ goto done;
+
+ for (j = 0; j < num_fuse_sel; j++)
+ pos += scnprintf(buf + pos, buflen - pos, "fuse_sel%d = %d ",
+ j, fuse_val[j]);
+ buf[pos] = '\0';
+ if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+ pr_info("%s %s tuple match found: %d\n", buf, prop_str,
+ mem_acc_vreg->override_map_match);
+ else
+ pr_err("%s %s tuple match not found\n", buf, prop_str);
+
+done:
+ kfree(fuse_sel);
+ kfree(tmp);
+ kfree(buf);
+ return rc;
+}
+
#define MAX_CHARS_PER_INT 20
static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
@@ -789,6 +927,150 @@ static int mem_acc_get_reg_addr_val(struct device_node *of_node,
return rc;
}
+static int mem_acc_override_reg_addr_val_init(
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ struct corner_acc_reg_config *corner_acc_reg_config;
+ struct acc_reg_value *override_reg_config_list;
+ int i, tuple_count, tuple_match, len = 0, rc = 0;
+ u32 list_size, override_max_reg_config_len;
+ char prop_str[40];
+ struct property *prop;
+ int num_corners = mem_acc_vreg->num_corners;
+
+ if (!mem_acc_vreg->corner_acc_reg_config)
+ return 0;
+
+ if (mem_acc_vreg->override_map_count) {
+ if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+ return 0;
+ tuple_count = mem_acc_vreg->override_map_count;
+ tuple_match = mem_acc_vreg->override_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+ for (i = 1; i <= num_corners; i++) {
+ snprintf(prop_str, sizeof(prop_str),
+ "qcom,override-corner%d-addr-val-map", i);
+ prop = of_find_property(of_node, prop_str, &len);
+ list_size = len / (tuple_count * sizeof(u32));
+ if (!prop) {
+ pr_debug("%s property not specified\n", prop_str);
+ continue;
+ }
+
+ if ((!list_size) || list_size < (num_corners * 2)) {
+ pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
+ i, len);
+ return -EINVAL;
+ }
+
+ override_max_reg_config_len = list_size / (num_corners * 2);
+ override_reg_config_list =
+ corner_acc_reg_config[i].reg_config_list;
+
+ if (corner_acc_reg_config[i].max_reg_config_len
+ != override_max_reg_config_len) {
+ /* Free already allocate memory */
+ devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+ /* Allocated memory for new requirement */
+ override_reg_config_list =
+ devm_kcalloc(mem_acc_vreg->dev,
+ override_max_reg_config_len * num_corners,
+ sizeof(*override_reg_config_list), GFP_KERNEL);
+ if (!override_reg_config_list)
+ return -ENOMEM;
+
+ corner_acc_reg_config[i].max_reg_config_len =
+ override_max_reg_config_len;
+ corner_acc_reg_config[i].reg_config_list =
+ override_reg_config_list;
+ }
+
+ rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+ override_reg_config_list, tuple_match,
+ list_size, mem_acc_vreg->num_acc_reg);
+ if (rc) {
+ pr_err("Failed to read %s property: rc=%d\n",
+ prop_str, rc);
+ return rc;
+ }
+
+ rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+ &corner_acc_reg_config[i], i);
+ if (rc) {
+ pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int mem_acc_parse_override_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ int i, rc = 0;
+
+ /* Specify default no match case. */
+ mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+ mem_acc_vreg->override_map_count = 0;
+
+ if (of_find_property(of_node, "qcom,override-fuse-range-map",
+ NULL)) {
+ rc = mem_acc_parse_override_fuse_version_range(mem_acc_vreg);
+ if (rc) {
+ pr_err("parsing qcom,override-fuse-range-map property failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else if (of_find_property(of_node, "qcom,override-fuse-version-map",
+ NULL)) {
+ rc = mem_acc_parse_override_fuse_version_map(mem_acc_vreg);
+ if (rc) {
+ pr_err("parsing qcom,override-fuse-version-map property failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ /* No override fuse configuration defined in device node */
+ return 0;
+ }
+
+ if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+ return 0;
+
+ rc = mem_acc_override_corner_map(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to override corner map rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to override reg_config_list init rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ rc = override_mem_acc_custom_data(mem_acc_vreg, i);
+ if (rc) {
+ pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
{
struct device_node *of_node = mem_acc_vreg->dev->of_node;
@@ -965,92 +1247,6 @@ static int mem_acc_reg_config_init(struct mem_acc_regulator *mem_acc_vreg)
return rc;
}
-static int mem_acc_override_reg_addr_val_init(
- struct mem_acc_regulator *mem_acc_vreg)
-{
- struct device_node *of_node = mem_acc_vreg->dev->of_node;
- struct corner_acc_reg_config *corner_acc_reg_config;
- struct acc_reg_value *override_reg_config_list;
- int i, tuple_count, tuple_match, len = 0, rc = 0;
- u32 list_size, override_max_reg_config_len;
- char prop_str[40];
- struct property *prop;
- int num_corners = mem_acc_vreg->num_corners;
-
- if (!mem_acc_vreg->corner_acc_reg_config)
- return 0;
-
- if (mem_acc_vreg->override_map_count) {
- if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
- return 0;
- tuple_count = mem_acc_vreg->override_map_count;
- tuple_match = mem_acc_vreg->override_map_match;
- } else {
- tuple_count = 1;
- tuple_match = 0;
- }
-
- corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
- for (i = 1; i <= num_corners; i++) {
- snprintf(prop_str, sizeof(prop_str),
- "qcom,override-corner%d-addr-val-map", i);
- prop = of_find_property(of_node, prop_str, &len);
- list_size = len / (tuple_count * sizeof(u32));
- if (!prop) {
- pr_debug("%s property not specified\n", prop_str);
- continue;
- }
-
- if ((!list_size) || list_size < (num_corners * 2)) {
- pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
- i, len);
- return -EINVAL;
- }
-
- override_max_reg_config_len = list_size / (num_corners * 2);
- override_reg_config_list =
- corner_acc_reg_config[i].reg_config_list;
-
- if (corner_acc_reg_config[i].max_reg_config_len
- != override_max_reg_config_len) {
- /* Free already allocate memory */
- devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
-
- /* Allocated memory for new requirement */
- override_reg_config_list =
- devm_kcalloc(mem_acc_vreg->dev,
- override_max_reg_config_len * num_corners,
- sizeof(*override_reg_config_list), GFP_KERNEL);
- if (!override_reg_config_list)
- return -ENOMEM;
-
- corner_acc_reg_config[i].max_reg_config_len =
- override_max_reg_config_len;
- corner_acc_reg_config[i].reg_config_list =
- override_reg_config_list;
- }
-
- rc = mem_acc_get_reg_addr_val(of_node, prop_str,
- override_reg_config_list, tuple_match,
- list_size, mem_acc_vreg->num_acc_reg);
- if (rc) {
- pr_err("Failed to read %s property: rc=%d\n",
- prop_str, rc);
- return rc;
- }
-
- rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
- &corner_acc_reg_config[i], i);
- if (rc) {
- pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
- i, rc);
- return rc;
- }
- }
-
- return rc;
-}
-
#define MEM_TYPE_STRING_LEN 20
static int mem_acc_init(struct platform_device *pdev,
struct mem_acc_regulator *mem_acc_vreg)
@@ -1058,8 +1254,6 @@ static int mem_acc_init(struct platform_device *pdev,
struct device_node *of_node = pdev->dev.of_node;
struct resource *res;
int len, rc, i, j;
- u32 fuse_sel[4];
- u64 fuse_bits;
bool acc_type_present = false;
char tmps[MEM_TYPE_STRING_LEN];
@@ -1201,59 +1395,12 @@ static int mem_acc_init(struct platform_device *pdev,
}
}
- if (of_find_property(mem_acc_vreg->dev->of_node,
- "qcom,override-acc-fuse-sel", NULL)) {
- rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
- "qcom,override-acc-fuse-sel", fuse_sel, 4);
- if (rc < 0) {
- pr_err("Read failed - qcom,override-acc-fuse-sel rc=%d\n",
- rc);
- return rc;
- }
-
- fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
- fuse_sel[3]);
- /*
- * fuse_sel[1] = LSB position in row (shift)
- * fuse_sel[2] = num of bits (mask)
- */
- mem_acc_vreg->override_fuse_value = (fuse_bits >> fuse_sel[1]) &
- ((1 << fuse_sel[2]) - 1);
-
- rc = mem_acc_find_override_map_match(pdev, mem_acc_vreg);
- if (rc) {
- pr_err("Unable to find fuse map match rc=%d\n", rc);
- return rc;
- }
-
- pr_debug("override_fuse_val=%d override_map_match=%d\n",
- mem_acc_vreg->override_fuse_value,
- mem_acc_vreg->override_map_match);
-
- rc = mem_acc_override_corner_map(mem_acc_vreg);
- if (rc) {
- pr_err("Unable to override corner map rc=%d\n", rc);
- return rc;
- }
-
- rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
- if (rc) {
- pr_err("Unable to override reg_config_list init rc=%d\n",
- rc);
- return rc;
- }
-
- for (i = 0; i < MEMORY_MAX; i++) {
- rc = override_mem_acc_custom_data(pdev,
- mem_acc_vreg, i);
- if (rc) {
- pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
- i, rc);
- return rc;
- }
- }
+ rc = mem_acc_parse_override_config(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to parse mem acc override configuration, rc=%d\n",
+ rc);
+ return rc;
}
-
if (acc_type_present) {
mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
mem_acc_vreg->dev, mem_acc_vreg->num_corners *
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 6b1e480..d672d5f 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -65,6 +65,7 @@
#define REG_LAB_PRECHARGE_CTL 0x5E
#define REG_LAB_SOFT_START_CTL 0x5F
#define REG_LAB_SPARE_CTL 0x60
+#define REG_LAB_MISC_CTL 0x60 /* PMI8998/PM660A */
#define REG_LAB_PFM_CTL 0x62
/* LAB registers for PM660A */
@@ -137,6 +138,9 @@
#define LAB_SPARE_TOUCH_WAKE_BIT BIT(3)
#define LAB_SPARE_DISABLE_SCP_BIT BIT(0)
+/* REG_LAB_MISC_CTL */
+#define LAB_AUTO_GM_BIT BIT(4)
+
/* REG_LAB_PFM_CTL */
#define LAB_PFM_EN_BIT BIT(7)
@@ -1854,7 +1858,7 @@ static int qpnp_labibb_save_settings(struct qpnp_labibb *labibb)
static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb)
{
int rc = 0;
- u8 val;
+ u8 val, mask;
val = 0;
rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL,
@@ -1874,10 +1878,16 @@ static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb)
return rc;
}
- val = IBB_WAIT_MBG_OK;
+ if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ val = 0;
+ mask = IBB_DIS_DLY_MASK;
+ } else {
+ val = IBB_WAIT_MBG_OK;
+ mask = IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK;
+ }
+
rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
- REG_IBB_PWRUP_PWRDN_CTL_2,
- IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK, val);
+ REG_IBB_PWRUP_PWRDN_CTL_2, mask, val);
if (rc < 0) {
pr_err("write to register %x failed rc = %d\n",
REG_IBB_PWRUP_PWRDN_CTL_2, rc);
@@ -1953,7 +1963,7 @@ static int qpnp_labibb_ttw_enter_ibb_pmi8950(struct qpnp_labibb *labibb)
static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
{
int rc = 0;
- u8 val;
+ u8 val, reg;
/* Save the IBB settings before they get modified for TTW mode */
if (!labibb->ibb_settings_saved) {
@@ -2015,10 +2025,17 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
}
val = LAB_SPARE_DISABLE_SCP_BIT;
+
if (labibb->pmic_rev_id->pmic_subtype != PMI8950_SUBTYPE)
val |= LAB_SPARE_TOUCH_WAKE_BIT;
- rc = qpnp_labibb_write(labibb, labibb->lab_base +
- REG_LAB_SPARE_CTL, &val, 1);
+
+ if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ reg = REG_LAB_MISC_CTL;
+ val |= LAB_AUTO_GM_BIT;
+ } else {
+ reg = REG_LAB_SPARE_CTL;
+ }
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1);
if (rc < 0) {
pr_err("qpnp_labibb_write register %x failed rc = %d\n",
REG_LAB_SPARE_CTL, rc);
@@ -2048,7 +2065,15 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
case PMI8950_SUBTYPE:
rc = qpnp_labibb_ttw_enter_ibb_pmi8950(labibb);
break;
+ case PMI8998_SUBTYPE:
+ rc = labibb->lab_ver_ops->ps_ctl(labibb, 70, true);
+ if (rc < 0)
+ break;
+
+ rc = qpnp_ibb_ps_config(labibb, true);
+ break;
}
+
if (rc < 0) {
pr_err("Failed to configure TTW-enter for IBB rc=%d\n", rc);
return rc;
@@ -2081,7 +2106,7 @@ static int qpnp_labibb_ttw_exit_ibb_common(struct qpnp_labibb *labibb)
static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
{
int rc = 0;
- u8 val;
+ u8 val, reg;
if (!labibb->ibb_settings_saved) {
pr_err("IBB settings are not saved!\n");
@@ -2115,8 +2140,14 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
}
val = 0;
- rc = qpnp_labibb_write(labibb, labibb->lab_base +
- REG_LAB_SPARE_CTL, &val, 1);
+ if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ reg = REG_LAB_MISC_CTL;
+ val |= LAB_AUTO_GM_BIT;
+ } else {
+ reg = REG_LAB_SPARE_CTL;
+ }
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1);
if (rc < 0) {
pr_err("qpnp_labibb_write register %x failed rc = %d\n",
REG_LAB_SPARE_CTL, rc);
@@ -3692,6 +3723,9 @@ static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb)
case PMI8950_SUBTYPE:
/* TTW supported for all revisions */
break;
+ case PMI8998_SUBTYPE:
+ /* TTW supported for all revisions */
+ break;
default:
pr_info("TTW mode not supported for PMIC-subtype = %d\n",
labibb->pmic_rev_id->pmic_subtype);
diff --git a/drivers/regulator/refgen.c b/drivers/regulator/refgen.c
index 629fee0..830e1b0 100644
--- a/drivers/regulator/refgen.c
+++ b/drivers/regulator/refgen.c
@@ -31,7 +31,7 @@
#define REFGEN_BIAS_EN_DISABLE 0x6
#define REFGEN_REG_BG_CTRL 0x14
-#define REFGEN_BG_CTRL_MASK GENMASK(2, 0)
+#define REFGEN_BG_CTRL_MASK GENMASK(2, 1)
#define REFGEN_BG_CTRL_ENABLE 0x6
#define REFGEN_BG_CTRL_DISABLE 0x4
@@ -41,11 +41,21 @@ struct refgen {
void __iomem *addr;
};
+static void masked_writel(u32 val, u32 mask, void __iomem *addr)
+{
+ u32 reg;
+
+ reg = readl_relaxed(addr);
+ reg = (reg & ~mask) | (val & mask);
+ writel_relaxed(reg, addr);
+}
+
static int refgen_enable(struct regulator_dev *rdev)
{
struct refgen *vreg = rdev_get_drvdata(rdev);
- writel_relaxed(REFGEN_BG_CTRL_ENABLE, vreg->addr + REFGEN_REG_BG_CTRL);
+ masked_writel(REFGEN_BG_CTRL_ENABLE, REFGEN_BG_CTRL_MASK,
+ vreg->addr + REFGEN_REG_BG_CTRL);
writel_relaxed(REFGEN_BIAS_EN_ENABLE, vreg->addr + REFGEN_REG_BIAS_EN);
return 0;
@@ -56,7 +66,8 @@ static int refgen_disable(struct regulator_dev *rdev)
struct refgen *vreg = rdev_get_drvdata(rdev);
writel_relaxed(REFGEN_BIAS_EN_DISABLE, vreg->addr + REFGEN_REG_BIAS_EN);
- writel_relaxed(REFGEN_BG_CTRL_DISABLE, vreg->addr + REFGEN_REG_BG_CTRL);
+ masked_writel(REFGEN_BG_CTRL_DISABLE, REFGEN_BG_CTRL_MASK,
+ vreg->addr + REFGEN_REG_BG_CTRL);
return 0;
}
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 562b05a..1de08d4 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -36,10 +36,13 @@
* %RPMH_REGULATOR_TYPE_ARC: RPMh ARC accelerator which supports voting on
* the CPR managed voltage level of LDO and SMPS
* type PMIC regulators.
+ * %RPMH_REGULATOR_TYPE_XOB: RPMh XOB accelerator which supports voting on
+ * the enable state of PMIC regulators.
*/
enum rpmh_regulator_type {
RPMH_REGULATOR_TYPE_VRM,
RPMH_REGULATOR_TYPE_ARC,
+ RPMH_REGULATOR_TYPE_XOB,
};
/**
@@ -52,6 +55,7 @@ enum rpmh_regulator_type {
* for enable voting. Instead, ARC level
* 0 corresponds to "disabled" for a given
* ARC regulator resource if supported.
+ * %RPMH_REGULATOR_REG_XOB_ENABLE: XOB enable voting register index
* %RPMH_REGULATOR_REG_ENABLE: Common enable index used in callback
* functions for both ARC and VRM.
* %RPMH_REGULATOR_REG_VRM_MODE: VRM regulator mode voting register index
@@ -61,6 +65,8 @@ enum rpmh_regulator_type {
* register indices
* %RPMH_REGULATOR_REG_ARC_MAX: Exclusive upper limit of ARC register
* indices
+ * %RPMH_REGULATOR_REG_XOB_MAX: Exclusive upper limit of XOB register
+ * indices
* %RPMH_REGULATOR_REG_VRM_MAX: Exclusive upper limit of VRM register
* indices
* %RPMH_REGULATOR_REG_MAX: Combined exclusive upper limit of ARC
@@ -73,11 +79,13 @@ enum rpmh_regulator_reg_index {
RPMH_REGULATOR_REG_ARC_LEVEL = 0,
RPMH_REGULATOR_REG_VRM_ENABLE = 1,
RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE = RPMH_REGULATOR_REG_VRM_ENABLE,
+ RPMH_REGULATOR_REG_XOB_ENABLE = RPMH_REGULATOR_REG_VRM_ENABLE,
RPMH_REGULATOR_REG_ENABLE = RPMH_REGULATOR_REG_VRM_ENABLE,
RPMH_REGULATOR_REG_VRM_MODE = 2,
RPMH_REGULATOR_REG_VRM_HEADROOM = 3,
RPMH_REGULATOR_REG_ARC_REAL_MAX = 1,
RPMH_REGULATOR_REG_ARC_MAX = 2,
+ RPMH_REGULATOR_REG_XOB_MAX = 2,
RPMH_REGULATOR_REG_VRM_MAX = 4,
RPMH_REGULATOR_REG_MAX = 4,
};
@@ -104,6 +112,9 @@ enum rpmh_regulator_reg_index {
#define RPMH_VRM_MODE_MIN 0
#define RPMH_VRM_MODE_MAX 7
+/* XOB voting registers are found in the VRM hardware module */
+#define CMD_DB_HW_XOB CMD_DB_HW_VRM
+
/*
* Mapping from RPMh VRM accelerator modes to regulator framework modes
* Assumes that SMPS PFM mode == LDO LPM mode and SMPS PWM mode == LDO HPM mode
@@ -297,6 +308,10 @@ static const char *const rpmh_regulator_arc_param_names[] = {
[RPMH_REGULATOR_REG_ARC_LEVEL] = "hlvl",
};
+static const char *const rpmh_regulator_xob_param_names[] = {
+ [RPMH_REGULATOR_REG_XOB_ENABLE] = "en",
+};
+
/**
* rpmh_regulator_req() - print the rpmh regulator request to the kernel log
* @vreg: Pointer to the RPMh regulator
@@ -323,12 +338,22 @@ static void rpmh_regulator_req(struct rpmh_vreg *vreg,
u32 valid;
bool first;
- max_reg_index = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
- ? RPMH_REGULATOR_REG_VRM_MAX
- : RPMH_REGULATOR_REG_ARC_REAL_MAX;
- param_name = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
- ? rpmh_regulator_vrm_param_names
- : rpmh_regulator_arc_param_names;
+ switch (aggr_vreg->regulator_type) {
+ case RPMH_REGULATOR_TYPE_VRM:
+ max_reg_index = RPMH_REGULATOR_REG_VRM_MAX;
+ param_name = rpmh_regulator_vrm_param_names;
+ break;
+ case RPMH_REGULATOR_TYPE_ARC:
+ max_reg_index = RPMH_REGULATOR_REG_ARC_REAL_MAX;
+ param_name = rpmh_regulator_arc_param_names;
+ break;
+ case RPMH_REGULATOR_TYPE_XOB:
+ max_reg_index = RPMH_REGULATOR_REG_XOB_MAX;
+ param_name = rpmh_regulator_xob_param_names;
+ break;
+ default:
+ return;
+ }
pos += scnprintf(buf + pos, buflen - pos,
"%s (%s), addr=0x%05X: s=%s; sent: ",
@@ -438,9 +463,20 @@ rpmh_regulator_send_aggregate_requests(struct rpmh_vreg *vreg)
enum rpmh_state state;
u32 sent_mask;
- max_reg_index = aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
- ? RPMH_REGULATOR_REG_VRM_MAX
- : RPMH_REGULATOR_REG_ARC_MAX;
+ switch (aggr_vreg->regulator_type) {
+ case RPMH_REGULATOR_TYPE_VRM:
+ max_reg_index = RPMH_REGULATOR_REG_VRM_MAX;
+ break;
+ case RPMH_REGULATOR_TYPE_ARC:
+ max_reg_index = RPMH_REGULATOR_REG_ARC_MAX;
+ break;
+ case RPMH_REGULATOR_TYPE_XOB:
+ max_reg_index = RPMH_REGULATOR_REG_XOB_MAX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Perform max aggregration of each register value across all regulators
* which use this RPMh resource.
@@ -1005,9 +1041,16 @@ static const struct regulator_ops rpmh_regulator_arc_ops = {
.list_voltage = rpmh_regulator_arc_list_voltage,
};
+static const struct regulator_ops rpmh_regulator_xob_ops = {
+ .enable = rpmh_regulator_enable,
+ .disable = rpmh_regulator_disable,
+ .is_enabled = rpmh_regulator_is_enabled,
+};
+
static const struct regulator_ops *rpmh_regulator_ops[] = {
[RPMH_REGULATOR_TYPE_VRM] = &rpmh_regulator_vrm_ops,
[RPMH_REGULATOR_TYPE_ARC] = &rpmh_regulator_arc_ops,
+ [RPMH_REGULATOR_TYPE_XOB] = &rpmh_regulator_xob_ops,
};
/**
@@ -1322,6 +1365,13 @@ static int rpmh_regulator_load_default_parameters(struct rpmh_vreg *vreg)
rc = of_property_read_u32(vreg->of_node, prop, &temp);
if (!rc)
vreg->rdesc.min_dropout_uV = temp;
+ } else if (type == RPMH_REGULATOR_TYPE_XOB) {
+ prop = "qcom,init-enable";
+ rc = of_property_read_u32(vreg->of_node, prop, &temp);
+ if (!rc)
+ rpmh_regulator_set_reg(vreg,
+ RPMH_REGULATOR_REG_XOB_ENABLE,
+ !!temp);
}
return 0;
@@ -1408,6 +1458,10 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg)
init_data->constraints.valid_ops_mask
|= REGULATOR_CHANGE_VOLTAGE;
+ if (type == RPMH_REGULATOR_TYPE_XOB
+ && init_data->constraints.min_uV == init_data->constraints.max_uV)
+ vreg->rdesc.fixed_uV = init_data->constraints.min_uV;
+
if (vreg->aggr_vreg->mode_count) {
init_data->constraints.valid_ops_mask
|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
@@ -1434,8 +1488,19 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg)
init_data->constraints.valid_ops_mask
|= REGULATOR_CHANGE_STATUS;
- vreg->rdesc.n_voltages = type == RPMH_REGULATOR_TYPE_ARC ?
- vreg->aggr_vreg->level_count : 2;
+ switch (type) {
+ case RPMH_REGULATOR_TYPE_VRM:
+ vreg->rdesc.n_voltages = 2;
+ break;
+ case RPMH_REGULATOR_TYPE_ARC:
+ vreg->rdesc.n_voltages = vreg->aggr_vreg->level_count;
+ break;
+ case RPMH_REGULATOR_TYPE_XOB:
+ vreg->rdesc.n_voltages = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
rc = of_property_read_u32(vreg->of_node, "qcom,set", &set);
if (rc) {
@@ -1493,6 +1558,10 @@ static const struct of_device_id rpmh_regulator_match_table[] = {
.compatible = "qcom,rpmh-arc-regulator",
.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_ARC,
},
+ {
+ .compatible = "qcom,rpmh-xob-regulator",
+ .data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_XOB,
+ },
{}
};
@@ -1570,11 +1639,15 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
if ((aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
&& sid != CMD_DB_HW_ARC)
|| (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
- && sid != CMD_DB_HW_VRM)) {
+ && sid != CMD_DB_HW_VRM)
+ || (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_XOB
+ && sid != CMD_DB_HW_XOB)) {
aggr_vreg_err(aggr_vreg, "RPMh slave ID mismatch; config=%d (%s) != cmd-db=%d\n",
aggr_vreg->regulator_type,
aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
- ? "ARC" : "VRM",
+ ? "ARC" : (aggr_vreg->regulator_type
+ == RPMH_REGULATOR_TYPE_VRM
+ ? "VRM" : "XOB"),
sid);
return -EINVAL;
}
@@ -1643,7 +1716,10 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n",
aggr_vreg->addr,
aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
- ? "ARC" : "VRM");
+ ? "ARC"
+ : (aggr_vreg->regulator_type
+ == RPMH_REGULATOR_TYPE_VRM
+ ? "VRM" : "XOB"));
return rc;
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index 47f0ffd..1799fd4 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -154,8 +154,8 @@ static int ti_syscon_reset_status(struct reset_controller_dev *rcdev,
if (ret)
return ret;
- return (reset_state & BIT(control->status_bit)) &&
- (control->flags & STATUS_SET);
+ return !(reset_state & BIT(control->status_bit)) ==
+ !(control->flags & STATUS_SET);
}
static struct reset_control_ops ti_syscon_reset_ops = {
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 84a52db..f1d4ca2 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -372,6 +372,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+ return;
+
+ rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+}
+
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
@@ -399,7 +407,11 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+ } else if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 >=
+ rtc->aie_timer.node.expires.tv64)){
+ rtc_alarm_disable(rtc);
}
+
mutex_unlock(&rtc->ops_lock);
return err;
}
@@ -790,14 +802,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
- if (!rtc->ops || !rtc->ops->alarm_irq_enable)
- return;
-
- rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
-}
-
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
diff --git a/drivers/rtc/qpnp-rtc.c b/drivers/rtc/qpnp-rtc.c
index a2c004e..4152086 100644
--- a/drivers/rtc/qpnp-rtc.c
+++ b/drivers/rtc/qpnp-rtc.c
@@ -599,9 +599,6 @@ static int qpnp_rtc_probe(struct platform_device *pdev)
goto fail_rtc_enable;
}
- /* Init power_on_alarm after adding rtc device */
- power_on_alarm_init();
-
/* Request the alarm IRQ */
rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de0890..5ecd408 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1704,8 +1704,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* check for for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
- device->discipline->check_attention(device, irb->esw.esw1.lpum);
- dasd_put_device(device);
+ if (!IS_ERR(device)) {
+ device->discipline->check_attention(device,
+ irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
}
if (!cqr)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f3756ca..d55e643 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -921,7 +921,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e8c4830..21ef802 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5091,6 +5090,20 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5102,14 +5115,14 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5d010aa..8530477 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1204,21 +1204,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
rc = qeth_l2_start_ipassists(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 171be5e..03a2619 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3230,21 +3230,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0e00a5c..cffe42f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -692,15 +692,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +708,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +855,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +871,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3d..b3f9243 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->next_port_scan = jiffies;
+ adapter->erp_action.adapter = adapter;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
+ port->erp_action.adapter = adapter;
+ port->erp_action.port = port;
+
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36e..34367d1 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
+
+ /* cap all but accept CT responses to at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
+ rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d4..b60667c 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of paload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce5..3b23d675 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
- erp_action->sdev = sdev;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ WARN_ON_ONCE(erp_action->port != NULL);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}
- erp_action->adapter = adapter;
+ WARN_ON_ONCE(erp_action->adapter != adapter);
+ memset(&erp_action->list, 0, sizeof(erp_action->list));
+ memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+ erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+ erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541..a227582 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f..1964391 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb..a9b8104 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+ zfcp_sdev->erp_action.adapter = adapter;
+ zfcp_sdev->erp_action.sdev = sdev;
+
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
+ zfcp_sdev->erp_action.port = port;
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
@@ -273,25 +278,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6678d1f..065f11a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2954,16 +2954,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
return;
BUG_ON(fibptr == NULL);
+
dev = fibptr->dev;
- scsi_dma_unmap(scsicmd);
-
- /* expose physical device if expose_physicald flag is on */
- if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
- && expose_physicals > 0)
- aac_expose_phy_device(scsicmd);
-
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2976,158 +2971,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
*/
scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
- le32_to_cpu(srbreply->data_xfer_length));
- /*
- * First check the fib status
- */
+ }
- if (le32_to_cpu(srbreply->status) != ST_OK) {
- int len;
- printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
+ scsi_dma_unmap(scsicmd);
+
+ /* expose physical device if expose_physicald flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
+
+ /*
+ * First check the fib status
+ */
+
+ if (le32_to_cpu(srbreply->status) != ST_OK) {
+ int len;
+
+ pr_warn("aac_srb_callback: srb failed, status = %d\n",
+ le32_to_cpu(srbreply->status));
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_CHECK_CONDITION;
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
+
+ /*
+ * Next check the srb status
+ */
+ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+ case SRB_STATUS_ERROR_RECOVERY:
+ case SRB_STATUS_PENDING:
+ case SRB_STATUS_SUCCESS:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ break;
+ case SRB_STATUS_DATA_OVERRUN:
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length)
+ < scsicmd->underflow)
+ pr_warn("aacraid: SCSI CMD underflow\n");
+ else
+ pr_warn("aacraid: SCSI CMD Data Overrun\n");
scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
- }
-
- /*
- * Next check the srb status
- */
- switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
- case SRB_STATUS_ERROR_RECOVERY:
- case SRB_STATUS_PENDING:
- case SRB_STATUS_SUCCESS:
+ | COMMAND_COMPLETE << 8;
+ break;
+ case INQUIRY:
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ default:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
- case SRB_STATUS_DATA_OVERRUN:
- switch (scsicmd->cmnd[0]) {
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_16:
- case WRITE_16:
- if (le32_to_cpu(srbreply->data_xfer_length)
- < scsicmd->underflow)
- printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
- else
- printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- case INQUIRY: {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
- default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- break;
- }
- break;
- case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
- break;
- case SRB_STATUS_ABORT_FAILED:
- /*
- * Not sure about this one - but assuming the
- * hba was trying to abort for some reason
- */
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
- break;
- case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
- break;
- case SRB_STATUS_NO_DEVICE:
- case SRB_STATUS_INVALID_PATH_ID:
- case SRB_STATUS_INVALID_TARGET_ID:
- case SRB_STATUS_INVALID_LUN:
- case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ }
+ break;
+ case SRB_STATUS_ABORTED:
+ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_ABORT_FAILED:
+ /*
+ * Not sure about this one - but assuming the
+ * hba was trying to abort for some reason
+ */
+ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ break;
+ case SRB_STATUS_PARITY_ERROR:
+ scsicmd->result = DID_PARITY << 16
+ | MSG_PARITY_ERROR << 8;
+ break;
+ case SRB_STATUS_NO_DEVICE:
+ case SRB_STATUS_INVALID_PATH_ID:
+ case SRB_STATUS_INVALID_TARGET_ID:
+ case SRB_STATUS_INVALID_LUN:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ scsicmd->result = DID_NO_CONNECT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_COMMAND_TIMEOUT:
- case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_COMMAND_TIMEOUT:
+ case SRB_STATUS_TIMEOUT:
+ scsicmd->result = DID_TIME_OUT << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUSY:
+ scsicmd->result = DID_BUS_BUSY << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
- break;
+ case SRB_STATUS_BUS_RESET:
+ scsicmd->result = DID_RESET << 16
+ | COMMAND_COMPLETE << 8;
+ break;
- case SRB_STATUS_MESSAGE_REJECTED:
- scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
- break;
- case SRB_STATUS_REQUEST_FLUSHED:
- case SRB_STATUS_ERROR:
- case SRB_STATUS_INVALID_REQUEST:
- case SRB_STATUS_REQUEST_SENSE_FAILED:
- case SRB_STATUS_NO_HBA:
- case SRB_STATUS_UNEXPECTED_BUS_FREE:
- case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
- case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
- case SRB_STATUS_DELAYED_RETRY:
- case SRB_STATUS_BAD_FUNCTION:
- case SRB_STATUS_NOT_STARTED:
- case SRB_STATUS_NOT_IN_USE:
- case SRB_STATUS_FORCE_ABORT:
- case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
- default:
+ case SRB_STATUS_MESSAGE_REJECTED:
+ scsicmd->result = DID_ERROR << 16
+ | MESSAGE_REJECT << 8;
+ break;
+ case SRB_STATUS_REQUEST_FLUSHED:
+ case SRB_STATUS_ERROR:
+ case SRB_STATUS_INVALID_REQUEST:
+ case SRB_STATUS_REQUEST_SENSE_FAILED:
+ case SRB_STATUS_NO_HBA:
+ case SRB_STATUS_UNEXPECTED_BUS_FREE:
+ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+ case SRB_STATUS_DELAYED_RETRY:
+ case SRB_STATUS_BAD_FUNCTION:
+ case SRB_STATUS_NOT_STARTED:
+ case SRB_STATUS_NOT_IN_USE:
+ case SRB_STATUS_FORCE_ABORT:
+ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
- le32_to_cpu(srbreply->srb_status) & 0x3F,
- aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
- le32_to_cpu(srbreply->scsi_status));
+ pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
+ le32_to_cpu(srbreply->srb_status) & 0x3F,
+ aac_get_status_string(
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->scsi_status));
#endif
- if ((scsicmd->cmnd[0] == ATA_12)
- || (scsicmd->cmnd[0] == ATA_16)) {
- if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
- } else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
- }
+ /*
+ * When the CC bit is SET by the host in ATA pass thru CDB,
+ * driver is supposed to return DID_OK
+ *
+ * When the CC bit is RESET by the host, driver should
+ * return DID_ERROR
+ */
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
} else {
scsicmd->result = DID_ERROR << 16
| COMMAND_COMPLETE << 8;
- break;
+ break;
}
- }
- if (le32_to_cpu(srbreply->scsi_status)
- == SAM_STAT_CHECK_CONDITION) {
- int len;
-
- scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
- SCSI_SENSE_BUFFERSIZE);
-#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len);
-#endif
- memcpy(scsicmd->sense_buffer,
- srbreply->sense_data, len);
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
}
}
+ if (le32_to_cpu(srbreply->scsi_status)
+ == SAM_STAT_CHECK_CONDITION) {
+ int len;
+
+ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
+#ifdef AAC_DETAILED_STATUS_INFO
+ pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
+#endif
+ memcpy(scsicmd->sense_buffer,
+ srbreply->sense_data, len);
+ }
+
/*
* OR in the scsi status (already shifted up a bit)
*/
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index ba25821..963c732 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -166,33 +166,6 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
}
/**
- * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
- * @beiscsi_conn: The pointer to beiscsi_conn structure
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
- struct beiscsi_conn *beiscsi_conn,
- unsigned int cid)
-{
- uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
- if (phba->conn_table[cri_index]) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Connection table already occupied. Detected clash\n");
-
- return -EINVAL;
- } else {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
- cri_index, beiscsi_conn);
-
- phba->conn_table[cri_index] = beiscsi_conn;
- }
- return 0;
-}
-
-/**
* beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
* @cls_session: pointer to iscsi cls session
* @cls_conn: pointer to iscsi cls conn
@@ -212,6 +185,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct hwi_wrb_context *pwrb_context;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
+ uint16_t cri_index;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -229,20 +203,34 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST;
}
-
- pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
- beiscsi_ep->ep_cid)];
+ cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+ if (phba->conn_table[cri_index]) {
+ if (beiscsi_conn != phba->conn_table[cri_index] ||
+ beiscsi_ep != phba->conn_table[cri_index]->ep) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n",
+ cri_index,
+ beiscsi_ep->ep_cid,
+ beiscsi_conn,
+ phba->conn_table[cri_index]);
+ return -EINVAL;
+ }
+ }
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn;
+ /**
+ * Each connection is associated with a WRBQ kept in wrb_context.
+ * Store doorbell offset for transmit path.
+ */
+ pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
-
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
- beiscsi_conn, conn, beiscsi_ep->ep_cid);
-
- return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
+ "BS_%d : cid %d phba->conn_table[%u]=%p\n",
+ beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
+ phba->conn_table[cri_index] = beiscsi_conn;
+ return 0;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
@@ -973,9 +961,9 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
*/
static int beiscsi_get_cid(struct beiscsi_hba *phba)
{
- unsigned short cid = 0xFFFF, cid_from_ulp;
- struct ulp_cid_info *cid_info = NULL;
uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+ unsigned short cid, cid_from_ulp;
+ struct ulp_cid_info *cid_info;
/* Find the ULP which has more CID available */
cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
@@ -984,20 +972,27 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
BEISCSI_ULP1_AVLBL_CID(phba) : 0;
cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
BEISCSI_ULP0 : BEISCSI_ULP1;
+ /**
+ * If iSCSI protocol is loaded only on ULP 0, and when cid_avlbl_ulp
+ * is ZERO for both, ULP 1 is returned.
+ * Check if ULP is loaded before getting new CID.
+ */
+ if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported))
+ return BE_INVALID_CID;
- if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
- cid_info = phba->cid_array_info[cid_from_ulp];
- if (!cid_info->avlbl_cids)
- return cid;
-
- cid = cid_info->cid_array[cid_info->cid_alloc++];
-
- if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
- phba, cid_from_ulp))
- cid_info->cid_alloc = 0;
-
- cid_info->avlbl_cids--;
+ cid_info = phba->cid_array_info[cid_from_ulp];
+ cid = cid_info->cid_array[cid_info->cid_alloc];
+ if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : failed to get cid: available %u:%u\n",
+ cid_info->avlbl_cids, cid_info->cid_free);
+ return BE_INVALID_CID;
}
+ /* empty the slot */
+ cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID;
+ if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp))
+ cid_info->cid_alloc = 0;
+ cid_info->avlbl_cids--;
return cid;
}
@@ -1008,22 +1003,28 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
*/
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{
- uint16_t cid_post_ulp;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_wrb_context *pwrb_context;
- struct ulp_cid_info *cid_info = NULL;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+ struct hwi_wrb_context *pwrb_context;
+ struct hwi_controller *phwi_ctrlr;
+ struct ulp_cid_info *cid_info;
+ uint16_t cid_post_ulp;
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
cid_post_ulp = pwrb_context->ulp_num;
cid_info = phba->cid_array_info[cid_post_ulp];
- cid_info->avlbl_cids++;
-
+ /* fill only in empty slot */
+ if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : failed to put cid %u: available %u:%u\n",
+ cid, cid_info->avlbl_cids, cid_info->cid_free);
+ return;
+ }
cid_info->cid_array[cid_info->cid_free++] = cid;
if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
cid_info->cid_free = 0;
+ cid_info->avlbl_cids++;
}
/**
@@ -1037,8 +1038,8 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
- phba->ep_array[BE_GET_CRI_FROM_CID
- (beiscsi_ep->ep_cid)] = NULL;
+ /* clear this to track freeing in beiscsi_ep_disconnect */
+ phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL;
/**
* Check if any connection resource allocated by driver
@@ -1049,6 +1050,11 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
return;
beiscsi_conn = beiscsi_ep->conn;
+ /**
+ * Break ep->conn link here so that completions after
+ * this are ignored.
+ */
+ beiscsi_ep->conn = NULL;
if (beiscsi_conn->login_in_progress) {
beiscsi_free_mgmt_task_handles(beiscsi_conn,
beiscsi_conn->task);
@@ -1079,7 +1085,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : In beiscsi_open_conn\n");
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
- if (beiscsi_ep->ep_cid == 0xFFFF) {
+ if (beiscsi_ep->ep_cid == BE_INVALID_CID) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : No free cid available\n");
return ret;
@@ -1285,26 +1291,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
}
/**
- * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
- unsigned int cid)
-{
- uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
- if (phba->conn_table[cri_index])
- phba->conn_table[cri_index] = NULL;
- else {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : Connection table Not occupied.\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/**
* beiscsi_ep_disconnect - Tears down the TCP connection
* @ep: endpoint to be used
*
@@ -1318,13 +1304,23 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
unsigned int tag;
uint8_t mgmt_invalidate_flag, tcp_upload_flag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
+ uint16_t cri_index;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+ "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n",
beiscsi_ep->ep_cid);
+ cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+ if (!phba->ep_array[cri_index]) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : ep_array at %u cid %u empty\n",
+ cri_index,
+ beiscsi_ep->ep_cid);
+ return;
+ }
+
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
@@ -1356,7 +1352,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
free_ep:
msleep(BEISCSI_LOGOUT_SYNC_DELAY);
beiscsi_free_ep(beiscsi_ep);
- beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
+ if (!phba->conn_table[cri_index])
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : conn_table empty at %u: cid %u\n",
+ cri_index,
+ beiscsi_ep->ep_cid);
+ phba->conn_table[cri_index] = NULL;
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2..741cc96 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4085,9 +4085,10 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
}
/* Allocate memory for CID array */
- ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
- BEISCSI_GET_CID_COUNT(phba,
- ulp_num), GFP_KERNEL);
+ ptr_cid_info->cid_array =
+ kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
+ sizeof(*ptr_cid_info->cid_array),
+ GFP_KERNEL);
if (!ptr_cid_info->cid_array) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory"
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 6376657..02d00ab 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -358,6 +358,7 @@ struct beiscsi_hba {
unsigned int age;
struct list_head hba_queue;
#define BE_MAX_SESSION 2048
+#define BE_INVALID_CID 0xffff
#define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 375d818..d5f6fbf 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -461,7 +461,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err;
+ int err = SCSI_DH_OK;
char *sp_model;
err = send_inquiry_cmd(sdev, 0, csdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd..35cbd36 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1901,9 +1901,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+ cmd_mfi->frame->hdr.cmd_status =
+ MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
+ }
}
}
} else {
@@ -5290,7 +5293,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
- if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+ if ((resetwaittime < 1) ||
+ (resetwaittime > MEGASAS_RESET_WAIT_TIME))
resetwaittime = MEGASAS_RESET_WAIT_TIME;
if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
@@ -5459,6 +5463,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+ dev_info(&instance->pdev->dev,
+ "%s %d out of range class %d send by application\n",
+ __func__, __LINE__, curr_aen.members.class);
+ return 0;
+ }
+
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bd04bd0..a156451 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1960,7 +1960,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
*/
static void
megasas_build_syspd_fusion(struct megasas_instance *instance,
- struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
+ bool fp_possible)
{
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
@@ -2064,6 +2065,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
u16 sge_count;
u8 cmd_type;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scp->device->hostdata;
/* Zero out some fields so they don't get reused */
memset(io_request->LUN, 0x0, 8);
@@ -2092,12 +2095,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
megasas_build_ld_nonrw_fusion(instance, scp, cmd);
break;
case READ_WRITE_SYSPDIO:
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
+ break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support &&
- (cmd_type == NON_READ_WRITE_SYSPDIO))
- megasas_build_syspd_fusion(instance, scp, cmd, 0);
+ if (instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable)
+ megasas_build_syspd_fusion(instance, scp, cmd, false);
else
- megasas_build_syspd_fusion(instance, scp, cmd, 1);
+ megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
default:
break;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8c4641b..9a34afc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
+ if (size > ha->optrom_size - start)
+ size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
@@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 3dfb54a..f8ae704 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+ wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
10*HZ);
spin_lock_irqsave(&ha->vport_slock, flags);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 42bca61..c39551b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3696,7 +3696,7 @@ iscsi_if_rx(struct sk_buff *skb)
uint32_t group;
nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) ||
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b9290e7..02823a7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,8 +2770,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_same(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@@ -2786,7 +2784,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@@ -2800,8 +2798,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4bd6fd4..9965135 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -122,7 +122,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@@ -146,8 +146,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
@@ -833,6 +832,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -949,7 +981,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@@ -962,7 +994,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@@ -1031,40 +1064,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
- }
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
@@ -1172,7 +1178,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@@ -2055,7 +2061,6 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
/* Called without mutex lock to avoid deadlock */
sfp->res_in_use = 0;
@@ -2068,7 +2073,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@@ -2086,70 +2091,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@@ -2168,7 +2148,7 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
@@ -2209,10 +2189,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ }
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2615,7 +2598,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2635,9 +2618,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
@@ -2672,7 +2653,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index c5ab1b0..2bf96d3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1559,6 +1559,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 494ecd1..db4ecec 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -121,7 +121,8 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
struct ufs_hba *hba = host->hba;
- ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
+ ret = simple_write_to_buffer(configuration,
+ TESTBUS_CFG_BUFF_LINE_SIZE - 1,
&buff_pos, ubuf, cnt);
if (ret < 0) {
dev_err(host->hba->dev, "%s: failed to read user data\n",
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 0c86263..84765b1 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -170,17 +170,15 @@ int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
unsigned long flags;
- struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
- struct request *req_pending = NULL;
if (!qcom_host->ice.vops->config_start)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
- req_pending = qcom_host->req_pending;
- if (!req_pending) {
+ if (!qcom_host->req_pending) {
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
}
@@ -189,24 +187,15 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
/*
* config_start is called again as previous attempt returned -EAGAIN,
* this call shall now take care of the necessary key setup.
- * 'ice_set' will not actually be used, instead the next call to
- * config_start() for this request, in the normal call flow, will
- * succeed as the key has now been setup.
*/
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
- qcom_host->req_pending, &ice_set, false);
+ qcom_host->req_pending, NULL, false);
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
qcom_host->req_pending = NULL;
+ qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
- /*
- * Resume with requests processing. We assume config_start has been
- * successful, but even if it wasn't we still must resume in order to
- * allow for the request to be retried.
- */
- ufshcd_scsi_unblock_requests(qcom_host->hba);
-
}
/**
@@ -285,18 +274,14 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
if (!schedule_work(
@@ -307,10 +292,9 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
@@ -409,9 +393,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
* requires a non-atomic context, this means we should
* call the function again from the worker thread to do
* the configuration. For this request the error will
- * propagate so it will be re-queued and until the
- * configuration is is completed we block further
- * request processing.
+ * propagate so it will be re-queued.
*/
if (err == -EAGAIN) {
@@ -419,9 +401,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
"%s: scheduling task for ice setup\n",
__func__);
- if (!qcom_host->req_pending) {
- ufshcd_scsi_block_requests(
- qcom_host->hba);
+ if (!qcom_host->work_pending) {
+
qcom_host->req_pending = cmd->request;
if (!schedule_work(
&qcom_host->ice_cfg_work)) {
@@ -431,10 +412,9 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
&qcom_host->ice_work_lock,
flags);
- ufshcd_scsi_unblock_requests(
- qcom_host->hba);
return err;
}
+ qcom_host->work_pending = true;
}
} else {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 0ab656e..9da3d19 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -375,6 +375,7 @@ struct ufs_qcom_host {
struct work_struct ice_cfg_work;
struct request *req_pending;
struct ufs_vreg *vddp_ref_clk;
+ bool work_pending;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b98d2ae..a6bc1da 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -549,19 +549,19 @@ static inline void ufshcd_remove_non_printable(char *val)
#ifdef CONFIG_TRACEPOINTS
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
- struct ufshcd_cmd_log_entry *entry, u8 opcode)
+ struct ufshcd_cmd_log_entry *entry)
{
if (trace_ufshcd_command_enabled()) {
u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
entry->doorbell, entry->transfer_len, intr,
- entry->lba, opcode);
+ entry->lba, entry->cmd_id);
}
}
#else
static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
- struct ufshcd_cmd_log_entry *entry, u8 opcode)
+ struct ufshcd_cmd_log_entry *entry)
{
}
#endif
@@ -582,7 +582,7 @@ static void ufshcd_cmd_log_init(struct ufs_hba *hba)
static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
- sector_t lba, int transfer_len, u8 opcode)
+ sector_t lba, int transfer_len)
{
struct ufshcd_cmd_log_entry *entry;
@@ -606,19 +606,18 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
hba->cmd_log.pos =
(hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
- ufshcd_add_command_trace(hba, entry, opcode);
+ ufshcd_add_command_trace(hba, entry);
}
static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
unsigned int tag, u8 cmd_id, u8 idn)
{
- __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
- 0xff, (sector_t)-1, -1, -1);
+ __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn, 0, 0, 0);
}
static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
{
- ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+ ufshcd_cmd_log(hba, str, "dme", 0, cmd_id, 0);
}
static void ufshcd_print_cmd_log(struct ufs_hba *hba)
@@ -653,7 +652,7 @@ static void ufshcd_cmd_log_init(struct ufs_hba *hba)
static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
- sector_t lba, int transfer_len, u8 opcode)
+ sector_t lba, int transfer_len)
{
struct ufshcd_cmd_log_entry entry;
@@ -663,7 +662,7 @@ static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
entry.tag = tag;
- ufshcd_add_command_trace(hba, &entry, opcode);
+ ufshcd_add_command_trace(hba, &entry);
}
static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
@@ -683,8 +682,8 @@ static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
char *cmd_type = NULL;
u8 opcode = 0;
u8 cmd_id = 0, idn = 0;
- sector_t lba = -1;
- int transfer_len = -1;
+ sector_t lba = 0;
+ int transfer_len = 0;
lrbp = &hba->lrb[tag];
@@ -718,7 +717,7 @@ static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
}
__ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
- lrbp->lun, lba, transfer_len, opcode);
+ lrbp->lun, lba, transfer_len);
}
#else
static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
@@ -909,7 +908,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_info.quirks);
- ufshcd_print_fsm_state(hba);
}
/**
@@ -2366,7 +2364,8 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
- ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+ ufshcd_cond_add_cmd_trace(hba, task_tag,
+ hba->lrb[task_tag].cmd ? "scsi_send" : "dev_cmd_send");
ufshcd_update_tag_stats(hba, task_tag);
return ret;
}
@@ -2483,7 +2482,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
hba->active_uic_cmd = uic_cmd;
- ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
+ ufshcd_dme_cmd_log(hba, "dme_send", hba->active_uic_cmd->command);
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -2517,7 +2516,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
if (ret)
ufsdbg_set_err_state(hba);
- ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+ ufshcd_dme_cmd_log(hba, "dme_cmpl_1", hba->active_uic_cmd->command);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
@@ -4450,7 +4449,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
- ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+ ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
out:
if (ret) {
@@ -5695,7 +5694,7 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
lrbp = &hba->lrb[index];
cmd = lrbp->cmd;
if (cmd) {
- ufshcd_cond_add_cmd_trace(hba, index, "failed");
+ ufshcd_cond_add_cmd_trace(hba, index, "scsi_failed");
ufshcd_update_error_stats(hba,
UFS_ERR_INT_FATAL_ERRORS);
scsi_dma_unmap(cmd);
@@ -5725,7 +5724,7 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete) {
ufshcd_cond_add_cmd_trace(hba, index,
- "dev_failed");
+ "dev_cmd_failed");
ufshcd_outstanding_req_clear(hba, index);
complete(hba->dev_cmd.complete);
}
@@ -5753,7 +5752,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp = &hba->lrb[index];
cmd = lrbp->cmd;
if (cmd) {
- ufshcd_cond_add_cmd_trace(hba, index, "complete");
+ ufshcd_cond_add_cmd_trace(hba, index, "scsi_cmpl");
ufshcd_update_tag_stats_completion(hba, cmd);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
@@ -5799,7 +5798,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete) {
ufshcd_cond_add_cmd_trace(hba, index,
- "dcmp");
+ "dev_cmd_cmpl");
complete(hba->dev_cmd.complete);
}
}
@@ -7033,6 +7032,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
*/
scsi_print_command(cmd);
if (!hba->req_abort_count) {
+ ufshcd_print_fsm_state(hba);
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
@@ -9260,7 +9260,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
}
- flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 4c9210a..f34467b 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -333,6 +333,20 @@ static void slim_report(struct work_struct *work)
}
}
+static void slim_device_reset(struct work_struct *work)
+{
+ struct slim_driver *sbdrv;
+ struct slim_device *sbdev =
+ container_of(work, struct slim_device, device_reset);
+
+ if (!sbdev->dev.driver)
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv && sbdrv->reset_device)
+ sbdrv->reset_device(sbdev);
+}
+
/*
* slim_add_device: Add a new device without register board info.
* @ctrl: Controller to which this device is to be added to.
@@ -353,6 +367,7 @@ int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev)
INIT_LIST_HEAD(&sbdev->mark_suspend);
INIT_LIST_HEAD(&sbdev->mark_removal);
INIT_WORK(&sbdev->wd, slim_report);
+ INIT_WORK(&sbdev->device_reset, slim_device_reset);
mutex_lock(&ctrl->m_ctrl);
list_add_tail(&sbdev->dev_list, &ctrl->devs);
mutex_unlock(&ctrl->m_ctrl);
@@ -684,16 +699,9 @@ void slim_framer_booted(struct slim_controller *ctrl)
mutex_unlock(&ctrl->sched.m_reconf);
mutex_lock(&ctrl->m_ctrl);
list_for_each_safe(pos, next, &ctrl->devs) {
- struct slim_driver *sbdrv;
-
sbdev = list_entry(pos, struct slim_device, dev_list);
- mutex_unlock(&ctrl->m_ctrl);
- if (sbdev && sbdev->dev.driver) {
- sbdrv = to_slim_driver(sbdev->dev.driver);
- if (sbdrv->reset_device)
- sbdrv->reset_device(sbdev);
- }
- mutex_lock(&ctrl->m_ctrl);
+ if (sbdev)
+ queue_work(ctrl->wq, &sbdev->device_reset);
}
mutex_unlock(&ctrl->m_ctrl);
}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 62306bad..18aaacc 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -686,3 +686,11 @@
and ETM registers are saved and restored across power collapse.
If unsure, say 'N' here to avoid potential power, performance and
memory penalty.
+
+config QCOM_QDSS_BRIDGE
+ bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM"
+ depends on MSM_MHI
+ help
+ The driver will help route diag traffic from modem side over the QDSS
+ sub-system to USB on APSS side. The driver acts as a bridge between the
+ MHI and USB interface. If unsure, say N.
\ No newline at end of file
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9a4e010..bb08357 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -79,3 +79,4 @@
obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
+obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index 252bd21..72abf50 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -197,6 +197,7 @@ int cmd_db_get_aux_data(const char *resource_id, u8 *data, int len)
len);
return len;
}
+EXPORT_SYMBOL(cmd_db_get_aux_data);
int cmd_db_get_aux_data_len(const char *resource_id)
{
@@ -208,6 +209,7 @@ int cmd_db_get_aux_data_len(const char *resource_id)
return ret < 0 ? 0 : ent.len;
}
+EXPORT_SYMBOL(cmd_db_get_aux_data_len);
u16 cmd_db_get_version(const char *resource_id)
{
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index e11efb0..cff407e 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/scm.h>
+#include <dt-bindings/soc/qcom,dcc_v2.h>
#define TIMEOUT_US (100)
@@ -536,7 +537,7 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
mutex_lock(&drvdata->mutex);
- memset_io(drvdata->ram_base, 0, drvdata->ram_size);
+ memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
@@ -554,14 +555,12 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
goto err;
}
- /* 3. If in capture mode program DCC_RAM_CFG reg */
- if (drvdata->func_type[list] == DCC_FUNC_TYPE_CAPTURE) {
- dcc_writel(drvdata, ram_cfg_base +
- drvdata->ram_offset/4, DCC_LL_BASE(list));
- dcc_writel(drvdata, drvdata->ram_start +
- drvdata->ram_offset/4, DCC_FD_BASE(list));
- dcc_writel(drvdata, 0xFFF, DCC_LL_TIMEOUT(list));
- }
+ /* 3. program DCC_RAM_CFG reg */
+ dcc_writel(drvdata, ram_cfg_base +
+ drvdata->ram_offset/4, DCC_LL_BASE(list));
+ dcc_writel(drvdata, drvdata->ram_start +
+ drvdata->ram_offset/4, DCC_FD_BASE(list));
+ dcc_writel(drvdata, 0xFFF, DCC_LL_TIMEOUT(list));
/* 4. Configure trigger, data sink and function type */
dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig << 8) |
@@ -813,6 +812,9 @@ static ssize_t dcc_show_config(struct device *dev,
buf[0] = '\0';
+ if (drvdata->curr_list >= DCC_MAX_LINK_LIST)
+ return -EINVAL;
+
mutex_lock(&drvdata->mutex);
list_for_each_entry(entry,
&drvdata->cfg_head[drvdata->curr_list], list) {
@@ -1088,14 +1090,30 @@ static ssize_t dcc_store_interrupt_disable(struct device *dev,
static DEVICE_ATTR(interrupt_disable, 0644,
dcc_show_interrupt_disable, dcc_store_interrupt_disable);
+static int dcc_add_loop(struct dcc_drvdata *drvdata, unsigned long loop_cnt)
+{
+ struct dcc_config_entry *entry;
+
+ entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->loop_cnt = min_t(uint32_t, loop_cnt, MAX_LOOP_CNT);
+ entry->index = drvdata->nr_config[drvdata->curr_list]++;
+ entry->desc_type = DCC_LOOP_TYPE;
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
+
+ return 0;
+}
+
static ssize_t dcc_store_loop(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- int ret = size;
+ int ret;
unsigned long loop_cnt;
struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
- struct dcc_config_entry *entry;
mutex_lock(&drvdata->mutex);
@@ -1110,18 +1128,12 @@ static ssize_t dcc_store_loop(struct device *dev,
goto err;
}
- entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- ret = -ENOMEM;
+ ret = dcc_add_loop(drvdata, loop_cnt);
+ if (ret)
goto err;
- }
- entry->loop_cnt = min_t(uint32_t, loop_cnt, MAX_LOOP_CNT);
- entry->index = drvdata->nr_config[drvdata->curr_list]++;
- entry->desc_type = DCC_LOOP_TYPE;
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
-
+ mutex_unlock(&drvdata->mutex);
+ return size;
err:
mutex_unlock(&drvdata->mutex);
return ret;
@@ -1171,16 +1183,37 @@ static ssize_t dcc_rd_mod_wr(struct device *dev,
}
static DEVICE_ATTR(rd_mod_wr, 0200, NULL, dcc_rd_mod_wr);
+static int dcc_add_write(struct dcc_drvdata *drvdata, unsigned int addr,
+ unsigned int write_val, int apb_bus)
+{
+ struct dcc_config_entry *entry;
+
+ entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->desc_type = DCC_WRITE_TYPE;
+ entry->base = addr & BM(4, 31);
+ entry->offset = addr - entry->base;
+ entry->write_val = write_val;
+ entry->index = drvdata->nr_config[drvdata->curr_list]++;
+ entry->len = 1;
+ entry->apb_bus = apb_bus;
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
+
+ return 0;
+}
+
static ssize_t dcc_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
- int ret = size;
+ int ret;
int nval;
unsigned int addr, write_val;
- int apb_bus;
+ int apb_bus = 0;
struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
- struct dcc_config_entry *entry;
mutex_lock(&drvdata->mutex);
@@ -1197,23 +1230,15 @@ static ssize_t dcc_write(struct device *dev,
goto err;
}
- entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- ret = -ENOMEM;
+ if (nval == 3 && apb_bus != 0)
+ apb_bus = 1;
+
+ ret = dcc_add_write(drvdata, addr, write_val, apb_bus);
+ if (ret)
goto err;
- }
- if (nval == 3)
- entry->apb_bus = true;
-
- entry->desc_type = DCC_WRITE_TYPE;
- entry->base = addr & BM(4, 31);
- entry->offset = addr - entry->base;
- entry->write_val = write_val;
- entry->index = drvdata->nr_config[drvdata->curr_list]++;
- entry->len = 1;
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
+ mutex_unlock(&drvdata->mutex);
+ return size;
err:
mutex_unlock(&drvdata->mutex);
return ret;
@@ -1418,6 +1443,64 @@ static void dcc_sram_dev_exit(struct dcc_drvdata *drvdata)
dcc_sram_dev_deregister(drvdata);
}
+static void dcc_configure_list(struct dcc_drvdata *drvdata,
+ struct device_node *np)
+{
+ int ret, i;
+ const __be32 *prop;
+ uint32_t len, entry, val1, val2, apb_bus;
+ uint32_t curr_link_list;
+
+ ret = of_property_read_u32(np, "qcom,curr-link-list",
+ &curr_link_list);
+ if (ret)
+ return;
+
+ if (curr_link_list >= DCC_MAX_LINK_LIST) {
+ dev_err(drvdata->dev, "List configuration failed");
+ return;
+ }
+ drvdata->curr_list = curr_link_list;
+
+ prop = of_get_property(np, "qcom,link-list", &len);
+ if (prop) {
+ len /= sizeof(__be32);
+ i = 0;
+ while (i < len) {
+ entry = be32_to_cpu(prop[i++]);
+ val1 = be32_to_cpu(prop[i++]);
+ val2 = be32_to_cpu(prop[i++]);
+ apb_bus = be32_to_cpu(prop[i++]);
+
+ switch (entry) {
+ case DCC_READ:
+ ret = dcc_config_add(drvdata, val1,
+ val2, apb_bus);
+ break;
+ case DCC_WRITE:
+ ret = dcc_add_write(drvdata, val1,
+ val2, apb_bus);
+ break;
+ case DCC_LOOP:
+ ret = dcc_add_loop(drvdata, val1);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(drvdata->dev,
+ "DCC init time config failed err:%d\n",
+ ret);
+ break;
+ }
+ }
+
+ if (!ret)
+ dcc_enable(drvdata);
+ }
+}
+
static int dcc_probe(struct platform_device *pdev)
{
int ret, i;
@@ -1492,6 +1575,8 @@ static int dcc_probe(struct platform_device *pdev)
if (ret)
goto err;
+ dcc_configure_list(drvdata, pdev->dev.of_node);
+
return 0;
err:
return ret;
@@ -1525,15 +1610,17 @@ static struct platform_driver dcc_driver = {
static int __init dcc_init(void)
{
+ int ret;
+
+ ret = scm_is_secure_device();
+ if (ret == 0) {
+ pr_info("DCC is not available\n");
+ return -ENODEV;
+ }
+
return platform_driver_register(&dcc_driver);
}
-module_init(dcc_init);
-
-static void __exit dcc_exit(void)
-{
- platform_driver_unregister(&dcc_driver);
-}
-module_exit(dcc_exit);
+pure_initcall(dcc_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM data capture and compare engine");
diff --git a/drivers/soc/qcom/debug_core.c b/drivers/soc/qcom/debug_core.c
deleted file mode 100644
index 164a866..0000000
--- a/drivers/soc/qcom/debug_core.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/string.h>
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/cpu.h>
-#include "soc/qcom/msm-core.h"
-
-#define MAX_PSTATES 50
-#define NUM_OF_PENTRY 3 /* number of variables for ptable node */
-#define NUM_OF_EENTRY 2 /* number of variables for enable node */
-
-enum arg_offset {
- CPU_OFFSET,
- FREQ_OFFSET,
- POWER_OFFSET,
-};
-
-struct core_debug {
- int cpu;
- struct cpu_pstate_pwr *head;
- int enabled;
- int len;
- struct cpu_pwr_stats *ptr;
- struct cpu_pstate_pwr *driver_data;
- int driver_len;
-};
-
-static DEFINE_PER_CPU(struct core_debug, c_dgfs);
-static struct cpu_pwr_stats *msm_core_data;
-static struct debugfs_blob_wrapper help_msg = {
- .data =
-"MSM CORE Debug-FS Support\n"
-"\n"
-"Hierarchy schema\n"
-"/sys/kernel/debug/msm_core\n"
-" /help - Static help text\n"
-" /ptable - write to p-state table\n"
-" /enable - enable the written p-state table\n"
-" /ptable_dump - Dump the debug ptable\n"
-"\n"
-"Usage\n"
-" Input test frequency and power information in ptable:\n"
-" echo \"0 300000 120\" > ptable\n"
-" format: <cpu> <frequency in khz> <power>\n"
-"\n"
-" Enable the ptable for the cpu:\n"
-" echo \"0 1\" > enable\n"
-" format: <cpu> <1 to enable, 0 to disable>\n"
-" Note: Writing 0 to disable will reset/clear the ptable\n"
-"\n"
-" Dump the entire ptable:\n"
-" cat ptable\n"
-" ----- CPU0 - Enabled ---------\n"
-" Freq Power\n"
-" 700000 120\n"
-"----- CPU0 - Live numbers -----\n"
-" Freq Power\n"
-" 300000 218\n"
-" ----- CPU1 - Written ---------\n"
-" Freq Power\n"
-" 700000 120\n"
-" Ptable dump will dump the status of the table as well\n"
-" It shows:\n"
-" Enabled -> for a cpu that debug ptable enabled\n"
-" Written -> for a cpu that has debug ptable values written\n"
-" but not enabled\n"
-"\n",
-
-};
-
-static void add_to_ptable(unsigned int *arg)
-{
- struct core_debug *node;
- int i, cpu = arg[CPU_OFFSET];
- uint32_t freq = arg[FREQ_OFFSET];
- uint32_t power = arg[POWER_OFFSET];
-
- if (!cpu_possible(cpu))
- return;
-
- if ((freq == 0) || (power == 0)) {
- pr_warn("Incorrect power data\n");
- return;
- }
-
- node = &per_cpu(c_dgfs, cpu);
-
- if (node->len >= MAX_PSTATES) {
- pr_warn("Dropped ptable update - no space left.\n");
- return;
- }
-
- if (!node->head) {
- node->head = kzalloc(sizeof(struct cpu_pstate_pwr) *
- (MAX_PSTATES + 1),
- GFP_KERNEL);
- if (!node->head)
- return;
- }
-
- for (i = 0; i < node->len; i++) {
- if (node->head[i].freq == freq) {
- node->head[i].power = power;
- return;
- }
- }
-
- /*
- * Insert a new frequency (may need to move things around to
- * keep in ascending order).
- */
- for (i = MAX_PSTATES - 1; i > 0; i--) {
- if (node->head[i-1].freq > freq) {
- node->head[i].freq = node->head[i-1].freq;
- node->head[i].power = node->head[i-1].power;
- } else if (node->head[i-1].freq != 0) {
- break;
- }
- }
-
- if (node->len < MAX_PSTATES) {
- node->head[i].freq = freq;
- node->head[i].power = power;
- node->len++;
- }
-
- if (node->ptr)
- node->ptr->len = node->len;
-}
-
-static int split_ptable_args(char *line, unsigned int *arg, uint32_t n)
-{
- char *args;
- int i;
- int ret = 0;
-
- for (i = 0; i < n; i++) {
- if (!line)
- break;
- args = strsep(&line, " ");
- ret = kstrtouint(args, 10, &arg[i]);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static ssize_t msm_core_ptable_write(struct file *file,
- const char __user *ubuf, size_t len, loff_t *offp)
-{
- char *kbuf;
- int ret;
- unsigned int arg[3];
-
- if (len == 0)
- return 0;
-
- kbuf = kzalloc(len + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, ubuf, len)) {
- ret = -EFAULT;
- goto done;
- }
- kbuf[len] = '\0';
- ret = split_ptable_args(kbuf, arg, NUM_OF_PENTRY);
- if (!ret) {
- add_to_ptable(arg);
- ret = len;
- }
-done:
- kfree(kbuf);
- return ret;
-}
-
-static void print_table(struct seq_file *m, struct cpu_pstate_pwr *c_n,
- int len)
-{
- int i;
-
- seq_puts(m, " Freq Power\n");
- for (i = 0; i < len; i++)
- seq_printf(m, " %d %u\n", c_n[i].freq,
- c_n[i].power);
-
-}
-
-static int msm_core_ptable_read(struct seq_file *m, void *data)
-{
- int cpu;
- struct core_debug *node;
-
- for_each_possible_cpu(cpu) {
- node = &per_cpu(c_dgfs, cpu);
- if (node->head) {
- seq_printf(m, "----- CPU%d - %s - Debug -------\n",
- cpu, node->enabled == 1 ? "Enabled" : "Written");
- print_table(m, node->head, node->len);
- }
- if (msm_core_data[cpu].ptable) {
- seq_printf(m, "--- CPU%d - Live numbers at %ldC---\n",
- cpu, node->ptr->temp);
- print_table(m, msm_core_data[cpu].ptable,
- node->driver_len);
- }
- }
- return 0;
-}
-
-static ssize_t msm_core_enable_write(struct file *file,
- const char __user *ubuf, size_t len, loff_t *offp)
-{
- char *kbuf;
- int ret;
- unsigned int arg[3];
- int cpu;
-
- if (len == 0)
- return 0;
-
- kbuf = kzalloc(len + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
-
- if (copy_from_user(kbuf, ubuf, len)) {
- ret = -EFAULT;
- goto done;
- }
- kbuf[len] = '\0';
- ret = split_ptable_args(kbuf, arg, NUM_OF_EENTRY);
- if (ret)
- goto done;
- cpu = arg[CPU_OFFSET];
-
- if (cpu_possible(cpu)) {
- struct core_debug *node = &per_cpu(c_dgfs, cpu);
-
- if (arg[FREQ_OFFSET]) {
- msm_core_data[cpu].ptable = node->head;
- msm_core_data[cpu].len = node->len;
- } else {
- msm_core_data[cpu].ptable = node->driver_data;
- msm_core_data[cpu].len = node->driver_len;
- node->len = 0;
- }
- node->enabled = arg[FREQ_OFFSET];
- }
- ret = len;
- blocking_notifier_call_chain(
- get_power_update_notifier(), cpu, NULL);
-
-done:
- kfree(kbuf);
- return ret;
-}
-
-static const struct file_operations msm_core_enable_ops = {
- .write = msm_core_enable_write,
-};
-
-static int msm_core_dump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, msm_core_ptable_read, inode->i_private);
-}
-
-static const struct file_operations msm_core_ptable_ops = {
- .open = msm_core_dump_open,
- .read = seq_read,
- .write = msm_core_ptable_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int msm_core_debug_init(void)
-{
- struct dentry *dir = NULL;
- struct dentry *file = NULL;
- int i;
-
- msm_core_data = get_cpu_pwr_stats();
- if (!msm_core_data)
- goto fail;
-
- dir = debugfs_create_dir("msm_core", NULL);
- if (IS_ERR_OR_NULL(dir))
- return PTR_ERR(dir);
-
- file = debugfs_create_file("enable", 0660, dir, NULL,
- &msm_core_enable_ops);
- if (IS_ERR_OR_NULL(file))
- goto fail;
-
- file = debugfs_create_file("ptable", 0660, dir, NULL,
- &msm_core_ptable_ops);
- if (IS_ERR_OR_NULL(file))
- goto fail;
-
- help_msg.size = strlen(help_msg.data);
- file = debugfs_create_blob("help", 0444, dir, &help_msg);
- if (IS_ERR_OR_NULL(file))
- goto fail;
-
- for (i = 0; i < num_possible_cpus(); i++) {
- per_cpu(c_dgfs, i).ptr = &msm_core_data[i];
- per_cpu(c_dgfs, i).driver_data = msm_core_data[i].ptable;
- per_cpu(c_dgfs, i).driver_len = msm_core_data[i].len;
- }
- return 0;
-fail:
- debugfs_remove(dir);
- return PTR_ERR(file);
-}
-late_initcall(msm_core_debug_init);
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index f7f3317..3c4238c 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -29,6 +29,9 @@
#include <linux/power_supply.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#define EUD_ENABLE_CMD 1
#define EUD_DISABLE_CMD 0
@@ -74,6 +77,10 @@ struct eud_chip {
struct work_struct eud_work;
struct power_supply *batt_psy;
struct clk *cfg_ahb_clk;
+
+ /* regulator and notifier chain for it */
+ struct regulator *vdda33;
+ struct notifier_block vdda33_nb;
};
static const unsigned int eud_extcon_cable[] = {
@@ -487,12 +494,47 @@ static irqreturn_t handle_eud_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static int vdda33_notifier_block_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct eud_chip *chip = container_of(nb, struct eud_chip, vdda33_nb);
+ int attach_det = 0;
+
+ switch (event) {
+ case REGULATOR_EVENT_ENABLE:
+ attach_det = 1;
+		/* fall through */
+ case REGULATOR_EVENT_DISABLE:
+ clk_prepare_enable(chip->cfg_ahb_clk);
+
+ /* eud does not retain interrupt mask when ldo24
+ * is turned off. Set the interrupt mask when
+ * ldo24 is turned on
+ */
+ if (attach_det)
+ writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
+ chip->eud_reg_base + EUD_REG_INT1_EN_MASK);
+ writel_relaxed(attach_det,
+ chip->eud_reg_base + EUD_REG_SW_ATTACH_DET);
+ clk_disable_unprepare(chip->cfg_ahb_clk);
+
+ dev_dbg(chip->dev, "%s(): %s\n", __func__,
+ attach_det ? "enable" : "disable");
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int msm_eud_probe(struct platform_device *pdev)
{
struct eud_chip *chip;
struct uart_port *port;
struct resource *res;
int ret;
+ int pet;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip) {
@@ -575,6 +617,24 @@ static int msm_eud_probe(struct platform_device *pdev)
eud_private = pdev;
+ chip->vdda33 = devm_regulator_get(&pdev->dev, "vdda33");
+ if (IS_ERR(chip->vdda33)) {
+ dev_err(chip->dev, "%s: failed to get eud 3p1 regulator\n",
+ __func__);
+ return PTR_ERR(chip->vdda33);
+ }
+ chip->vdda33_nb.notifier_call = vdda33_notifier_block_cb;
+ regulator_register_notifier(chip->vdda33, &chip->vdda33_nb);
+
+ clk_prepare_enable(chip->cfg_ahb_clk);
+
+ pet = regulator_is_enabled(chip->vdda33) ? 1 : 0;
+ writel_relaxed(pet, chip->eud_reg_base + EUD_REG_SW_ATTACH_DET);
+
+ dev_dbg(chip->dev, "%s:%s pet\n", __func__, pet ? "Attach" : "Detach");
+
+ clk_disable_unprepare(chip->cfg_ahb_clk);
+
/* Enable EUD */
if (enable)
enable_eud(pdev);
@@ -587,6 +647,8 @@ static int msm_eud_remove(struct platform_device *pdev)
struct eud_chip *chip = platform_get_drvdata(pdev);
struct uart_port *port = &chip->port;
+ regulator_unregister_notifier(chip->vdda33, &chip->vdda33_nb);
+
uart_remove_one_port(&eud_uart_driver, port);
device_init_wakeup(chip->dev, false);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 6019e4b..e6fd52e 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1669,6 +1669,8 @@ void ch_purge_intent_lists(struct channel_ctx *ctx)
&ctx->local_rx_intent_list, list) {
ctx->notify_rx_abort(ctx, ctx->user_priv,
ptr_intent->pkt_priv);
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, ptr_intent);
list_del(&ptr_intent->list);
kfree(ptr_intent);
}
@@ -3767,6 +3769,8 @@ static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
xprt_ctx->name,
xprt_ctx->edge);
+ kfree(xprt_ctx->ops);
+ xprt_ctx->ops = NULL;
kfree(xprt_ctx);
}
@@ -4070,6 +4074,7 @@ int glink_core_register_transport(struct glink_transport_if *if_ptr,
kfree(xprt_ptr);
return -ENOMEM;
}
+ cfg->tx_task = xprt_ptr->tx_task;
ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
if (ret < 0) {
kfree(xprt_ptr);
@@ -4157,6 +4162,7 @@ static void glink_core_link_down(struct glink_transport_if *if_ptr)
rwref_write_get(&xprt_ptr->xprt_state_lhb0);
xprt_ptr->next_lcid = 1;
xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->curr_qos_rate_kBps = 0;
xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
xprt_ptr->l_features =
@@ -4291,6 +4297,12 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
ctx = get_first_ch_ctx(xprt_ptr);
while (ctx) {
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ if (!list_empty(&ctx->tx_active))
+ glink_qos_done_ch_tx(ctx);
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
index 1411330..704171f 100644
--- a/drivers/soc/qcom/glink_core_if.h
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/types.h>
+#include <linux/sched.h>
#include "glink_private.h"
/* Local Channel state */
@@ -105,6 +106,7 @@ struct glink_core_flow_info {
* @versions_entries: Number of entries in @versions.
* @max_cid: Maximum number of channel identifiers supported.
* @max_iid: Maximum number of intent identifiers supported.
+ * @tx_task: Task structure for tx thread.
* @mtu: MTU supported by this transport.
* @num_flows: Number of traffic flows/priority buckets.
* @flow_info: Information about each flow/priority.
@@ -117,6 +119,7 @@ struct glink_core_transport_cfg {
size_t versions_entries;
uint32_t max_cid;
uint32_t max_iid;
+ struct task_struct *tx_task;
size_t mtu;
uint32_t num_flows;
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
index 4e9b118..94a3d8c 100644
--- a/drivers/soc/qcom/glink_loopback_server.c
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -195,7 +195,7 @@ int glink_lbsrv_send_response(void *handle, uint32_t req_id, uint32_t req_type,
resp_pkt->response = response;
return glink_tx(handle, (void *)LINEAR, (void *)resp_pkt,
- sizeof(struct resp), 0);
+ sizeof(struct resp), GLINK_TX_REQ_INTENT);
}
static uint32_t calc_delay_ms(uint32_t random_delay, uint32_t delay_ms)
@@ -1143,7 +1143,7 @@ static void glink_lbsrv_tx_worker(struct work_struct *work)
return;
}
- flags = 0;
+ flags = GLINK_TX_REQ_INTENT;
if (tmp_work_info->tracer_pkt) {
flags |= GLINK_TX_TRACER_PKT;
tracer_pkt_log_event(tmp_work_info->data,
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 384347d..ea7374f 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -33,6 +33,7 @@
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
+#include <linux/cpumask.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
@@ -226,6 +227,7 @@ struct edge_info {
spinlock_t rt_vote_lock;
uint32_t rt_votes;
uint32_t num_pw_states;
+ uint32_t readback;
unsigned long *ramp_time_us;
struct mailbox_config_info *mailbox;
};
@@ -270,6 +272,7 @@ static void send_irq(struct edge_info *einfo)
* Any data associated with this event must be visable to the remote
* before the interrupt is triggered
*/
+ einfo->readback = einfo->tx_ch_desc->write_index;
wmb();
writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
if (einfo->remote_proc_id != SMEM_SPSS)
@@ -2321,17 +2324,40 @@ static int subsys_name_to_id(const char *name)
return -ENODEV;
}
+static void glink_set_affinity(struct edge_info *einfo, u32 *arr, size_t size)
+{
+ struct cpumask cpumask;
+ pid_t pid;
+ int i;
+
+ cpumask_clear(&cpumask);
+ for (i = 0; i < size; i++) {
+ if (arr[i] < num_possible_cpus())
+ cpumask_set_cpu(arr[i], &cpumask);
+ }
+ if (irq_set_affinity(einfo->irq_line, &cpumask))
+ pr_err("%s: Failed to set irq affinity\n", __func__);
+
+ if (sched_setaffinity(einfo->task->pid, &cpumask))
+ pr_err("%s: Failed to set rx cpu affinity\n", __func__);
+
+ pid = einfo->xprt_cfg.tx_task->pid;
+ if (sched_setaffinity(pid, &cpumask))
+ pr_err("%s: Failed to set tx cpu affinity\n", __func__);
+}
+
static int glink_smem_native_probe(struct platform_device *pdev)
{
struct device_node *node;
struct device_node *phandle_node;
struct edge_info *einfo;
- int rc;
+ int rc, cpu_size;
char *key;
const char *subsys_name;
uint32_t irq_line;
uint32_t irq_mask;
struct resource *r;
+ u32 *cpu_array;
node = pdev->dev.of_node;
@@ -2478,6 +2504,20 @@ static int glink_smem_native_probe(struct platform_device *pdev)
pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
irq_line);
+ key = "cpu-affinity";
+ cpu_size = of_property_count_u32_elems(node, key);
+ if (cpu_size > 0) {
+ cpu_array = kmalloc_array(cpu_size, sizeof(u32), GFP_KERNEL);
+ if (!cpu_array) {
+ rc = -ENOMEM;
+ goto request_irq_fail;
+ }
+ rc = of_property_read_u32_array(node, key, cpu_array, cpu_size);
+ if (!rc)
+ glink_set_affinity(einfo, cpu_array, cpu_size);
+ kfree(cpu_array);
+ }
+
register_debugfs_info(einfo);
/* fake an interrupt on this edge to see if the remote side is up */
irq_handler(0, einfo);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 04c611c..e391cd1 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -200,7 +200,6 @@ enum icnss_driver_event_type {
enum icnss_msa_perm {
ICNSS_MSA_PERM_HLOS_ALL = 0,
ICNSS_MSA_PERM_WLAN_HW_RW = 1,
- ICNSS_MSA_PERM_DUMP_COLLECT = 2,
ICNSS_MSA_PERM_MAX,
};
@@ -233,13 +232,6 @@ struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = {
.nelems = 2,
},
- [ICNSS_MSA_PERM_DUMP_COLLECT] = {
- .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS},
- .perms = {PERM_READ | PERM_WRITE,
- PERM_READ | PERM_WRITE,
- PERM_READ},
- .nelems = 3,
- },
};
struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
@@ -257,14 +249,6 @@ struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = {
.nelems = 3,
},
- [ICNSS_MSA_PERM_DUMP_COLLECT] = {
- .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS},
- .perms = {PERM_READ | PERM_WRITE,
- PERM_READ | PERM_WRITE,
- PERM_READ | PERM_WRITE,
- PERM_READ},
- .nelems = 4,
- },
};
struct icnss_event_pd_service_down_data {
@@ -2345,7 +2329,7 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
goto out;
- if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+ if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
event_data->crashed, priv->state);
ICNSS_ASSERT(0);
@@ -2491,9 +2475,10 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
- if (code == SUBSYS_AFTER_SHUTDOWN) {
+ if (code == SUBSYS_AFTER_SHUTDOWN &&
+ notif->crashed == CRASH_STATUS_ERR_FATAL) {
ret = icnss_assign_msa_perm_all(priv,
- ICNSS_MSA_PERM_DUMP_COLLECT);
+ ICNSS_MSA_PERM_HLOS_ALL);
if (!ret) {
icnss_pr_info("Collecting msa0 segment dump\n");
icnss_msa0_ramdump(priv);
@@ -2509,8 +2494,17 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
if (code != SUBSYS_BEFORE_SHUTDOWN)
return NOTIFY_OK;
- if (test_bit(ICNSS_PDR_REGISTERED, &priv->state))
+ if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
+ set_bit(ICNSS_FW_DOWN, &priv->state);
+ icnss_ignore_qmi_timeout(true);
+
+ fw_down_data.crashed = !!notif->crashed;
+ if (test_bit(ICNSS_FW_READY, &priv->state))
+ icnss_call_driver_uevent(priv,
+ ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
return NOTIFY_OK;
+ }
icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
priv->state, notif->crashed);
@@ -2644,14 +2638,18 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
*state, priv->state, icnss_pdr_cause[cause]);
event_post:
- set_bit(ICNSS_FW_DOWN, &priv->state);
- icnss_ignore_qmi_timeout(true);
- clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+ if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
+ set_bit(ICNSS_FW_DOWN, &priv->state);
+ icnss_ignore_qmi_timeout(true);
- fw_down_data.crashed = event_data->crashed;
- if (test_bit(ICNSS_FW_READY, &priv->state))
- icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
- &fw_down_data);
+ fw_down_data.crashed = event_data->crashed;
+ if (test_bit(ICNSS_FW_READY, &priv->state))
+ icnss_call_driver_uevent(priv,
+ ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
+ }
+
+ clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
done:
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index ff8cc99..3f4b8bc 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -183,24 +183,28 @@
#define HW_SOC_ID_M8953 (293)
#define etm_writel(etm, val, off) \
+ writel_relaxed_no_log(val, etm->base + off)
+#define etm_writel_log(etm, val, off) \
__raw_writel(val, etm->base + off)
+
#define etm_readl(etm, off) \
- __raw_readl(etm->base + off)
+ readl_relaxed_no_log(etm->base + off)
#define etm_writeq(etm, val, off) \
- __raw_writeq(val, etm->base + off)
+ writeq_relaxed_no_log(val, etm->base + off)
+
#define etm_readq(etm, off) \
- __raw_readq(etm->base + off)
+ readq_relaxed_no_log(etm->base + off)
#define ETM_LOCK(base) \
do { \
mb(); /* ensure configuration take effect before we lock it */ \
- etm_writel(base, 0x0, CORESIGHT_LAR); \
+ etm_writel_log(base, 0x0, CORESIGHT_LAR); \
} while (0)
#define ETM_UNLOCK(base) \
do { \
- etm_writel(base, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
+ etm_writel_log(base, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); /* ensure unlock take effect before we configure */ \
} while (0)
diff --git a/drivers/soc/qcom/llcc-sdm670.c b/drivers/soc/qcom/llcc-sdm670.c
index 494b93b..aaed9ee 100644
--- a/drivers/soc/qcom/llcc-sdm670.c
+++ b/drivers/soc/qcom/llcc-sdm670.c
@@ -63,7 +63,7 @@ static struct llcc_slice_config sdm670_data[] = {
SCT_ENTRY("audio", 6, 6, 512, 1, 0, 0xF, 0x0, 0, 0, 1, 1, 0),
SCT_ENTRY("modem", 8, 8, 512, 1, 0, 0xF, 0x0, 0, 0, 1, 1, 0),
SCT_ENTRY("gpu", 12, 12, 384, 1, 1, 0x0, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0x0, 0x8, 0, 0, 1, 0, 1),
+ SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0xF, 0x0, 0, 0, 1, 0, 1),
SCT_ENTRY("audiohw", 22, 22, 512, 1, 1, 0xF, 0x0, 0, 0, 1, 1, 0),
};
diff --git a/drivers/soc/qcom/llcc_perfmon.c b/drivers/soc/qcom/llcc_perfmon.c
index 39276a9..8c86e7d 100644
--- a/drivers/soc/qcom/llcc_perfmon.c
+++ b/drivers/soc/qcom/llcc_perfmon.c
@@ -127,8 +127,11 @@ static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
unsigned int i, j;
unsigned long long total;
+ if (!llcc_priv->configured_counters)
+ return;
+
llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
- for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
+ for (i = 0; i < llcc_priv->configured_counters; i++) {
total = 0;
for (j = 0; j < llcc_priv->num_banks; j++) {
regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j]
@@ -138,15 +141,6 @@ static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
llcc_priv->configured[i].counter_dump += total;
}
-
- total = 0;
- for (j = 0; j < llcc_priv->num_banks; j++) {
- regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j] +
- LLCC_COUNTER_n_VALUE(i), &val);
- total += val;
- }
-
- llcc_priv->configured[i].counter_dump += total;
}
static ssize_t perfmon_counter_dump_show(struct device *dev,
@@ -288,8 +282,8 @@ static ssize_t perfmon_configure_store(struct device *dev,
llcc_priv->configured[j].port_sel = port_sel;
llcc_priv->configured[j].event_sel = event_sel;
port_ops = llcc_priv->port_ops[port_sel];
- pr_info("configured event %ld counter %d on port %ld\n",
- event_sel, j, port_sel);
+ pr_info("counter %d configured for event %ld from port %ld\n",
+ j, event_sel, port_sel);
port_ops->event_config(llcc_priv, event_sel, j++, true);
if (!(llcc_priv->enables_port & (1 << port_sel)))
if (port_ops->event_enable)
@@ -355,8 +349,8 @@ static ssize_t perfmon_remove_store(struct device *dev,
llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS;
llcc_priv->configured[j].event_sel = 100;
port_ops = llcc_priv->port_ops[port_sel];
- pr_info("Removed event %ld counter %d from port %ld\n",
- event_sel, j, port_sel);
+ pr_info("removed counter %d for event %ld from port %ld\n",
+ j, event_sel, port_sel);
port_ops->event_config(llcc_priv, event_sel, j++, false);
if (llcc_priv->enables_port & (1 << port_sel))
@@ -531,13 +525,13 @@ static ssize_t perfmon_start_store(struct device *dev,
val = MANUAL_MODE | MONITOR_EN;
if (llcc_priv->expires.tv64) {
- if (hrtimer_is_queued(&llcc_priv->hrtimer))
- hrtimer_forward_now(&llcc_priv->hrtimer,
- llcc_priv->expires);
- else
- hrtimer_start(&llcc_priv->hrtimer,
- llcc_priv->expires,
- HRTIMER_MODE_REL_PINNED);
+ if (hrtimer_is_queued(&llcc_priv->hrtimer))
+ hrtimer_forward_now(&llcc_priv->hrtimer,
+ llcc_priv->expires);
+ else
+ hrtimer_start(&llcc_priv->hrtimer,
+ llcc_priv->expires,
+ HRTIMER_MODE_REL_PINNED);
}
} else {
diff --git a/drivers/soc/qcom/lpm-stats.c b/drivers/soc/qcom/lpm-stats.c
index ee68433..4a41eee 100644
--- a/drivers/soc/qcom/lpm-stats.c
+++ b/drivers/soc/qcom/lpm-stats.c
@@ -21,6 +21,7 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/smp.h>
#include <linux/suspend.h>
#include <soc/qcom/spm.h>
#include <soc/qcom/pm.h>
@@ -45,7 +46,7 @@ struct level_stats {
int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
int success_count;
int failed_count;
- int64_t total_time;
+ uint64_t total_time;
uint64_t enter_time;
};
@@ -104,7 +105,7 @@ static void level_stats_print(struct seq_file *m, struct level_stats *stats)
int i = 0;
int64_t bucket_time = 0;
char seqs[MAX_STR_LEN] = {0};
- int64_t s = stats->total_time;
+ uint64_t s = stats->total_time;
uint32_t ns = do_div(s, NSEC_PER_SEC);
snprintf(seqs, MAX_STR_LEN,
@@ -255,6 +256,15 @@ static ssize_t level_stats_file_write(struct file *file,
return count;
}
+static void reset_cpu_stats(void *info)
+{
+ struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+ int i;
+
+ for (i = 0; i < stats->num_levels; i++)
+ level_stats_reset(&stats->time_stats[i]);
+}
+
static ssize_t lpm_stats_file_write(struct file *file,
const char __user *buffer, size_t count, loff_t *off)
{
@@ -276,6 +286,12 @@ static ssize_t lpm_stats_file_write(struct file *file,
return -EINVAL;
level_stats_reset_all(stats);
+	/*
+	 * Wake up each CPU and have it reset its own stats
+	 * locally, so that the reset timestamps are recorded
+	 * on the CPU they belong to.
+	 */
+ on_each_cpu(reset_cpu_stats, NULL, 1);
return count;
}
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index 5ed66bf..5873f5c 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -38,7 +38,18 @@ struct msm_memory_dump {
struct msm_dump_table *table;
};
+struct dump_vaddr_entry {
+ uint32_t id;
+ void *dump_vaddr;
+};
+
+struct msm_mem_dump_vaddr_tbl {
+ uint8_t num_node;
+ struct dump_vaddr_entry *entries;
+};
+
static struct msm_memory_dump memdump;
+static struct msm_mem_dump_vaddr_tbl vaddr_tbl;
uint32_t msm_dump_table_version(void)
{
@@ -113,6 +124,28 @@ int msm_dump_data_register(enum msm_dump_table_ids id,
}
EXPORT_SYMBOL(msm_dump_data_register);
+void *get_msm_dump_ptr(enum msm_dump_data_ids id)
+{
+ int i;
+
+ if (!vaddr_tbl.entries)
+ return NULL;
+
+ if (id > MSM_DUMP_DATA_MAX)
+ return NULL;
+
+ for (i = 0; i < vaddr_tbl.num_node; i++) {
+ if (vaddr_tbl.entries[i].id == id)
+ break;
+ }
+
+ if (i == vaddr_tbl.num_node)
+ return NULL;
+
+ return (void *)vaddr_tbl.entries[i].dump_vaddr;
+}
+EXPORT_SYMBOL(get_msm_dump_ptr);
+
static int __init init_memory_dump(void)
{
struct msm_dump_table *table;
@@ -209,6 +242,14 @@ static int mem_dump_probe(struct platform_device *pdev)
struct msm_dump_entry dump_entry;
int ret;
u32 size, id;
+ int i = 0;
+
+ vaddr_tbl.num_node = of_get_child_count(node);
+ vaddr_tbl.entries = devm_kcalloc(&pdev->dev, vaddr_tbl.num_node,
+ sizeof(struct dump_vaddr_entry),
+ GFP_KERNEL);
+ if (!vaddr_tbl.entries)
+ dev_err(&pdev->dev, "Unable to allocate mem for ptr addr\n");
for_each_available_child_of_node(node, child_node) {
ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
@@ -254,6 +295,10 @@ static int mem_dump_probe(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, size, dump_vaddr,
dump_addr);
devm_kfree(&pdev->dev, dump_data);
+ } else if (vaddr_tbl.entries) {
+ vaddr_tbl.entries[i].id = id;
+ vaddr_tbl.entries[i].dump_vaddr = dump_vaddr;
+ i++;
}
}
return 0;
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
deleted file mode 100644
index f8103de..0000000
--- a/drivers/soc/qcom/msm-core.c
+++ /dev/null
@@ -1,902 +0,0 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/msm-core-interface.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/platform_device.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/thermal.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/uio_driver.h>
-#include <asm/smp_plat.h>
-#include <asm/cputype.h>
-#include <stdbool.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/trace_msm_core.h>
-
-#define TEMP_BASE_POINT 35
-#define TEMP_MAX_POINT 95
-#define CPU_HOTPLUG_LIMIT 80
-#define CPU_BIT_MASK(cpu) BIT(cpu)
-#define DEFAULT_TEMP 40
-#define DEFAULT_LOW_HYST_TEMP 10
-#define DEFAULT_HIGH_HYST_TEMP 5
-#define MAX_CORES_PER_CLUSTER 4
-#define MAX_NUM_OF_CLUSTERS 2
-#define NUM_OF_CORNERS 10
-#define DEFAULT_SCALING_FACTOR 1
-
-#define ALLOCATE_2D_ARRAY(type) \
-static type **allocate_2d_array_##type(int idx)\
-{\
- int i;\
- type **ptr = NULL;\
- if (!idx) \
- return ERR_PTR(-EINVAL);\
- ptr = kzalloc(sizeof(*ptr) * TEMP_DATA_POINTS, \
- GFP_KERNEL);\
- if (!ptr) { \
- return ERR_PTR(-ENOMEM); \
- } \
- for (i = 0; i < TEMP_DATA_POINTS; i++) { \
- ptr[i] = kzalloc(sizeof(*ptr[i]) * \
- idx, GFP_KERNEL);\
- if (!ptr[i]) {\
- goto done;\
- } \
- } \
- return ptr;\
-done:\
- for (i = 0; i < TEMP_DATA_POINTS; i++) \
- kfree(ptr[i]);\
- kfree(ptr);\
- return ERR_PTR(-ENOMEM);\
-}
-
-struct cpu_activity_info {
- int cpu;
- int mpidr;
- long temp;
- int sensor_id;
- struct cpu_static_info *sp;
-};
-
-struct cpu_static_info {
- uint32_t **power;
- cpumask_t mask;
- struct cpufreq_frequency_table *table;
- uint32_t *voltage;
- uint32_t num_of_freqs;
-};
-
-static DEFINE_MUTEX(policy_update_mutex);
-static DEFINE_MUTEX(kthread_update_mutex);
-static DEFINE_SPINLOCK(update_lock);
-static struct delayed_work sampling_work;
-static struct completion sampling_completion;
-static struct task_struct *sampling_task;
-static int low_hyst_temp;
-static int high_hyst_temp;
-static struct platform_device *msm_core_pdev;
-static struct cpu_activity_info activity[NR_CPUS];
-DEFINE_PER_CPU(struct cpu_pstate_pwr *, ptable);
-static struct cpu_pwr_stats cpu_stats[NR_CPUS];
-ALLOCATE_2D_ARRAY(uint32_t);
-
-static int poll_ms;
-module_param_named(polling_interval, poll_ms, int, 0664);
-
-static int disabled;
-module_param_named(disabled, disabled, int, 0664);
-
-static bool in_suspend;
-static bool activate_power_table;
-static int max_throttling_temp = 80; /* in C */
-module_param_named(throttling_temp, max_throttling_temp, int, 0664);
-
-static void samplequeue_handle(struct work_struct *work)
-{
- complete(&sampling_completion);
-}
-
-static void repopulate_stats(int cpu)
-{
- int i;
- struct cpu_activity_info *cpu_node = &activity[cpu];
- int temp_point;
- struct cpu_pstate_pwr *pt = per_cpu(ptable, cpu);
-
- if (!pt)
- return;
-
- if (cpu_node->temp < TEMP_BASE_POINT)
- temp_point = 0;
- else if (cpu_node->temp > TEMP_MAX_POINT)
- temp_point = TEMP_DATA_POINTS - 1;
- else
- temp_point = (cpu_node->temp - TEMP_BASE_POINT) / 5;
-
- cpu_stats[cpu].temp = cpu_node->temp;
- for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
- pt[i].power = cpu_node->sp->power[temp_point][i];
-
- trace_cpu_stats(cpu, cpu_stats[cpu].temp, pt[0].power,
- pt[cpu_node->sp->num_of_freqs-1].power);
-};
-
-void trigger_cpu_pwr_stats_calc(void)
-{
- int cpu;
- static long prev_temp[NR_CPUS];
- struct cpu_activity_info *cpu_node;
-
- if (disabled)
- return;
-
- spin_lock(&update_lock);
-
- for_each_online_cpu(cpu) {
- cpu_node = &activity[cpu];
- if (cpu_node->sensor_id < 0)
- continue;
-
- prev_temp[cpu] = cpu_node->temp;
-
- /*
- * Do not populate/update stats before policy and ptable have
- * been updated.
- */
- if (activate_power_table && cpu_stats[cpu].ptable
- && cpu_node->sp->table)
- repopulate_stats(cpu);
- }
- spin_unlock(&update_lock);
-}
-EXPORT_SYMBOL(trigger_cpu_pwr_stats_calc);
-
-void set_cpu_throttled(cpumask_t *mask, bool throttling)
-{
- int cpu;
-
- if (!mask)
- return;
-
- spin_lock(&update_lock);
- for_each_cpu(cpu, mask)
- cpu_stats[cpu].throttling = throttling;
- spin_unlock(&update_lock);
-}
-EXPORT_SYMBOL(set_cpu_throttled);
-
-static void update_related_freq_table(struct cpufreq_policy *policy)
-{
- int cpu, num_of_freqs;
- struct cpufreq_frequency_table *table;
-
- table = policy->freq_table;
- if (!table) {
- pr_err("Couldn't get freq table for cpu%d\n",
- policy->cpu);
- return;
- }
-
- for (num_of_freqs = 0; table[num_of_freqs].frequency !=
- CPUFREQ_TABLE_END;)
- num_of_freqs++;
-
- /*
- * Synchronous cores within cluster have the same
- * policy. Since these cores do not have the cpufreq
- * table initialized for all of them, copy the same
- * table to all the related cpus.
- */
- for_each_cpu(cpu, policy->related_cpus) {
- activity[cpu].sp->table = table;
- activity[cpu].sp->num_of_freqs = num_of_freqs;
- }
-}
-
-static __ref int do_sampling(void *data)
-{
- int cpu;
- struct cpu_activity_info *cpu_node;
- static int prev_temp[NR_CPUS];
-
- while (!kthread_should_stop()) {
- wait_for_completion(&sampling_completion);
- cancel_delayed_work(&sampling_work);
-
- mutex_lock(&kthread_update_mutex);
- if (in_suspend)
- goto unlock;
-
- trigger_cpu_pwr_stats_calc();
-
- for_each_online_cpu(cpu) {
- cpu_node = &activity[cpu];
- if (prev_temp[cpu] != cpu_node->temp) {
- prev_temp[cpu] = cpu_node->temp;
- }
- }
- if (!poll_ms)
- goto unlock;
-
- schedule_delayed_work(&sampling_work,
- msecs_to_jiffies(poll_ms));
-unlock:
- mutex_unlock(&kthread_update_mutex);
- }
- return 0;
-}
-
-static void clear_static_power(struct cpu_static_info *sp)
-{
- int i;
-
- if (!sp)
- return;
-
- if (cpumask_first(&sp->mask) < num_possible_cpus())
- return;
-
- for (i = 0; i < TEMP_DATA_POINTS; i++)
- kfree(sp->power[i]);
- kfree(sp->power);
- kfree(sp);
-}
-
-BLOCKING_NOTIFIER_HEAD(msm_core_stats_notifier_list);
-
-struct blocking_notifier_head *get_power_update_notifier(void)
-{
- return &msm_core_stats_notifier_list;
-}
-
-int register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&msm_core_stats_notifier_list,
- nb);
-}
-
-static int update_userspace_power(struct sched_params __user *argp)
-{
- int i;
- int ret;
- int cpu = -1;
- struct cpu_activity_info *node;
- struct cpu_static_info *sp, *clear_sp;
- int cpumask, cluster;
- bool pdata_valid[NR_CPUS] = {0};
- bool cpu_found = false;
-
- get_user(cpumask, &argp->cpumask);
- get_user(cluster, &argp->cluster);
-
- pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask,
- cluster);
- for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
- if (!(cpumask & 0x01))
- continue;
-
- for_each_possible_cpu(cpu) {
- if ((cpu_topology[cpu].core_id != i) ||
- (cpu_topology[cpu].cluster_id != cluster))
- continue;
-
- cpu_found = true;
- break;
- }
- if (cpu_found)
- break;
- }
-
- if ((cpu < 0) || (cpu >= num_possible_cpus()))
- return -EINVAL;
-
- node = &activity[cpu];
- /* Allocate new memory to copy cpumask specific power
- * information.
- */
- sp = kzalloc(sizeof(*sp), GFP_KERNEL);
- if (!sp)
- return -ENOMEM;
-
-
- sp->power = allocate_2d_array_uint32_t(node->sp->num_of_freqs);
- if (IS_ERR_OR_NULL(sp->power)) {
- ret = PTR_ERR(sp->power);
- kfree(sp);
- return ret;
- }
- sp->num_of_freqs = node->sp->num_of_freqs;
- sp->voltage = node->sp->voltage;
- sp->table = node->sp->table;
-
- for (i = 0; i < TEMP_DATA_POINTS; i++) {
- ret = copy_from_user(sp->power[i], &argp->power[i][0],
- sizeof(sp->power[i][0]) * node->sp->num_of_freqs);
- if (ret)
- goto failed;
- }
-
- /* Copy the same power values for all the cpus in the cpumask
- * argp->cpumask within the cluster (argp->cluster)
- */
- get_user(cpumask, &argp->cpumask);
- spin_lock(&update_lock);
- for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
- if (!(cpumask & 0x01))
- continue;
- for_each_possible_cpu(cpu) {
- if (((cpu_topology[cpu].core_id != i) ||
- (cpu_topology[cpu].cluster_id != cluster)))
- continue;
-
- node = &activity[cpu];
- clear_sp = node->sp;
- node->sp = sp;
- cpumask_set_cpu(cpu, &sp->mask);
- if (clear_sp) {
- cpumask_clear_cpu(cpu, &clear_sp->mask);
- clear_static_power(clear_sp);
- }
- cpu_stats[cpu].ptable = per_cpu(ptable, cpu);
- repopulate_stats(cpu);
- pdata_valid[cpu] = true;
- }
- }
- spin_unlock(&update_lock);
-
- for_each_possible_cpu(cpu) {
- if (!pdata_valid[cpu])
- continue;
-
- blocking_notifier_call_chain(
- &msm_core_stats_notifier_list, cpu, NULL);
- }
-
- activate_power_table = true;
- return 0;
-
-failed:
- for (i = 0; i < TEMP_DATA_POINTS; i++)
- kfree(sp->power[i]);
- kfree(sp->power);
- kfree(sp);
- return ret;
-}
-
-static long msm_core_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- long ret = 0;
- struct cpu_activity_info *node = NULL;
- struct sched_params __user *argp = (struct sched_params __user *)arg;
- int i, cpu = num_possible_cpus();
- int cluster, cpumask;
- bool cpu_found = false;
-
- if (!argp)
- return -EINVAL;
-
- get_user(cluster, &argp->cluster);
- get_user(cpumask, &argp->cpumask);
-
- switch (cmd) {
- case EA_LEAKAGE:
- ret = update_userspace_power(argp);
- if (ret)
- pr_err("Userspace power update failed with %ld\n", ret);
- break;
- case EA_VOLT:
- for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
- for_each_possible_cpu(cpu) {
- if (((cpu_topology[cpu].core_id != i) ||
- (cpu_topology[cpu].cluster_id != cluster)))
- continue;
-
- cpu_found = true;
- break;
- }
- if (cpu_found)
- break;
- }
- if (cpu >= num_possible_cpus())
- break;
-
- mutex_lock(&policy_update_mutex);
- node = &activity[cpu];
- if (!node->sp->table) {
- ret = -EINVAL;
- goto unlock;
- }
- ret = copy_to_user((void __user *)&argp->voltage[0],
- node->sp->voltage,
- sizeof(uint32_t) * node->sp->num_of_freqs);
- if (ret)
- break;
- for (i = 0; i < node->sp->num_of_freqs; i++) {
- ret = copy_to_user((void __user *)&argp->freq[i],
- &node->sp->table[i].frequency,
- sizeof(uint32_t));
- if (ret)
- break;
- }
-unlock:
- mutex_unlock(&policy_update_mutex);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long msm_core_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return msm_core_ioctl(file, cmd, arg);
-}
-#endif
-
-static int msm_core_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int msm_core_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int msm_core_stats_init(struct device *dev, int cpu)
-{
- int i;
- struct cpu_activity_info *cpu_node;
- struct cpu_pstate_pwr *pstate = NULL;
-
- cpu_node = &activity[cpu];
- cpu_stats[cpu].cpu = cpu;
- cpu_stats[cpu].temp = cpu_node->temp;
- cpu_stats[cpu].throttling = false;
-
- cpu_stats[cpu].len = cpu_node->sp->num_of_freqs;
- pstate = devm_kzalloc(dev,
- sizeof(*pstate) * cpu_node->sp->num_of_freqs,
- GFP_KERNEL);
- if (!pstate)
- return -ENOMEM;
-
- for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
- pstate[i].freq = cpu_node->sp->table[i].frequency;
-
- per_cpu(ptable, cpu) = pstate;
-
- return 0;
-}
-
-static int msm_core_task_init(struct device *dev)
-{
- init_completion(&sampling_completion);
- sampling_task = kthread_run(do_sampling, NULL, "msm-core:sampling");
- if (IS_ERR(sampling_task)) {
- pr_err("Failed to create do_sampling err: %ld\n",
- PTR_ERR(sampling_task));
- return PTR_ERR(sampling_task);
- }
- return 0;
-}
-
-struct cpu_pwr_stats *get_cpu_pwr_stats(void)
-{
- return cpu_stats;
-}
-EXPORT_SYMBOL(get_cpu_pwr_stats);
-
-static int msm_get_power_values(int cpu, struct cpu_static_info *sp)
-{
- int i = 0, j;
- int ret = 0;
- uint64_t power;
-
- /* Calculate dynamic power spent for every frequency using formula:
- * Power = V * V * f
- * where V = voltage for frequency
- * f = frequency
- */
- sp->power = allocate_2d_array_uint32_t(sp->num_of_freqs);
- if (IS_ERR_OR_NULL(sp->power))
- return PTR_ERR(sp->power);
-
- for (i = 0; i < TEMP_DATA_POINTS; i++) {
- for (j = 0; j < sp->num_of_freqs; j++) {
- power = sp->voltage[j] *
- sp->table[j].frequency;
- do_div(power, 1000);
- do_div(power, 1000);
- power *= sp->voltage[j];
- do_div(power, 1000);
- sp->power[i][j] = power;
- }
- }
- return ret;
-}
-
-static int msm_get_voltage_levels(struct device *dev, int cpu,
- struct cpu_static_info *sp)
-{
- unsigned int *voltage;
- int i;
- int corner;
- struct dev_pm_opp *opp;
- struct device *cpu_dev = get_cpu_device(cpu);
- /*
- * Convert cpr corner voltage to average voltage of both
- * a53 and a57 votlage value
- */
- int average_voltage[NUM_OF_CORNERS] = {0, 746, 841, 843, 940, 953, 976,
- 1024, 1090, 1100};
-
- if (!cpu_dev)
- return -ENODEV;
-
- voltage = devm_kzalloc(dev,
- sizeof(*voltage) * sp->num_of_freqs, GFP_KERNEL);
-
- if (!voltage)
- return -ENOMEM;
-
- rcu_read_lock();
- for (i = 0; i < sp->num_of_freqs; i++) {
- opp = dev_pm_opp_find_freq_exact(cpu_dev,
- sp->table[i].frequency * 1000, true);
- corner = dev_pm_opp_get_voltage(opp);
-
- if (corner > 400000)
- voltage[i] = corner / 1000;
- else if (corner > 0 && corner < ARRAY_SIZE(average_voltage))
- voltage[i] = average_voltage[corner];
- else
- voltage[i]
- = average_voltage[ARRAY_SIZE(average_voltage) - 1];
- }
- rcu_read_unlock();
-
- sp->voltage = voltage;
- return 0;
-}
-
-static int msm_core_dyn_pwr_init(struct platform_device *pdev,
- int cpu)
-{
- int ret = 0;
-
- if (!activity[cpu].sp->table)
- return 0;
-
- ret = msm_get_voltage_levels(&pdev->dev, cpu, activity[cpu].sp);
- if (ret)
- return ret;
-
- ret = msm_get_power_values(cpu, activity[cpu].sp);
-
- return ret;
-}
-
-static int msm_core_mpidr_init(struct device_node *phandle)
-{
- int ret = 0;
- char *key = NULL;
- int mpidr;
-
- key = "reg";
- ret = of_property_read_u32(phandle, key,
- &mpidr);
- if (ret) {
- pr_err("%s: Cannot read mpidr\n", __func__);
- return ret;
- }
- return mpidr;
-}
-
-static int msm_core_cpu_policy_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_policy *policy = data;
- struct cpu_activity_info *cpu_info = &activity[policy->cpu];
- int cpu;
- int ret;
-
- if (cpu_info->sp->table)
- return NOTIFY_OK;
-
- switch (val) {
- case CPUFREQ_CREATE_POLICY:
- mutex_lock(&policy_update_mutex);
- update_related_freq_table(policy);
-
- for_each_cpu(cpu, policy->related_cpus) {
- ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
- if (ret)
- pr_debug("voltage-pwr table update failed\n");
-
- ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
- if (ret)
- pr_debug("Stats table update failed\n");
- }
- mutex_unlock(&policy_update_mutex);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-struct notifier_block cpu_policy = {
- .notifier_call = msm_core_cpu_policy_handler
-};
-
-static int system_suspend_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- int cpu;
-
- mutex_lock(&kthread_update_mutex);
- switch (val) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- case PM_POST_RESTORE:
- /*
- * Set completion event to read temperature and repopulate
- * stats
- */
- in_suspend = 0;
- complete(&sampling_completion);
- break;
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- /*
- * cancel delayed work to be able to restart immediately
- * after system resume
- */
- in_suspend = 1;
- cancel_delayed_work(&sampling_work);
- /*
- * cancel TSENS interrupts as we do not want to wake up from
- * suspend to take care of repopulate stats while the system is
- * in suspend
- */
- for_each_possible_cpu(cpu) {
- if (activity[cpu].sensor_id < 0)
- continue;
- }
- break;
- default:
- break;
- }
- mutex_unlock(&kthread_update_mutex);
-
- return NOTIFY_OK;
-}
-
-static int msm_core_freq_init(void)
-{
- int cpu;
- struct cpufreq_policy *policy;
-
- for_each_possible_cpu(cpu) {
- activity[cpu].sp = kzalloc(sizeof(*(activity[cpu].sp)),
- GFP_KERNEL);
- if (!activity[cpu].sp)
- return -ENOMEM;
- }
-
- for_each_online_cpu(cpu) {
- if (activity[cpu].sp->table)
- continue;
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- continue;
-
- update_related_freq_table(policy);
- cpufreq_cpu_put(policy);
- }
-
- return 0;
-}
-
-static int msm_core_params_init(struct platform_device *pdev)
-{
- int ret = 0;
- unsigned long cpu = 0;
- struct device_node *child_node = NULL;
- int mpidr;
-
- for_each_possible_cpu(cpu) {
- child_node = of_get_cpu_node(cpu, NULL);
-
- if (!child_node)
- continue;
-
- mpidr = msm_core_mpidr_init(child_node);
- if (mpidr < 0)
- return mpidr;
-
- activity[cpu].mpidr = mpidr;
-
- if (!activity[cpu].sp->table)
- continue;
-
- ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
- if (ret)
- pr_debug("voltage-pwr table update failed\n");
-
- ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
- if (ret)
- pr_debug("Stats table update failed\n");
- }
-
- return 0;
-}
-
-static const struct file_operations msm_core_ops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = msm_core_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = msm_core_compat_ioctl,
-#endif
- .open = msm_core_open,
- .release = msm_core_release,
-};
-
-static struct miscdevice msm_core_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "pta",
- .fops = &msm_core_ops
-};
-
-static void free_dyn_memory(void)
-{
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- if (activity[cpu].sp) {
- for (i = 0; i < TEMP_DATA_POINTS; i++) {
- if (!activity[cpu].sp->power)
- break;
-
- kfree(activity[cpu].sp->power[i]);
- }
- }
- kfree(activity[cpu].sp);
- }
-}
-
-static int msm_core_dev_probe(struct platform_device *pdev)
-{
- int ret = 0;
- char *key = NULL;
- struct device_node *node;
- struct uio_info *info;
-
- if (!pdev)
- return -ENODEV;
-
- msm_core_pdev = pdev;
- node = pdev->dev.of_node;
- if (!node)
- return -ENODEV;
-
- key = "qcom,low-hyst-temp";
- ret = of_property_read_u32(node, key, &low_hyst_temp);
- if (ret)
- low_hyst_temp = DEFAULT_LOW_HYST_TEMP;
-
- key = "qcom,high-hyst-temp";
- ret = of_property_read_u32(node, key, &high_hyst_temp);
- if (ret)
- high_hyst_temp = DEFAULT_HIGH_HYST_TEMP;
-
- key = "qcom,polling-interval";
- ret = of_property_read_u32(node, key, &poll_ms);
- if (ret)
- pr_info("msm-core initialized without polling period\n");
-
- key = "qcom,throttling-temp";
- ret = of_property_read_u32(node, key, &max_throttling_temp);
-
- ret = msm_core_freq_init();
- if (ret)
- goto failed;
-
- ret = misc_register(&msm_core_device);
- if (ret) {
- pr_err("%s: Error registering device %d\n", __func__, ret);
- goto failed;
- }
-
- ret = msm_core_params_init(pdev);
- if (ret)
- goto failed;
-
- ret = msm_core_task_init(&pdev->dev);
- if (ret)
- goto failed;
-
- INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle);
- schedule_delayed_work(&sampling_work, msecs_to_jiffies(0));
- cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER);
- pm_notifier(system_suspend_handler, 0);
- return 0;
-failed:
- info = dev_get_drvdata(&pdev->dev);
- uio_unregister_device(info);
- free_dyn_memory();
- return ret;
-}
-
-static int msm_core_remove(struct platform_device *pdev)
-{
- int cpu;
- struct uio_info *info = dev_get_drvdata(&pdev->dev);
-
- uio_unregister_device(info);
-
- for_each_possible_cpu(cpu) {
- if (activity[cpu].sensor_id < 0)
- continue;
- }
- free_dyn_memory();
- misc_deregister(&msm_core_device);
- return 0;
-}
-
-static const struct of_device_id msm_core_match_table[] = {
- {.compatible = "qcom,apss-core-ea"},
- {},
-};
-
-static struct platform_driver msm_core_driver = {
- .probe = msm_core_dev_probe,
- .driver = {
- .name = "msm_core",
- .owner = THIS_MODULE,
- .of_match_table = msm_core_match_table,
- },
- .remove = msm_core_remove,
-};
-
-static int __init msm_core_init(void)
-{
- return platform_driver_register(&msm_core_driver);
-}
-late_initcall(msm_core_init);
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 6d6b9f7..d23b050 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -87,7 +87,7 @@
#define MSS_STATUS (0x40)
#define QDSP6SS_SLEEP (0x3C)
#define SLEEP_CHECK_MAX_LOOPS (200)
-#define BOOT_FSM_TIMEOUT (100)
+#define BOOT_FSM_TIMEOUT (10000)
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
@@ -411,7 +411,7 @@ static int __pil_q6v65_reset(struct pil_desc *pil)
/* Wait for boot FSM to complete */
ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
- (val & BIT(1)) != 0, 10, BOOT_FSM_TIMEOUT);
+ (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
if (ret) {
dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
index e4ada03..86f314a 100644
--- a/drivers/soc/qcom/qbt1000.c
+++ b/drivers/soc/qcom/qbt1000.c
@@ -150,18 +150,17 @@ static int get_cmd_rsp_buffers(struct qseecom_handle *hdl,
uint32_t *rsp_len)
{
/* 64 bytes alignment for QSEECOM */
- *cmd_len = ALIGN(*cmd_len, 64);
- *rsp_len = ALIGN(*rsp_len, 64);
+ uint64_t aligned_cmd_len = ALIGN((uint64_t)*cmd_len, 64);
+ uint64_t aligned_rsp_len = ALIGN((uint64_t)*rsp_len, 64);
- if (((uint64_t)*rsp_len + (uint64_t)*cmd_len)
- > (uint64_t)g_app_buf_size) {
- pr_err("buffer too small to hold cmd=%d and rsp=%d\n",
- *cmd_len, *rsp_len);
+ if ((aligned_rsp_len + aligned_cmd_len) > (uint64_t)g_app_buf_size)
return -ENOMEM;
- }
*cmd = hdl->sbuf;
+ *cmd_len = aligned_cmd_len;
*rsp = hdl->sbuf + *cmd_len;
+ *rsp_len = aligned_rsp_len;
+
return 0;
}
@@ -318,6 +317,12 @@ static long qbt1000_ioctl(
drvdata = file->private_data;
+ if (IS_ERR(priv_arg)) {
+ dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n",
+ __func__, arg);
+ return -EINVAL;
+ }
+
mutex_lock(&drvdata->mutex);
pr_debug("qbt1000_ioctl %d\n", cmd);
@@ -362,6 +367,7 @@ static long qbt1000_ioctl(
}
pr_debug("app %s load before\n", app.name);
+ app.name[MAX_NAME_SIZE - 1] = '\0';
/* start the TZ app */
rc = qseecom_start_app(
@@ -375,7 +381,8 @@ static long qbt1000_ioctl(
pr_err("App %s failed to set bw\n", app.name);
}
} else {
- pr_err("app %s failed to load\n", app.name);
+ dev_err(drvdata->dev, "%s: Fingerprint Trusted App failed to load\n",
+ __func__);
goto end;
}
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
new file mode 100644
index 0000000..8668155
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -0,0 +1,463 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "QDSS diag bridge"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include <linux/msm_mhi.h>
+#include <linux/usb/usb_qdss.h>
+#include "qdss_bridge.h"
+
+#define MODULE_NAME "qdss_bridge"
+
+#define QDSS_BUF_SIZE (16*1024)
+#define MHI_CLIENT_QDSS_IN 9
+
+/* Max number of objects needed */
+static int poolsize = 32;
+module_param(poolsize, int, 0644);
+
+/* Size of single buffer */
+static int itemsize = QDSS_BUF_SIZE;
+module_param(itemsize, int, 0644);
+
+static int qdss_destroy_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+ struct list_head *start, *temp;
+ struct qdss_buf_tbl_lst *entry = NULL;
+
+ list_for_each_safe(start, temp, &drvdata->buf_tbl) {
+ entry = list_entry(start, struct qdss_buf_tbl_lst, link);
+ list_del(&entry->link);
+ kfree(entry->buf);
+ kfree(entry->usb_req);
+ kfree(entry);
+ }
+
+ return 0;
+}
+
+static int qdss_create_buf_tbl(struct qdss_bridge_drvdata *drvdata)
+{
+ struct qdss_buf_tbl_lst *entry;
+ void *buf;
+ struct qdss_request *usb_req;
+ int i;
+
+ for (i = 0; i < poolsize; i++) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err;
+
+ buf = kzalloc(QDSS_BUF_SIZE, GFP_KERNEL);
+ usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
+
+ entry->buf = buf;
+ entry->usb_req = usb_req;
+ atomic_set(&entry->available, 1);
+ list_add_tail(&entry->link, &drvdata->buf_tbl);
+
+ if (!buf || !usb_req)
+ goto err;
+ }
+
+ return 0;
+err:
+ qdss_destroy_buf_tbl(drvdata);
+ return -ENOMEM;
+}
+
+struct qdss_buf_tbl_lst *qdss_get_buf_tbl_entry(
+ struct qdss_bridge_drvdata *drvdata,
+ void *buf)
+{
+ struct qdss_buf_tbl_lst *entry;
+
+ list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+ if (atomic_read(&entry->available))
+ continue;
+ if (entry->buf == buf)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct qdss_buf_tbl_lst *qdss_get_entry(struct qdss_bridge_drvdata *drvdata)
+{
+ struct qdss_buf_tbl_lst *item;
+
+ list_for_each_entry(item, &drvdata->buf_tbl, link)
+ if (atomic_cmpxchg(&item->available, 1, 0) == 1)
+ return item;
+
+ return NULL;
+}
+
+static void qdss_buf_tbl_remove(struct qdss_bridge_drvdata *drvdata,
+ void *buf)
+{
+ struct qdss_buf_tbl_lst *entry = NULL;
+
+ list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+ if (entry->buf != buf)
+ continue;
+ atomic_set(&entry->available, 1);
+ return;
+ }
+
+ pr_err_ratelimited("Failed to find buffer for removal\n");
+}
+
+static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata)
+{
+ flush_workqueue(drvdata->mhi_wq);
+ qdss_destroy_buf_tbl(drvdata);
+ mhi_close_channel(drvdata->hdl);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ close_work);
+
+ usb_qdss_close(drvdata->usb_ch);
+ mhi_ch_close(drvdata);
+}
+
+static void mhi_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ struct qdss_buf_tbl_lst *entry;
+
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ read_work);
+
+ do {
+ if (!drvdata->opened)
+ break;
+ entry = qdss_get_entry(drvdata);
+ if (!entry)
+ break;
+
+ err = mhi_queue_xfer(drvdata->hdl, entry->buf, QDSS_BUF_SIZE,
+ mhi_flags);
+ if (err) {
+ pr_err_ratelimited("Unable to read from MHI buffer err:%d",
+ err);
+ goto fail;
+ }
+ } while (entry);
+
+ return;
+fail:
+ qdss_buf_tbl_remove(drvdata, entry->buf);
+ queue_work(drvdata->mhi_wq, &drvdata->read_work);
+}
+
+static int mhi_queue_read(struct qdss_bridge_drvdata *drvdata)
+{
+ queue_work(drvdata->mhi_wq, &(drvdata->read_work));
+ return 0;
+}
+
+static int usb_write(struct qdss_bridge_drvdata *drvdata,
+ struct mhi_result *result)
+{
+ int ret = 0;
+ struct qdss_buf_tbl_lst *entry;
+
+ entry = qdss_get_buf_tbl_entry(drvdata, result->buf_addr);
+ if (!entry)
+ return -EINVAL;
+
+ entry->usb_req->buf = result->buf_addr;
+ entry->usb_req->length = result->bytes_xferd;
+ ret = usb_qdss_data_write(drvdata->usb_ch, entry->usb_req);
+
+ return ret;
+}
+
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+ unsigned char *buf = NULL;
+ struct mhi_result result;
+ int err = 0;
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ read_done_work);
+
+ do {
+ err = mhi_poll_inbound(drvdata->hdl, &result);
+ if (err) {
+ pr_debug("MHI poll failed err:%d\n", err);
+ break;
+ }
+ buf = result.buf_addr;
+ if (!buf)
+ break;
+ err = usb_write(drvdata, &result);
+ if (err)
+ qdss_buf_tbl_remove(drvdata, buf);
+ } while (1);
+}
+
+static void usb_write_done(struct qdss_bridge_drvdata *drvdata,
+ struct qdss_request *d_req)
+{
+ if (d_req->status) {
+ pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
+ mhi_queue_read(drvdata);
+ return;
+ }
+ qdss_buf_tbl_remove(drvdata, d_req->buf);
+ mhi_queue_read(drvdata);
+}
+
+static void usb_notifier(void *priv, unsigned int event,
+ struct qdss_request *d_req, struct usb_qdss_ch *ch)
+{
+ struct qdss_bridge_drvdata *drvdata = priv;
+
+ if (!drvdata)
+ return;
+
+ switch (event) {
+ case USB_QDSS_CONNECT:
+ usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0);
+ mhi_queue_read(drvdata);
+ break;
+
+ case USB_QDSS_DISCONNECT:
+		/* Leave MHI/USB open. Only close on MHI disconnect */
+ break;
+
+ case USB_QDSS_DATA_WRITE_DONE:
+ usb_write_done(drvdata, d_req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int mhi_ch_open(struct qdss_bridge_drvdata *drvdata)
+{
+ int ret;
+
+ if (drvdata->opened)
+ return 0;
+
+ ret = mhi_open_channel(drvdata->hdl);
+ if (ret) {
+ pr_err("Unable to open MHI channel\n");
+ return ret;
+ }
+
+ ret = mhi_get_free_desc(drvdata->hdl);
+ if (ret <= 0)
+ return -EIO;
+
+ drvdata->opened = 1;
+ return 0;
+}
+
+static void qdss_bridge_open_work_fn(struct work_struct *work)
+{
+ struct qdss_bridge_drvdata *drvdata =
+ container_of(work,
+ struct qdss_bridge_drvdata,
+ open_work);
+ int ret;
+
+ ret = mhi_ch_open(drvdata);
+ if (ret)
+ goto err_open;
+
+ ret = qdss_create_buf_tbl(drvdata);
+ if (ret)
+ goto err;
+
+ drvdata->usb_ch = usb_qdss_open("qdss_mdm", drvdata, usb_notifier);
+ if (IS_ERR_OR_NULL(drvdata->usb_ch)) {
+ ret = PTR_ERR(drvdata->usb_ch);
+ goto err;
+ }
+
+ return;
+err:
+ mhi_ch_close(drvdata);
+err_open:
+ pr_err("Open work failed with err:%d\n", ret);
+}
+
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+ struct mhi_result *result;
+ struct qdss_bridge_drvdata *drvdata;
+
+ if (!cb_info)
+ return;
+
+ result = cb_info->result;
+ if (!result) {
+ pr_err_ratelimited("Failed to obtain MHI result\n");
+ return;
+ }
+
+ drvdata = (struct qdss_bridge_drvdata *)cb_info->result->user_data;
+ if (!drvdata) {
+ pr_err_ratelimited("MHI returned invalid drvdata\n");
+ return;
+ }
+
+ switch (cb_info->cb_reason) {
+ case MHI_CB_MHI_ENABLED:
+ queue_work(drvdata->mhi_wq, &drvdata->open_work);
+ break;
+
+ case MHI_CB_XFER:
+ if (!drvdata->opened)
+ break;
+
+ queue_work(drvdata->mhi_wq, &drvdata->read_done_work);
+ break;
+
+ case MHI_CB_MHI_DISABLED:
+ if (!drvdata->opened)
+ break;
+
+ drvdata->opened = 0;
+ queue_work(drvdata->mhi_wq, &drvdata->close_work);
+ break;
+
+ default:
+ pr_err_ratelimited("MHI returned invalid cb reason 0x%x\n",
+ cb_info->cb_reason);
+ break;
+ }
+}
+
+static int qdss_mhi_register_ch(struct qdss_bridge_drvdata *drvdata)
+{
+ struct mhi_client_info_t *client_info;
+ int ret;
+ struct mhi_client_info_t *mhi_info;
+
+ client_info = devm_kzalloc(drvdata->dev, sizeof(*client_info),
+ GFP_KERNEL);
+ if (!client_info)
+ return -ENOMEM;
+
+ client_info->mhi_client_cb = mhi_notifier;
+ drvdata->client_info = client_info;
+
+ mhi_info = client_info;
+ mhi_info->chan = MHI_CLIENT_QDSS_IN;
+ mhi_info->dev = drvdata->dev;
+ mhi_info->node_name = "qcom,mhi";
+ mhi_info->user_data = drvdata;
+
+ ret = mhi_register_channel(&drvdata->hdl, mhi_info);
+ return ret;
+}
+
+int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata)
+{
+ int ret;
+
+ drvdata->mhi_wq = create_singlethread_workqueue(MODULE_NAME);
+ if (!drvdata->mhi_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&(drvdata->read_work), mhi_read_work_fn);
+ INIT_WORK(&(drvdata->read_done_work), mhi_read_done_work_fn);
+ INIT_WORK(&(drvdata->open_work), qdss_bridge_open_work_fn);
+ INIT_WORK(&(drvdata->close_work), mhi_close_work_fn);
+ INIT_LIST_HEAD(&drvdata->buf_tbl);
+ drvdata->opened = 0;
+
+ ret = qdss_mhi_register_ch(drvdata);
+ if (ret) {
+ destroy_workqueue(drvdata->mhi_wq);
+ pr_err("Unable to register MHI read channel err:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qdss_mhi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct qdss_bridge_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+
+ ret = qdss_mhi_init(drvdata);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ pr_err("Device probe failed err:%d\n", ret);
+ return ret;
+}
+
+static const struct of_device_id qdss_mhi_table[] = {
+ {.compatible = "qcom,qdss-mhi"},
+ {},
+};
+
+static struct platform_driver qdss_mhi_driver = {
+ .probe = qdss_mhi_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qdss_mhi_table,
+ },
+};
+
+static int __init qdss_bridge_init(void)
+{
+ return platform_driver_register(&qdss_mhi_driver);
+}
+
+static void __exit qdss_bridge_exit(void)
+{
+ platform_driver_unregister(&qdss_mhi_driver);
+}
+
+module_init(qdss_bridge_init);
+module_exit(qdss_bridge_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QDSS Bridge driver");
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
new file mode 100644
index 0000000..97b9c40
--- /dev/null
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QDSS_BRIDGE_H
+#define _QDSS_BRIDGE_H
+
+struct qdss_buf_tbl_lst {
+ struct list_head link;
+ unsigned char *buf;
+ struct qdss_request *usb_req;
+ atomic_t available;
+};
+
+struct qdss_bridge_drvdata {
+ struct device *dev;
+ bool opened;
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *mhi_wq;
+ struct mhi_client_handle *hdl;
+ struct mhi_client_info_t *client_info;
+ struct list_head buf_tbl;
+ struct usb_qdss_ch *usb_ch;
+};
+
+#endif
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index ac5cc54..492b68c 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -764,7 +764,7 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
return scm_remap_error(ret);
return ret;
}
-
+EXPORT_SYMBOL(scm_call2_atomic);
/**
* scm_call() - Send an SCM command
* @svc_id: service identifier
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 6553ac0..5289cd0 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -212,6 +212,7 @@ int hyp_assign_table(struct sg_table *table,
kfree(source_vm_copy);
return ret;
}
+EXPORT_SYMBOL(hyp_assign_table);
int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
int source_nelems, int *dest_vmids,
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 57f38d3..9dfe281 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -149,11 +149,10 @@ static void service_locator_recv_msg(struct work_struct *work)
do {
pr_debug("Notified about a Receive event\n");
- ret = qmi_recv_msg(service_locator.clnt_handle);
- if (ret < 0)
- pr_err("Error receiving message rc:%d. Retrying...\n",
- ret);
- } while (ret == 0);
+ } while ((ret = qmi_recv_msg(service_locator.clnt_handle)) == 0);
+
+ if (ret != -ENOMSG)
+ pr_err("Error receiving message rc:%d\n", ret);
}
@@ -190,7 +189,7 @@ static int servreg_loc_send_msg(struct msg_desc *req_desc,
*/
rc = qmi_send_req_wait(service_locator.clnt_handle, req_desc, req,
sizeof(*req), resp_desc, resp, sizeof(*resp),
- msecs_to_jiffies(QMI_SERVREG_LOC_SERVER_TIMEOUT));
+ QMI_SERVREG_LOC_SERVER_TIMEOUT);
if (rc < 0) {
pr_err("QMI send req failed for client %s, ret - %d\n",
pd->client_name, rc);
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index c35119c..9af39e1 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -66,6 +66,7 @@ enum {
HW_PLATFORM_RCM = 21,
HW_PLATFORM_STP = 23,
HW_PLATFORM_SBC = 24,
+ HW_PLATFORM_HDK = 31,
HW_PLATFORM_INVALID
};
@@ -86,6 +87,7 @@ const char *hw_platform[] = {
[HW_PLATFORM_DTV] = "DTV",
[HW_PLATFORM_STP] = "STP",
[HW_PLATFORM_SBC] = "SBC",
+ [HW_PLATFORM_HDK] = "HDK",
};
enum {
@@ -578,6 +580,13 @@ static struct msm_soc_info cpu_of_id[] = {
/* SDA670 ID */
[337] = {MSM_CPU_SDA670, "SDA670"},
+ /* 8953 ID */
+ [293] = {MSM_CPU_8953, "MSM8953"},
+ [304] = {MSM_CPU_8953, "APQ8053"},
+
+ /* SDM450 ID */
+ [338] = {MSM_CPU_SDM450, "SDM450"},
+
/* Uninitialized IDs are not known to run Linux.
* MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
* considered as unknown CPU.
@@ -1452,6 +1461,14 @@ static void * __init setup_dummy_socinfo(void)
dummy_socinfo.id = 334;
strlcpy(dummy_socinfo.build_id, "sdxpoorwills - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_msm8953()) {
+ dummy_socinfo.id = 293;
+ strlcpy(dummy_socinfo.build_id, "msm8953 - ",
+ sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_sdm450()) {
+ dummy_socinfo.id = 338;
+ strlcpy(dummy_socinfo.build_id, "sdm450 - ",
+ sizeof(dummy_socinfo.build_id));
}
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 68681f9..119a788 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -506,6 +506,7 @@ static void spcom_notify_state(void *handle, const void *priv,
* We do it here, ASAP, to allow rx data.
*/
+ ch->rx_abort = false; /* cleanup from previous close */
pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
if (ret) {
@@ -579,7 +580,10 @@ static bool spcom_notify_rx_intent_req(void *handle, const void *priv,
* spcom_notify_rx_abort() - glink callback on aborting rx pending buffer.
*
* Rx abort may happen if channel is closed by remote side, while rx buffer is
- * pending in the queue.
+ * pending in the queue, like upon SP reset (SSR).
+ *
+ * A more common scenario is when an rx intent is queued (for the next transfer),
+ * and the channel is closed locally.
*/
static void spcom_notify_rx_abort(void *handle, const void *priv,
const void *pkt_priv)
@@ -593,7 +597,10 @@ static void spcom_notify_rx_abort(void *handle, const void *priv,
pr_debug("ch [%s] pending rx aborted.\n", ch->name);
- if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
+ /* ignore rx-abort after local channel disconnect,
+ * so check that the channel is connected.
+ */
+ if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
ch->rx_abort = true;
complete_all(&ch->rx_done);
}
@@ -953,6 +960,7 @@ static int spcom_rx(struct spcom_channel *ch,
return -ETIMEDOUT;
} else if (ch->rx_abort) {
mutex_unlock(&ch->lock);
+ pr_err("rx_abort, probably remote side reset (SSR).\n");
return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 3ea4be6..d65756c 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1127,23 +1127,55 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_status");
d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_status)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_status\n");
+ rc = PTR_ERR(d->irq_status);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_clr");
d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_clear)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_clr\n");
+ rc = PTR_ERR(d->irq_clear);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"sp2soc_irq_mask");
d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->irq_mask)) {
+ dev_err(&pdev->dev, "Invalid resource for sp2soc_irq_mask\n");
+ rc = PTR_ERR(d->irq_mask);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_err");
d->err_status = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->err_status)) {
+ dev_err(&pdev->dev, "Invalid resource for rmb_err\n");
+ rc = PTR_ERR(d->err_status);
+ goto err_ramdump;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"rmb_err_spare2");
d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(d->err_status_spare)) {
+ dev_err(&pdev->dev, "Invalid resource for rmb_err_spare2\n");
+ rc = PTR_ERR(d->err_status_spare);
+ goto err_ramdump;
+ }
+
rc = of_property_read_u32_array(pdev->dev.of_node,
"qcom,spss-scsr-bits", d->bits_arr, sizeof(d->bits_arr)/
sizeof(d->bits_arr[0]));
- if (rc)
+ if (rc) {
dev_err(&pdev->dev, "Failed to read qcom,spss-scsr-bits");
+ goto err_ramdump;
+ }
mask_scsr_irqs(d);
} else {
@@ -1186,6 +1218,7 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
destroy_ramdump_device(d->ramdump_dev);
err_ramdump:
pil_desc_release(&d->desc);
+ platform_set_drvdata(pdev, NULL);
return rc;
}
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 55cb604..110cdf7 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -655,13 +655,16 @@ static int subsystem_powerup(struct subsys_device *dev, void *data)
if (ret < 0) {
notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
NULL);
- if (!dev->desc->ignore_ssr_failure) {
+ if (system_state == SYSTEM_RESTART
+ || system_state == SYSTEM_POWER_OFF)
+ WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
+ name);
+ else if (!dev->desc->ignore_ssr_failure)
panic("[%s:%d]: Powerup error: %s!",
current->comm, current->pid, name);
- } else {
+ else
pr_err("Powerup failure on %s\n", name);
- return ret;
- }
+ return ret;
}
enable_all_irqs(dev);
@@ -1174,6 +1177,7 @@ void subsys_set_crash_status(struct subsys_device *dev,
{
dev->crashed = crashed;
}
+EXPORT_SYMBOL(subsys_set_crash_status);
enum crash_status subsys_get_crash_status(struct subsys_device *dev)
{
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 2ecbf15..3d978f7 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <asm/arch_timer.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/system_pm.h>
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14f9dea..7d629b4 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1215,7 +1215,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
} else {
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1237,7 +1237,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
}
@@ -1245,7 +1245,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
for (val = 0; val < num_irqs; val++) {
@@ -1334,8 +1334,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
- spi_master_put(master);
kfree(qspi->dev_ids);
+qspi_resource_err:
+ spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index d6239fa..3f3751e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1458,6 +1458,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
+ /* GLK */
+ { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index b29e60d..d6089aa 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -944,8 +944,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa)
* multiple EE's to write to a single PPID in arbiter version 5, there
* is more than one APID mapped to each PPID. The owner field for each
* of these mappings specifies the EE which is allowed to write to the
- * APID. The owner of the last (highest) APID for a given PPID will
- * receive interrupts from the PPID.
+ * APID. The owner of the last (highest) APID which has the IRQ owner
+ * bit set for a given PPID will receive interrupts from the PPID.
*/
for (apid = 0; apid < pa->max_periph; apid++) {
offset = pa->ver_ops->channel_map_offset(apid);
@@ -969,7 +969,10 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa)
valid = pa->ppid_to_apid[ppid] & PMIC_ARB_CHAN_VALID;
prev_apid = pa->ppid_to_apid[ppid] & ~PMIC_ARB_CHAN_VALID;
- if (valid && is_irq_owner &&
+ if (!valid || pa->apid_data[apid].write_owner == pa->ee) {
+ /* First PPID mapping or one for this EE */
+ pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
+ } else if (valid && is_irq_owner &&
pa->apid_data[prev_apid].write_owner == pa->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
@@ -977,9 +980,6 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa)
*/
pa->apid_data[prev_apid].irq_owner
= pa->apid_data[apid].irq_owner;
- } else if (!valid || is_irq_owner) {
- /* First PPID mapping or duplicate for another EE */
- pa->ppid_to_apid[ppid] = apid | PMIC_ARB_CHAN_VALID;
}
pa->apid_data[apid].ppid = ppid;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0948c22..720ac31 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -215,7 +215,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->dev = dev;
buffer->size = len;
- buffer->flags = flags;
INIT_LIST_HEAD(&buffer->vmas);
table = heap->ops->map_dma(heap, buffer);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index b264ec2..72f2b6a 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
#include <soc/qcom/secure_buffer.h>
@@ -56,9 +57,22 @@ static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ sg_dma_address(sgt->sgl) = sg_phys(sgt->sgl);
return 0;
}
+static bool ion_cma_has_kernel_mapping(struct ion_heap *heap)
+{
+ struct device *dev = heap->priv;
+ struct device_node *mem_region;
+
+ mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (IS_ERR(mem_region))
+ return false;
+
+ return !of_property_read_bool(mem_region, "no-map");
+}
+
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len, unsigned long align,
@@ -73,14 +87,20 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (!info)
return ION_CMA_ALLOCATE_FAILED;
+ /* Override flags if cached-mappings are not supported */
+ if (!ion_cma_has_kernel_mapping(heap)) {
+ flags &= ~((unsigned long)ION_FLAG_CACHED);
+ buffer->flags = flags;
+ }
+
if (!ION_IS_CACHED(flags))
info->cpu_addr = dma_alloc_writecombine(dev, len,
&info->handle,
GFP_KERNEL);
else
- info->cpu_addr = dma_alloc_nonconsistent(dev, len,
- &info->handle,
- GFP_KERNEL);
+ info->cpu_addr = dma_alloc_attrs(dev, len, &info->handle,
+ GFP_KERNEL,
+ DMA_ATTR_FORCE_COHERENT);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@@ -96,6 +116,11 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
ion_cma_get_sgtable(dev,
info->table, info->cpu_addr, info->handle, len);
+ /* Ensure memory is dma-ready - refer to ion_buffer_create() */
+ if (info->is_cached)
+ dma_sync_sg_for_device(dev, info->table->sgl,
+ info->table->nents, DMA_BIDIRECTIONAL);
+
/* keep this for memory release */
buffer->priv_virt = info;
dev_dbg(dev, "Allocate buffer %pK\n", buffer);
@@ -110,10 +135,13 @@ static void ion_cma_free(struct ion_buffer *buffer)
{
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
+ unsigned long attrs = 0;
dev_dbg(dev, "Release buffer %pK\n", buffer);
/* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ if (info->is_cached)
+ attrs |= DMA_ATTR_FORCE_COHERENT;
+ dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle, attrs);
sg_free_table(info->table);
/* release sg table */
kfree(info->table);
@@ -156,8 +184,9 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct ion_cma_buffer_info *info = buffer->priv_virt;
if (info->is_cached)
- return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
- info->handle, buffer->size);
+ return dma_mmap_attrs(dev, vma, info->cpu_addr,
+ info->handle, buffer->size,
+ DMA_ATTR_FORCE_COHERENT);
else
return dma_mmap_writecombine(dev, vma, info->cpu_addr,
info->handle, buffer->size);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b..7de992c 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -17,6 +17,7 @@
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include "../include/mc-bus.h"
+#include "fsl-mc-private.h"
/*
* Generate a unique ID identifying the interrupt (only used within the MSI
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac64..eaeb3c5 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include "../include/mc-bus.h"
+#include "fsl-mc-private.h"
static struct irq_chip its_msi_irq_chip = {
.name = "fsl-mc-bus-msi",
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1cf6b79..eeacb0e 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -222,11 +222,9 @@ static int ad7192_setup(struct ad7192_state *st,
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
unsigned long long scale_uv;
int i, ret, id;
- u8 ones[6];
/* reset the serial interface */
- memset(&ones, 0xFF, 6);
- ret = spi_write(st->sd.spi, &ones, 6);
+ ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
goto out;
usleep_range(500, 1000); /* Wait for at least 500us */
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 38dca69..ce500a5 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -260,7 +260,7 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
out1:
iio_trigger_unregister(st->trig);
out:
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return ret;
}
@@ -273,7 +273,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
peripheral_free(st->t->pin);
free_irq(st->irq, st);
iio_trigger_unregister(st->trig);
- iio_trigger_put(st->trig);
+ iio_trigger_free(st->trig);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc9855..e533088 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -1213,23 +1213,21 @@ struct hsm_action_item {
* \retval buffer
*/
static inline char *hai_dump_data_field(struct hsm_action_item *hai,
- char *buffer, int len)
+ char *buffer, size_t len)
{
- int i, sz, data_len;
+ int i, data_len;
char *ptr;
ptr = buffer;
- sz = len;
data_len = hai->hai_len - sizeof(*hai);
- for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
- int cnt;
-
- cnt = snprintf(ptr, sz, "%.2X",
- (unsigned char)hai->hai_data[i]);
- ptr += cnt;
- sz -= cnt;
+ for (i = 0; (i < data_len) && (len > 2); i++) {
+ snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
+ ptr += 2;
+ len -= 2;
}
+
*ptr = '\0';
+
return buffer;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4f..d18ab3f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -546,6 +546,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
if (!lock)
return NULL;
+ if (lock->l_export && lock->l_export->exp_failed) {
+ CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
+ lock, lock->l_export);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
+
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37..0cb70c3 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -354,6 +354,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
if (!lli->lli_has_smd)
return -EBADF;
+ /* Check EOF by ourselves */
+ if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
+ return 0;
+
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
return -EINVAL;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b9..cd19ce8 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -744,16 +744,18 @@ static int lmv_hsm_req_count(struct lmv_obd *lmv,
/* count how many requests must be sent to the given target */
for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
+ if (IS_ERR(curr_tgt))
+ return PTR_ERR(curr_tgt);
if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
nr++;
}
return nr;
}
-static void lmv_hsm_req_build(struct lmv_obd *lmv,
- struct hsm_user_request *hur_in,
- const struct lmv_tgt_desc *tgt_mds,
- struct hsm_user_request *hur_out)
+static int lmv_hsm_req_build(struct lmv_obd *lmv,
+ struct hsm_user_request *hur_in,
+ const struct lmv_tgt_desc *tgt_mds,
+ struct hsm_user_request *hur_out)
{
int i, nr_out;
struct lmv_tgt_desc *curr_tgt;
@@ -764,6 +766,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
curr_tgt = lmv_find_target(lmv,
&hur_in->hur_user_item[i].hui_fid);
+ if (IS_ERR(curr_tgt))
+ return PTR_ERR(curr_tgt);
if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
hur_out->hur_user_item[nr_out] =
hur_in->hur_user_item[i];
@@ -773,6 +777,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
hur_out->hur_request.hr_itemcount = nr_out;
memcpy(hur_data(hur_out), hur_data(hur_in),
hur_in->hur_request.hr_data_len);
+
+ return 0;
}
static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
@@ -1052,15 +1058,17 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
} else {
/* split fid list to their respective MDS */
for (i = 0; i < count; i++) {
- unsigned int nr, reqlen;
- int rc1;
struct hsm_user_request *req;
+ size_t reqlen;
+ int nr, rc1;
tgt = lmv->tgts[i];
if (!tgt || !tgt->ltd_exp)
continue;
nr = lmv_hsm_req_count(lmv, hur, tgt);
+ if (nr < 0)
+ return nr;
if (nr == 0) /* nothing for this MDS */
continue;
@@ -1072,10 +1080,13 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (!req)
return -ENOMEM;
- lmv_hsm_req_build(lmv, hur, tgt, req);
+ rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
+ if (rc1 < 0)
+ goto hsm_req_err;
rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
req, uarg);
+hsm_req_err:
if (rc1 != 0 && rc == 0)
rc = rc1;
kvfree(req);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f3930..9d34848 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1264,20 +1264,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
*/
if (req->rq_ops->hpreq_check) {
rc = req->rq_ops->hpreq_check(req);
- /**
- * XXX: Out of all current
- * ptlrpc_hpreq_ops::hpreq_check(), only
- * ldlm_cancel_hpreq_check() can return an error code;
- * other functions assert in similar places, which seems
- * odd. What also does not seem right is that handlers
- * for those RPCs do not assert on the same checks, but
- * rather handle the error cases. e.g. see
- * ost_rw_hpreq_check(), and ost_brw_read(),
- * ost_brw_write().
+ if (rc == -ESTALE) {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+ /** can only return error,
+ * 0 for normal request,
+ * or 1 for high priority request
*/
- if (rc < 0)
- return rc;
- LASSERT(rc == 0 || rc == 1);
+ LASSERT(rc <= 1);
}
spin_lock_bh(&req->rq_export->exp_rpc_lock);
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 67ab580..68fd65e 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -138,51 +138,51 @@ struct ieee_ibss_seq {
};
struct ieee80211_hdr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
-} __packed;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __packed;
+ __le16 seq_ctl;
+} __packed __aligned(2);
struct ieee80211_hdr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
+ __le16 seq_ctl;
u8 addr4[ETH_ALEN];
- u16 qc;
-} __packed;
+ __le16 qc;
+} __packed __aligned(2);
struct ieee80211_hdr_3addr_qos {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[ETH_ALEN];
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
- u16 seq_ctl;
- u16 qc;
+ __le16 seq_ctl;
+ __le16 qc;
} __packed;
struct eapol {
u8 snap[6];
- u16 ethertype;
+ __be16 ethertype;
u8 version;
u8 type;
- u16 length;
+ __le16 length;
} __packed;
enum eap_type {
@@ -514,13 +514,13 @@ struct ieee80211_security {
*/
struct ieee80211_header_data {
- u16 frame_ctl;
- u16 duration_id;
+ __le16 frame_ctl;
+ __le16 duration_id;
u8 addr1[6];
u8 addr2[6];
u8 addr3[6];
- u16 seq_ctrl;
-};
+ __le16 seq_ctrl;
+} __packed __aligned(2);
#define BEACON_PROBE_SSID_ID_POSITION 12
@@ -552,18 +552,18 @@ struct ieee80211_info_element {
/*
* These are the data types that can make up management packets
*
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
+ __le16 auth_algorithm;
+ __le16 auth_sequence;
+ __le16 beacon_interval;
+ __le16 capability;
u8 current_ap[ETH_ALEN];
- u16 listen_interval;
+ __le16 listen_interval;
struct {
u16 association_id:14, reserved:2;
} __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
+ __le32 time_stamp[2];
+ __le16 reason;
+ __le16 status;
*/
#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
@@ -571,16 +571,16 @@ struct ieee80211_info_element {
struct ieee80211_authentication {
struct ieee80211_header_data header;
- u16 algorithm;
- u16 transaction;
- u16 status;
+ __le16 algorithm;
+ __le16 transaction;
+ __le16 status;
} __packed;
struct ieee80211_probe_response {
struct ieee80211_header_data header;
- u32 time_stamp[2];
- u16 beacon_interval;
- u16 capability;
+ __le32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
struct ieee80211_info_element info_element;
} __packed;
@@ -590,16 +590,16 @@ struct ieee80211_probe_request {
struct ieee80211_assoc_request_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 listen_interval;
+ __le16 capability;
+ __le16 listen_interval;
struct ieee80211_info_element_hdr info_element;
} __packed;
struct ieee80211_assoc_response_frame {
struct ieee80211_hdr_3addr header;
- u16 capability;
- u16 status;
- u16 aid;
+ __le16 capability;
+ __le16 status;
+ __le16 aid;
} __packed;
struct ieee80211_txb {
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364..c478639 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -344,7 +344,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
* some settings above.
*/
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
+ pattrib->priority =
+ (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
return _SUCCESS;
}
@@ -485,7 +486,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- u16 *fctrl = &pwlanhdr->frame_ctl;
+ __le16 *fctrl = &pwlanhdr->frame_ctl;
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
@@ -574,7 +575,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto)
snap->oui[0] = oui[0];
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
return SNAP_SIZE + sizeof(u16);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 1091b9f..f72eebc 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -106,8 +106,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
- (void)of_property_read_u32(dev->of_node, "cache-line-size",
+ err = of_property_read_u32(dev->of_node, "cache-line-size",
&g_cache_line_size);
+
+ if (err) {
+ dev_err(dev, "Missing cache-line-size property\n");
+ return -ENODEV;
+ }
+
g_fragments_size = 2 * g_cache_line_size;
/* Allocate space for the channels in coherent memory */
@@ -538,18 +544,20 @@ free_pagelist(PAGELIST_T *pagelist, int actual)
if (head_bytes > actual)
head_bytes = actual;
- memcpy((char *)page_address(pages[0]) +
+ memcpy((char *)kmap(pages[0]) +
pagelist->offset,
fragments,
head_bytes);
+ kunmap(pages[0]);
}
if ((actual >= 0) && (head_bytes < actual) &&
(tail_bytes != 0)) {
- memcpy((char *)page_address(pages[num_pages - 1]) +
+ memcpy((char *)kmap(pages[num_pages - 1]) +
((pagelist->offset + actual) &
(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
fragments + g_cache_line_size,
tail_bytes);
+ kunmap(pages[num_pages - 1]);
}
down(&g_free_fragments_mutex);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index efc453e..ab92a1b 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 09f7f20..f25bade 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -32,6 +32,7 @@
#include <linux/cpu_cooling.h>
#include <linux/sched.h>
#include <linux/of_device.h>
+#include <linux/suspend.h>
#include <trace/events/thermal.h>
@@ -117,10 +118,12 @@ struct cpufreq_cooling_device {
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static atomic_t in_suspend;
static unsigned int cpufreq_dev_count;
static int8_t cpuhp_registered;
static struct work_struct cpuhp_register_work;
static struct cpumask cpus_pending_online;
+static struct cpumask cpus_isolated_by_thermal;
static DEFINE_MUTEX(core_isolate_lock);
static DEFINE_MUTEX(cooling_list_lock);
@@ -218,6 +221,51 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
+static int cpufreq_cooling_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ struct cpufreq_cooling_device *cpufreq_dev;
+ unsigned int cpu;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ atomic_set(&in_suspend, 1);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ mutex_lock(&cooling_list_lock);
+ mutex_lock(&core_isolate_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (cpufreq_dev->cpufreq_state ==
+ cpufreq_dev->max_level) {
+ cpu = cpumask_any(&cpufreq_dev->allowed_cpus);
+ if (cpu_online(cpu) &&
+ !cpumask_test_and_set_cpu(cpu,
+ &cpus_isolated_by_thermal)) {
+ if (sched_isolate_cpu(cpu))
+ cpumask_clear_cpu(cpu,
+ &cpus_isolated_by_thermal);
+ }
+ }
+ }
+ mutex_unlock(&core_isolate_lock);
+ mutex_unlock(&cooling_list_lock);
+
+ atomic_set(&in_suspend, 0);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block cpufreq_cooling_pm_nb = {
+ .notifier_call = cpufreq_cooling_pm_notify,
+};
+
static int cpufreq_hp_offline(unsigned int offline_cpu)
{
struct cpufreq_cooling_device *cpufreq_dev;
@@ -228,7 +276,9 @@ static int cpufreq_hp_offline(unsigned int offline_cpu)
continue;
mutex_lock(&core_isolate_lock);
- if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level)
+ if ((cpufreq_dev->cpufreq_state == cpufreq_dev->max_level) &&
+ (cpumask_test_and_clear_cpu(offline_cpu,
+ &cpus_isolated_by_thermal)))
sched_unisolate_cpu_unlocked(offline_cpu);
mutex_unlock(&core_isolate_lock);
break;
@@ -243,6 +293,9 @@ static int cpufreq_hp_online(unsigned int online_cpu)
struct cpufreq_cooling_device *cpufreq_dev;
int ret = 0;
+ if (atomic_read(&in_suspend))
+ return 0;
+
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (!cpumask_test_cpu(online_cpu, &cpufreq_dev->allowed_cpus))
@@ -677,8 +730,13 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_device->cpufreq_state = state;
/* If state is the last, isolate the CPU */
if (state == cpufreq_device->max_level) {
- if (cpu_online(cpu))
- sched_isolate_cpu(cpu);
+ if (cpu_online(cpu) &&
+ (!cpumask_test_and_set_cpu(cpu,
+ &cpus_isolated_by_thermal))) {
+ if (sched_isolate_cpu(cpu))
+ cpumask_clear_cpu(cpu,
+ &cpus_isolated_by_thermal);
+ }
mutex_unlock(&core_isolate_lock);
return ret;
} else if ((prev_state == cpufreq_device->max_level)
@@ -695,8 +753,10 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
if (ret)
pr_err("CPU:%d online error:%d\n", cpu, ret);
goto update_frequency;
- } else
+ } else if (cpumask_test_and_clear_cpu(cpu,
+ &cpus_isolated_by_thermal)) {
sched_unisolate_cpu(cpu);
+ }
}
mutex_unlock(&core_isolate_lock);
update_frequency:
@@ -1105,12 +1165,14 @@ __cpufreq_cooling_register(struct device_node *np,
mutex_unlock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
- if (!cpufreq_dev_count++)
+ if (!cpufreq_dev_count++ && !cpufreq_dev->plat_ops)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
if (!cpuhp_registered) {
cpuhp_registered = 1;
+ register_pm_notifier(&cpufreq_cooling_pm_nb);
cpumask_clear(&cpus_pending_online);
+ cpumask_clear(&cpus_isolated_by_thermal);
INIT_WORK(&cpuhp_register_work, register_cdev);
queue_work(system_wq, &cpuhp_register_work);
}
@@ -1285,9 +1347,13 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
/* Unregister the notifier for the last cpufreq cooling device */
mutex_lock(&cooling_cpufreq_lock);
- if (!--cpufreq_dev_count)
- cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
- CPUFREQ_POLICY_NOTIFIER);
+ if (!--cpufreq_dev_count) {
+ unregister_pm_notifier(&cpufreq_cooling_pm_nb);
+ if (!cpufreq_dev->plat_ops)
+ cpufreq_unregister_notifier(
+ &thermal_cpufreq_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ }
mutex_lock(&cooling_list_lock);
list_del(&cpufreq_dev->node);
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 4e5546e..94c93b5 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -58,8 +58,7 @@
#define LIMITS_CLUSTER_0 0x6370302D
#define LIMITS_CLUSTER_1 0x6370312D
-#define LIMITS_DOMAIN_MAX 0x444D4158
-#define LIMITS_DOMAIN_MIN 0x444D494E
+#define LIMITS_FREQ_CAP 0x46434150
#define LIMITS_TEMP_DEFAULT 75000
#define LIMITS_TEMP_HIGH_THRESH_MAX 120000
@@ -225,31 +224,36 @@ static irqreturn_t lmh_dcvs_handle_isr(int irq, void *data)
}
static int limits_dcvs_write(uint32_t node_id, uint32_t fn,
- uint32_t setting, uint32_t val)
+ uint32_t setting, uint32_t val, uint32_t val1,
+ bool enable_val1)
{
int ret;
struct scm_desc desc_arg;
uint32_t *payload = NULL;
+ uint32_t payload_len;
- payload = kzalloc(sizeof(uint32_t) * 5, GFP_KERNEL);
+ payload_len = ((enable_val1) ? 6 : 5) * sizeof(uint32_t);
+ payload = kzalloc(payload_len, GFP_KERNEL);
if (!payload)
return -ENOMEM;
payload[0] = fn; /* algorithm */
payload[1] = 0; /* unused sub-algorithm */
payload[2] = setting;
- payload[3] = 1; /* number of values */
+ payload[3] = enable_val1 ? 2 : 1; /* number of values */
payload[4] = val;
+ if (enable_val1)
+ payload[5] = val1;
desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
- desc_arg.args[1] = sizeof(uint32_t) * 5;
+ desc_arg.args[1] = payload_len;
desc_arg.args[2] = LIMITS_NODE_DCVS;
desc_arg.args[3] = node_id;
desc_arg.args[4] = 0; /* version */
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
SCM_VAL, SCM_VAL);
- dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
+ dmac_flush_range(payload, (void *)payload + payload_len);
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LIMITS_DCVSH), &desc_arg);
kfree(payload);
@@ -288,16 +292,17 @@ static int lmh_set_trips(void *data, int low, int high)
hw->temp_limits[LIMITS_TRIP_ARM] = (uint32_t)low;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_ARM_THRESHOLD, low);
+ LIMITS_ARM_THRESHOLD, low, 0, 0);
if (ret)
return ret;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_HI_THRESHOLD, high);
+ LIMITS_HI_THRESHOLD, high, 0, 0);
if (ret)
return ret;
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
LIMITS_LOW_THRESHOLD,
- high - LIMITS_LOW_THRESHOLD_OFFSET);
+ high - LIMITS_LOW_THRESHOLD_OFFSET,
+ 0, 0);
if (ret)
return ret;
@@ -365,8 +370,9 @@ static int lmh_set_max_limit(int cpu, u32 freq)
max_freq = hw->cdev_data[idx].max_freq;
idx++;
}
- ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_GENERAL,
- LIMITS_DOMAIN_MAX, max_freq);
+ ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
+ LIMITS_FREQ_CAP, max_freq,
+ (max_freq == U32_MAX) ? 0 : 1, 1);
mutex_unlock(&hw->access_lock);
lmh_dcvs_notify(hw);
@@ -556,22 +562,22 @@ static int limits_dcvs_probe(struct platform_device *pdev)
/* Enable the thermal algorithm early */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_THERMAL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the LMH outer loop algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_CRNT,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the Reliability algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_REL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
/* Enable the BCL algorithm */
ret = limits_dcvs_write(hw->affinity, LIMITS_SUB_FN_BCL,
- LIMITS_ALGO_MODE_ENABLE, 1);
+ LIMITS_ALGO_MODE_ENABLE, 1, 0, 0);
if (ret)
return ret;
ret = enable_lmh();
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 04320d8..411588e 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1730,6 +1730,7 @@ int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
param->gain_num = qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
param->gain_den = qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
param->adc_tm_hc = chip->adc_tm_hc;
+ param->full_scale_code = chip->adc->adc_prop->full_scale_code;
chip->adc->amux_prop->amux_channel = channel;
chip->adc->amux_prop->decimation =
chip->adc->adc_channels[dt_index].adc_decimation;
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
index 9b10a1b..2e795b1 100644
--- a/drivers/thermal/tsens-dbg.c
+++ b/drivers/thermal/tsens-dbg.c
@@ -77,11 +77,10 @@ static int tsens_dbg_log_interrupt_timestamp(struct tsens_device *data,
pr_debug("%d %d\n", id, dbg_type);
tmdev = data;
/* debug */
- idx = tmdev->tsens_dbg.tsens_thread_iq_dbg.idx;
- tmdev->tsens_dbg.tsens_thread_iq_dbg.dbg_count[idx%10]++;
- tmdev->tsens_dbg.tsens_thread_iq_dbg.time_stmp[idx%10] =
+ idx = tmdev->tsens_dbg.irq_idx;
+ tmdev->tsens_dbg.irq_time_stmp[idx%10] =
sched_clock();
- tmdev->tsens_dbg.tsens_thread_iq_dbg.idx++;
+ tmdev->tsens_dbg.irq_idx++;
return 0;
}
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index a695d57..ec2d592 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -48,7 +48,6 @@ static inline int tsens2xxx_dbg(struct tsens_device *data, u32 id,
#endif
struct tsens_dbg {
- u32 dbg_count[DEBUG_SIZE];
u32 idx;
unsigned long long time_stmp[DEBUG_SIZE];
unsigned long temp[DEBUG_SIZE];
@@ -56,9 +55,10 @@ struct tsens_dbg {
struct tsens_dbg_context {
struct tsens_device *tmdev;
- struct tsens_dbg tsens_thread_iq_dbg;
struct tsens_dbg sensor_dbg_info[TSENS_MAX_SENSORS];
int tsens_critical_wd_cnt;
+ u32 irq_idx;
+ unsigned long long irq_time_stmp[DEBUG_SIZE];
struct delayed_work tsens_critical_poll_test;
};
@@ -120,7 +120,6 @@ struct tsens_device {
struct device *dev;
struct platform_device *pdev;
struct list_head list;
- u32 num_sensors;
struct regmap *map;
struct regmap_field *status_field;
void __iomem *tsens_srot_addr;
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index de9f27f..fd625ae 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -94,7 +94,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
msm_tsens_convert_temp(last_temp, temp);
- return 0;
+ goto dbg;
}
code = readl_relaxed_no_log(sensor_addr +
@@ -103,7 +103,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
last_temp = last_temp2;
msm_tsens_convert_temp(last_temp, temp);
- return 0;
+ goto dbg;
}
code = readl_relaxed_no_log(sensor_addr +
@@ -113,7 +113,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
last_temp = last_temp3;
msm_tsens_convert_temp(last_temp, temp);
- return 0;
+ goto dbg;
}
if (last_temp == last_temp2)
@@ -123,6 +123,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
msm_tsens_convert_temp(last_temp, temp);
+dbg:
if (tmdev->ops->dbg)
tmdev->ops->dbg(tmdev, (u32) sensor->hw_id,
TSENS_DBG_LOG_TEMP_READS, temp);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 3fc9123..996bd47 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -300,7 +300,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return 0;
err_tty_register_device_failed:
- free_irq(irq, pdev);
+ free_irq(irq, qtty);
err_request_irq_failed:
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
diff --git a/drivers/tty/serial/8250/8250_moxa.c b/drivers/tty/serial/8250/8250_moxa.c
index 26eb539..d5069b2 100644
--- a/drivers/tty/serial/8250/8250_moxa.c
+++ b/drivers/tty/serial/8250/8250_moxa.c
@@ -68,6 +68,7 @@ static int moxa8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sizeof(unsigned int) * nr_ports, GFP_KERNEL);
if (!brd)
return -ENOMEM;
+ brd->num_ports = nr_ports;
memset(&uart, 0, sizeof(struct uart_8250_port));
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f24d303..1ef31e3 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1751,8 +1751,6 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- pr_debug("%s: THRE\n", __func__);
-
/*
* With RPM enabled, we have to wait until the FIFO is empty before the
* HW can go idle. So we get here once again with empty FIFO and disable
@@ -1817,8 +1815,6 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- pr_debug("%s: status = %x\n", __func__, status);
-
if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 9c819fb..b142869 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -185,6 +185,8 @@ static void msm_geni_serial_power_off(struct uart_port *uport);
static int msm_geni_serial_poll_bit(struct uart_port *uport,
int offset, int bit_field, bool set);
static void msm_geni_serial_stop_rx(struct uart_port *uport);
+static int msm_geni_serial_runtime_resume(struct device *dev);
+static int msm_geni_serial_runtime_suspend(struct device *dev);
static atomic_t uart_line_id = ATOMIC_INIT(0);
@@ -246,7 +248,7 @@ static bool device_pending_suspend(struct uart_port *uport)
{
int usage_count = atomic_read(&uport->dev->power.usage_count);
- return (pm_runtime_suspended(uport->dev) || !usage_count);
+ return (pm_runtime_status_suspended(uport->dev) || !usage_count);
}
static bool check_transfers_inflight(struct uart_port *uport)
@@ -311,26 +313,24 @@ static void wait_for_transfers_inflight(struct uart_port *uport)
static int vote_clock_on(struct uart_port *uport)
{
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ int usage_count = atomic_read(&uport->dev->power.usage_count);
int ret = 0;
- if (!pm_runtime_enabled(uport->dev)) {
- dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
- return -EPERM;
- }
ret = msm_geni_serial_power_on(uport);
if (ret) {
dev_err(uport->dev, "Failed to vote clock on\n");
return ret;
}
port->ioctl_count++;
- IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
- current->comm, port->ioctl_count);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+ __func__, current->comm, port->ioctl_count, usage_count);
return 0;
}
static int vote_clock_off(struct uart_port *uport)
{
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ int usage_count = atomic_read(&uport->dev->power.usage_count);
if (!pm_runtime_enabled(uport->dev)) {
dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
@@ -347,8 +347,8 @@ static int vote_clock_off(struct uart_port *uport)
wait_for_transfers_inflight(uport);
port->ioctl_count--;
msm_geni_serial_power_off(uport);
- IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d\n", __func__,
- current->comm, port->ioctl_count);
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s%s ioctl %d usage_count %d\n",
+ __func__, current->comm, port->ioctl_count, usage_count);
return 0;
};
@@ -472,13 +472,37 @@ static int msm_geni_serial_power_on(struct uart_port *uport)
int ret = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- ret = pm_runtime_get_sync(uport->dev);
- if (ret < 0) {
- IPC_LOG_MSG(port->ipc_log_pwr, "%s Err\n", __func__);
- WARN_ON_ONCE(1);
- pm_runtime_put_noidle(uport->dev);
- pm_runtime_set_suspended(uport->dev);
- return ret;
+ if (!pm_runtime_enabled(uport->dev)) {
+ if (pm_runtime_status_suspended(uport->dev)) {
+ struct uart_state *state = uport->state;
+ struct tty_port *tport = &state->port;
+ int lock = mutex_trylock(&tport->mutex);
+
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s:Manual resume\n", __func__);
+ pm_runtime_disable(uport->dev);
+ ret = msm_geni_serial_runtime_resume(uport->dev);
+ if (ret) {
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s:Manual RPM CB failed %d\n",
+ __func__, ret);
+ } else {
+ pm_runtime_get_noresume(uport->dev);
+ pm_runtime_set_active(uport->dev);
+ }
+ pm_runtime_enable(uport->dev);
+ if (lock)
+ mutex_unlock(&tport->mutex);
+ }
+ } else {
+ ret = pm_runtime_get_sync(uport->dev);
+ if (ret < 0) {
+ IPC_LOG_MSG(port->ipc_log_pwr, "%s Err\n", __func__);
+ WARN_ON_ONCE(1);
+ pm_runtime_put_noidle(uport->dev);
+ pm_runtime_set_suspended(uport->dev);
+ return ret;
+ }
}
return 0;
}
@@ -834,8 +858,11 @@ static void msm_geni_serial_start_tx(struct uart_port *uport)
goto exit_start_tx;
}
- if (!uart_console(uport))
+ if (!uart_console(uport)) {
+ IPC_LOG_MSG(msm_port->ipc_log_misc,
+ "%s.Power on.\n", __func__);
pm_runtime_get(uport->dev);
+ }
if (msm_port->xfer_mode == FIFO_MODE) {
geni_status = geni_read_reg_nolog(uport->membase,
@@ -1315,7 +1342,7 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
spin_lock_irqsave(&uport->lock, flags);
if (uart_console(uport) && uport->suspended)
goto exit_geni_serial_isr;
- if (!uart_console(uport) && pm_runtime_suspended(uport->dev)) {
+ if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
IPC_LOG_MSG(msm_port->ipc_log_misc,
"%s.Device is suspended.\n", __func__);
@@ -1503,6 +1530,17 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
spin_unlock_irqrestore(&uport->lock, flags);
if (!uart_console(uport)) {
+ if (msm_port->ioctl_count) {
+ int i;
+
+ for (i = 0; i < msm_port->ioctl_count; i++) {
+ IPC_LOG_MSG(msm_port->ipc_log_pwr,
+ "%s IOCTL vote present. Forcing off\n",
+ __func__);
+ msm_geni_serial_power_off(uport);
+ }
+ msm_port->ioctl_count = 0;
+ }
msm_geni_serial_power_off(uport);
if (msm_port->wakeup_irq > 0) {
irq_set_irq_wake(msm_port->wakeup_irq, 0);
@@ -1851,11 +1889,8 @@ static unsigned int msm_geni_serial_tx_empty(struct uart_port *uport)
unsigned int is_tx_empty = 1;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
- if (!uart_console(uport) && device_pending_suspend(uport)) {
- IPC_LOG_MSG(port->ipc_log_pwr,
- "%s Device suspended,vote clocks on.\n", __func__);
+ if (!uart_console(uport) && device_pending_suspend(uport))
return 0;
- }
if (port->xfer_mode == SE_DMA)
tx_fifo_status = port->tx_dma ? 1 : 0;
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 6a05d5b..792fb3b 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -53,7 +53,6 @@
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/device.h>
-#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7e97a1c..15eaea5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -193,18 +193,17 @@ static const struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
},
/*
- * Common definitions for legacy IrDA ports, dependent on
- * regshift value.
+ * Common definitions for legacy IrDA ports.
*/
[SCIx_IRDA_REGTYPE] = {
[SCSMR] = { 0x00, 8 },
- [SCBRR] = { 0x01, 8 },
- [SCSCR] = { 0x02, 8 },
- [SCxTDR] = { 0x03, 8 },
- [SCxSR] = { 0x04, 8 },
- [SCxRDR] = { 0x05, 8 },
- [SCFCR] = { 0x06, 8 },
- [SCFDR] = { 0x07, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
[SCTFDR] = sci_reg_invalid,
[SCRFDR] = sci_reg_invalid,
[SCSPTR] = sci_reg_invalid,
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d0..59828d8 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
static struct uart_port *sunhv_port;
+void sunhv_migrate_hvcons_irq(int cpu)
+{
+ /* Migrate hvcons irq to param cpu */
+ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
/* Copy 's' into the con_write_page, decoding "\n" into
* "\r\n" along the way. We have to return two lengths
* because the caller needs to know how much to advance
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index aa80dc9..c220c2c 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -362,6 +362,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
+ * __tty_insert_flip_char - Add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte to the tty buffering, with an optional flag.
+ * This is the slow path of tty_insert_flip_char.
+ */
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
+{
+ struct tty_buffer *tb;
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+
+ if (!__tty_buffer_request_room(port, 1, flags))
+ return 0;
+
+ tb = port->buf.tail;
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+
+ return 1;
+}
+EXPORT_SYMBOL(__tty_insert_flip_char);
+
+/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
*
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 0cf149e..f36a1ac3 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -134,9 +134,9 @@ void ci_handle_vbus_change(struct ci_hdrc *ci)
if (!ci->is_otg)
return;
- if (hw_read_otgsc(ci, OTGSC_BSV))
+ if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
usb_gadget_vbus_connect(&ci->gadget);
- else
+ else if (!hw_read_otgsc(ci, OTGSC_BSV) && ci->vbus_active)
usb_gadget_vbus_disconnect(&ci->gadget);
}
@@ -175,14 +175,21 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
ci_role_stop(ci);
- if (role == CI_ROLE_GADGET)
+ if (role == CI_ROLE_GADGET &&
+ IS_ERR(ci->platdata->vbus_extcon.edev))
/*
- * wait vbus lower than OTGSC_BSV before connecting
- * to host
+ * Wait vbus lower than OTGSC_BSV before connecting
+ * to host. If connecting status is from an external
+ * connector instead of register, we don't need to
+ * care vbus on the board, since it will not affect
+ * external connector status.
*/
hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
+ /* vbus change may have already occurred */
+ if (role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
}
}
/**
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f16491c..ea20b2c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1773,6 +1773,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 0b845e5..9f00165 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -194,8 +194,10 @@ static void wdm_in_callback(struct urb *urb)
/*
* only set a new error if there is no previous error.
* Errors are only cleared during read/open
+ * Avoid propagating -EPIPE (stall) to userspace since it is
+ * better handled as an empty read
*/
- if (desc->rerr == 0)
+ if (desc->rerr == 0 && status != -EPIPE)
desc->rerr = status;
if (length + desc->length > desc->wMaxCommand) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 3fd2b54..701d9f7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -638,15 +638,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
+ struct usb_interface_assoc_descriptor *d;
+
+ d = (struct usb_interface_assoc_descriptor *)header;
+ if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+ dev_warn(ddev,
+ "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ cfgno, d->bLength);
+ continue;
+ }
+
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
- config->intf_assoc[iad_num] =
- (struct usb_interface_assoc_descriptor
- *)header;
+ config->intf_assoc[iad_num] = d;
iad_num++;
}
@@ -847,7 +855,7 @@ int usb_get_configuration(struct usb_device *dev)
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(100);
+ msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
@@ -947,10 +955,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
- length = cap->bLength;
- if (total_len < length)
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+ dev->bos->desc->bNumDeviceCaps = i;
break;
+ }
+ length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 7b8ca7d..035f03b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3093,6 +3093,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
}
usb_put_invalidate_rhdev(hcd);
+ hcd->flags = 0;
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3b0cc03..50a6f2f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2715,13 +2715,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if the connection bounced. A USB 3.0
- * connection may bounce if multiple warm resets were issued,
+ /* Retry if connect change is set but status is still connected.
+ * A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
- (portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
+ (portchange & USB_PORT_STAT_C_CONNECTION)) {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return -EAGAIN;
+ }
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
@@ -4741,7 +4744,7 @@ hub_power_remaining(struct usb_hub *hub)
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status = -ENODEV;
+ int ret, status = -ENODEV;
int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
@@ -4749,6 +4752,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
+ enum usb_device_speed dev_speed = USB_SPEED_UNKNOWN;
/* Disconnect any existing devices under this port */
if (udev) {
@@ -4803,6 +4807,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
else
unit_load = 100;
+retry_enum:
status = 0;
for (i = 0; i < SET_CONFIG_TRIES; i++) {
@@ -4840,8 +4845,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (status < 0)
goto loop;
+ dev_speed = udev->speed;
+ if (udev->speed > USB_SPEED_UNKNOWN &&
+ udev->speed <= USB_SPEED_HIGH && hcd->usb_phy
+ && hcd->usb_phy->disable_chirp)
+ hcd->usb_phy->disable_chirp(hcd->usb_phy,
+ false);
+
if (udev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(1000);
+ msleep(2000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
@@ -4942,6 +4954,19 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (status != -ENOTCONN && status != -ENODEV)
dev_err(&port_dev->dev,
"unable to enumerate USB device\n");
+ if (!hub->hdev->parent && dev_speed == USB_SPEED_UNKNOWN
+ && hcd->usb_phy && hcd->usb_phy->disable_chirp) {
+ ret = hcd->usb_phy->disable_chirp(hcd->usb_phy, true);
+ if (!ret) {
+ dev_dbg(&port_dev->dev,
+ "chirp disabled re-try enum\n");
+ goto retry_enum;
+ } else {
+ /* bail out and re-enable chirping */
+ hcd->usb_phy->disable_chirp(hcd->usb_phy,
+ false);
+ }
+ }
}
done:
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 41a9845..4f8221e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2237,6 +2237,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
elength = 1;
goto next_desc;
}
+ if ((buflen < elength) || (elength < 3)) {
+ dev_err(&intf->dev, "invalid descriptor buffer length\n");
+ break;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e3..a6aaf2f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5ca987a..a5e050a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -690,8 +690,6 @@ static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
@@ -1220,7 +1218,8 @@ static int dwc3_probe(struct platform_device *pdev)
&dwc->fladj);
dwc->disable_clk_gating = device_property_read_bool(dev,
"snps,disable-clk-gating");
-
+ dwc->enable_bus_suspend = device_property_read_bool(dev,
+ "snps,bus-suspend-enable");
if (dwc->enable_bus_suspend) {
pm_runtime_set_autosuspend_delay(dev, 500);
pm_runtime_use_autosuspend(dev);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 63d0a3e..f511055 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -568,7 +568,6 @@ struct dwc3_ep_events {
* @started_list: list of started requests on this endpoint
* @lock: spinlock for endpoint request queue traversal
* @regs: pointer to first endpoint register
- * @trb_dma_pool: dma pool used to get aligned trb memory pool
* @trb_pool: array of transaction buffers
* @trb_pool_dma: dma address of @trb_pool
* @num_trbs: num of trbs in the trb dma pool
@@ -600,7 +599,6 @@ struct dwc3_ep {
spinlock_t lock;
void __iomem *regs;
- struct dma_pool *trb_dma_pool;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
u32 num_trbs;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d0fc511..b6ad39b 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -266,6 +266,7 @@ struct dwc3_msm {
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
struct delayed_work sdp_check;
+ struct mutex suspend_resume_mutex;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -1051,25 +1052,17 @@ static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
: (req->num_bufs + 2);
- dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
- num_trbs * sizeof(struct dwc3_trb),
- num_trbs * sizeof(struct dwc3_trb), 0);
- if (!dep->trb_dma_pool) {
+ dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
+ num_trbs * sizeof(struct dwc3_trb),
+ &dep->trb_pool_dma, GFP_KERNEL);
+
+ if (!dep->trb_pool) {
dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
dep->name);
return -ENOMEM;
}
dep->num_trbs = num_trbs;
-
- dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
- GFP_KERNEL, &dep->trb_pool_dma);
- if (!dep->trb_pool) {
- dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
- dep->name);
- return -ENOMEM;
- }
-
/* IN direction */
if (dep->direction) {
for (i = 0; i < num_trbs ; i++) {
@@ -1159,18 +1152,19 @@ static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
static void gsi_free_trbs(struct usb_ep *ep)
{
struct dwc3_ep *dep = to_dwc3_ep(ep);
+ struct dwc3 *dwc = dep->dwc;
if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
return;
/* Free TRBs and TRB pool for EP */
- if (dep->trb_dma_pool) {
- dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
- dep->trb_pool_dma);
- dma_pool_destroy(dep->trb_dma_pool);
+ if (dep->trb_pool_dma) {
+ dma_free_coherent(dwc->sysdev,
+ dep->num_trbs * sizeof(struct dwc3_trb),
+ dep->trb_pool,
+ dep->trb_pool_dma);
dep->trb_pool = NULL;
dep->trb_pool_dma = 0;
- dep->trb_dma_pool = NULL;
}
}
/*
@@ -1852,7 +1846,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
break;
evt->dwc = dwc;
evt->length = DWC3_EVENT_BUFFERS_SIZE;
- evt->buf = dma_alloc_coherent(dwc->dev,
+ evt->buf = dma_alloc_coherent(dwc->sysdev,
DWC3_EVENT_BUFFERS_SIZE,
&evt->dma, GFP_KERNEL);
if (!evt->buf) {
@@ -1923,7 +1917,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
evt = mdwc->gsi_ev_buff[i];
if (evt)
- dma_free_coherent(dwc->dev, evt->length,
+ dma_free_coherent(dwc->sysdev, evt->length,
evt->buf, evt->dma);
}
break;
@@ -2157,8 +2151,10 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
struct dwc3_event_buffer *evt;
struct usb_irq *uirq;
+ mutex_lock(&mdwc->suspend_resume_mutex);
if (atomic_read(&dwc->in_lpm)) {
dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return 0;
}
@@ -2171,6 +2167,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
dev_dbg(mdwc->dev,
"%s: %d device events pending, abort suspend\n",
__func__, evt->count / 4);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return -EBUSY;
}
}
@@ -2189,6 +2186,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
dev_dbg(mdwc->dev,
"%s: cable disconnected while not in idle otg state\n",
__func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return -EBUSY;
}
@@ -2202,12 +2200,15 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
pr_err("%s(): Trying to go in LPM with state:%d\n",
__func__, dwc->gadget.state);
pr_err("%s(): LPM is not performed.\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return -EBUSY;
}
ret = dwc3_msm_prepare_suspend(mdwc);
- if (ret)
+ if (ret) {
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return ret;
+ }
/* Disable core irq */
if (dwc->irq)
@@ -2315,6 +2316,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
}
dev_info(mdwc->dev, "DWC3 in low power mode\n");
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return 0;
}
@@ -2327,8 +2329,10 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+ mutex_lock(&mdwc->suspend_resume_mutex);
if (!atomic_read(&dwc->in_lpm)) {
dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+ mutex_unlock(&mdwc->suspend_resume_mutex);
return 0;
}
@@ -2479,6 +2483,8 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
+ mutex_unlock(&mdwc->suspend_resume_mutex);
+
return 0;
}
@@ -2983,8 +2989,8 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
return ret;
}
-#define SMMU_BASE 0x10000000 /* Device address range base */
-#define SMMU_SIZE 0x40000000 /* Device address range size */
+#define SMMU_BASE 0x60000000 /* Device address range base */
+#define SMMU_SIZE 0x90000000 /* Device address range size */
static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
{
@@ -3422,6 +3428,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
POWER_SUPPLY_PROP_PRESENT, &pval);
}
+ mutex_init(&mdwc->suspend_resume_mutex);
/* Update initial VBUS/ID state from extcon */
if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
EXTCON_USB))
@@ -3791,8 +3798,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
dwc3_usb3_phy_suspend(dwc, false);
mdwc->in_host_mode = false;
- pm_runtime_mark_last_busy(mdwc->dev);
- pm_runtime_put_sync_autosuspend(mdwc->dev);
+ pm_runtime_put_sync_suspend(mdwc->dev);
dbg_event(0xFF, "StopHost psync",
atomic_read(&mdwc->dev->power.usage_count));
}
@@ -3897,6 +3903,8 @@ static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
if (dwc->maximum_speed == usb_speed)
goto err;
+ dbg_event(0xFF, "fw_restarthost", 0);
+ flush_delayed_work(&mdwc->sm_work);
dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
ret = dwc3_otg_start_host(mdwc, 0);
if (ret)
@@ -4062,7 +4070,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
* which was incremented upon cable connect in
* OTG_STATE_B_IDLE state
*/
- pm_runtime_put_sync(mdwc->dev);
+ pm_runtime_put_sync_suspend(mdwc->dev);
dbg_event(0xFF, "!BSV psync",
atomic_read(&mdwc->dev->power.usage_count));
work = 1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 1c33051..4e7de00 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -599,22 +599,30 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
- /* Read ep0IN related TXFIFO size */
- dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
- DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
- /* Clear existing allocated TXFIFO for all IN eps except ep0 */
- for (num = 0; num < dwc->num_in_eps; num++) {
- dep = dwc->eps[(num << 1) | 1];
- if (num) {
- dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), 0);
- dep->fifo_depth = 0;
- } else {
- dep->fifo_depth = dwc->last_fifo_depth;
- }
+ /*
+ * If tx-fifo-resize flag is not set for the controller, then
+ * do not clear existing allocated TXFIFO since we do not
+ * allocate it again in dwc3_gadget_resize_tx_fifos
+ */
+ if (dwc->needs_fifo_resize) {
+ /* Read ep0IN related TXFIFO size */
+ dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
+ DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
+ /* Clear existing TXFIFO for all IN eps except ep0 */
+ for (num = 0; num < dwc->num_in_eps; num++) {
+ dep = dwc->eps[(num << 1) | 1];
+ if (num) {
+ dwc3_writel(dwc->regs,
+ DWC3_GTXFIFOSIZ(num), 0);
+ dep->fifo_depth = 0;
+ } else {
+ dep->fifo_depth = dwc->last_fifo_depth;
+ }
- dev_dbg(dwc->dev, "%s(): %s dep->fifo_depth:%x\n",
+ dev_dbg(dwc->dev, "%s(): %s fifo_depth:%x\n",
__func__, dep->name, dep->fifo_depth);
- dbg_event(0xFF, "fifo_reset", dep->number);
+ dbg_event(0xFF, "fifo_reset", dep->number);
+ }
}
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9d247b8..c12fbf3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -230,7 +230,8 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
tmp = ((max_packet + mdwidth) * mult) + mdwidth;
fifo_size = DIV_ROUND_UP(tmp, mdwidth);
dep->fifo_depth = fifo_size;
- fifo_size |= (dwc->last_fifo_depth << 16);
+ fifo_size |= (dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0)) & 0xffff0000)
+ + (dwc->last_fifo_depth << 16);
dwc->last_fifo_depth += (fifo_size & 0xffff);
dev_dbg(dwc->dev, "%s ep_num:%d last_fifo_depth:%04x fifo_depth:%d\n",
@@ -950,9 +951,42 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
if (!node) {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /*
+ * USB Specification 2.0 Section 5.9.2 states that: "If
+ * there is only a single transaction in the microframe,
+ * only a DATA0 data packet PID is used. If there are
+ * two transactions per microframe, DATA1 is used for
+ * the first transaction data packet and DATA0 is used
+ * for the second transaction data packet. If there are
+ * three transactions per microframe, DATA2 is used for
+ * the first transaction data packet, DATA1 is used for
+ * the second, and DATA0 is used for the third."
+ *
+ * IOW, we should satisfy the following cases:
+ *
+ * 1) length <= maxpacket
+ * - DATA0
+ *
+ * 2) maxpacket < length <= (2 * maxpacket)
+ * - DATA1, DATA0
+ *
+ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
+ * - DATA2, DATA1, DATA0
+ */
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+ unsigned int mult = ep->mult - 1;
+ unsigned int maxp;
+
+ maxp = usb_endpoint_maxp(ep->desc) & 0x07ff;
+
+ if (length <= (2 * maxp))
+ mult--;
+
+ if (length <= maxp)
+ mult--;
+
+ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
}
} else {
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
@@ -1403,6 +1437,48 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number, true);
+
+ /*
+ * If request was already started, this means we had to
+ * stop the transfer. With that we also need to ignore
+ * all TRBs used by the request, however TRBs can only
+ * be modified after completion of END_TRANSFER
+ * command. So what we do here is that we wait for
+ * END_TRANSFER completion and only after that, we jump
+ * over TRBs by clearing HWO and incrementing dequeue
+ * pointer.
+ *
+ * Note that we have 2 possible types of transfers here:
+ *
+ * i) Linear buffer request
+ * ii) SG-list based request
+ *
+ * SG-list based requests will have r->num_pending_sgs
+ * set to a valid number (> 0). Linear requests,
+ * normally use a single TRB.
+ *
+ * All of these cases need to be taken into
+ * consideration so we don't mess up our TRB ring
+ * pointers.
+ */
+ if (!r->trb)
+ goto out1;
+
+ if (r->num_pending_sgs) {
+ struct dwc3_trb *trb;
+ int i = 0;
+
+ for (i = 0; i < r->num_pending_sgs; i++) {
+ trb = r->trb + i;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ } else {
+ struct dwc3_trb *trb = r->trb;
+
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
goto out1;
}
dev_err(dwc->dev, "request %pK was not queued to %s\n",
@@ -1414,6 +1490,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
out1:
dbg_event(dep->number, "DEQUEUE", 0);
/* giveback the request */
+ dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@@ -2256,6 +2333,10 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
+ dbg_event(0xFF, "fwq_started", 0);
+ flush_workqueue(dwc->dwc_wq);
+ dbg_event(0xFF, "fwq_completed", 0);
+
return 0;
}
@@ -2750,43 +2831,55 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "DISCONNECT", 0);
- dwc->gadget_driver->disconnect(&dwc->gadget);
+ gadget_driver->disconnect(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "SUSPEND", 0);
- dwc->gadget_driver->suspend(&dwc->gadget);
+ gadget_driver->suspend(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "RESUME", 0);
- dwc->gadget_driver->resume(&dwc->gadget);
+ gadget_driver->resume(&dwc->gadget);
spin_lock(&dwc->lock);
}
}
static void dwc3_reset_gadget(struct dwc3 *dwc)
{
+ struct usb_gadget_driver *gadget_driver;
+
if (!dwc->gadget_driver)
return;
if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ gadget_driver = dwc->gadget_driver;
spin_unlock(&dwc->lock);
dbg_event(0xFF, "UDC RESET", 0);
- usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
+ usb_gadget_udc_reset(&dwc->gadget, gadget_driver);
spin_lock(&dwc->lock);
}
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index e32de9a..98509f2 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2164,6 +2164,8 @@ static DEVICE_ATTR_RO(suspended);
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+ struct usb_string *dev_str = gstr->strings;
/* composite_disconnect() must already have been called
* by the underlying peripheral controller driver!
@@ -2183,6 +2185,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
composite_dev_cleanup(cdev);
+ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index d6e77a5..f915e55 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -14,11 +14,16 @@
#include <linux/kdev_t.h>
#include <linux/usb/ch9.h>
+#ifdef CONFIG_USB_F_NCM
+#include <function/u_ncm.h>
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl);
void acc_disconnect(void);
#endif
+
static struct class *android_class;
static struct device *android_device;
static int index;
@@ -84,6 +89,7 @@ struct gadget_info {
struct usb_composite_driver composite;
struct usb_composite_dev cdev;
bool use_os_desc;
+ bool unbinding;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
#ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -281,9 +287,12 @@ static int unregister_gadget(struct gadget_info *gi)
if (!gi->composite.gadget_driver.udc_name)
return -ENODEV;
+ gi->unbinding = true;
ret = usb_gadget_unregister_driver(&gi->composite.gadget_driver);
if (ret)
return ret;
+
+ gi->unbinding = false;
kfree(gi->composite.gadget_driver.udc_name);
gi->composite.gadget_driver.udc_name = NULL;
return 0;
@@ -1179,11 +1188,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
NULL
};
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner)
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner)
{
struct config_group *os_desc_group;
struct config_item_type *os_desc_type, *interface_type;
@@ -1195,7 +1205,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
if (!vlabuf)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1220,7 +1230,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
configfs_add_default_group(&d->group, os_desc_group);
}
- return 0;
+ return os_desc_group;
}
EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
@@ -1503,6 +1513,18 @@ static int android_setup(struct usb_gadget *gadget,
}
}
+#ifdef CONFIG_USB_F_NCM
+ if (value < 0)
+ value = ncm_ctrlrequest(cdev, c);
+
+ /*
+ * for mirror link command case, if it already been handled,
+ * do not pass to composite_setup
+ */
+ if (value == 0)
+ return value;
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
if (value < 0)
value = acc_ctrlrequest(cdev, c);
@@ -1554,7 +1576,8 @@ static void android_disconnect(struct usb_gadget *gadget)
acc_disconnect();
#endif
gi->connected = 0;
- schedule_work(&gi->work);
+ if (!gi->unbinding)
+ schedule_work(&gi->work);
composite_disconnect(gadget);
}
#endif
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c..540d5e9 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
void unregister_gadget_item(struct config_item *item);
-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
- int n_interf,
- struct usb_os_desc **desc,
- char **names,
- struct module *owner);
+struct config_group *usb_os_desc_prepare_interf_dir(
+ struct config_group *parent,
+ int n_interf,
+ struct usb_os_desc **desc,
+ char **names,
+ struct module *owner);
static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
{
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 1590927..9f7a29a 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -755,11 +755,11 @@ static int audio_pcm_close(struct snd_pcm_substream *substream)
struct audio_dev *audio = substream->private_data;
unsigned long flags;
- spin_lock_irqsave(&audio->lock, flags);
-
/* Remove the QoS request */
pm_qos_remove_request(&audio->pm_qos);
+ spin_lock_irqsave(&audio->lock, flags);
+
audio->substream = NULL;
spin_unlock_irqrestore(&audio->lock, flags);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index d6bf0f4..bdd0dfa 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -586,7 +586,7 @@ static struct usb_endpoint_descriptor rndis_gsi_fs_out_desc = {
};
static struct usb_descriptor_header *gsi_eth_fs_function[] = {
- (struct usb_descriptor_header *) &gsi_eth_fs_function,
+ (struct usb_descriptor_header *) &rndis_gsi_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_gsi_control_intf,
(struct usb_descriptor_header *) &rndis_gsi_header_desc,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 0b758236..33ed64f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -306,8 +306,6 @@ struct fsg_common {
struct completion thread_notifier;
struct task_struct *thread_task;
- /* Callback functions. */
- const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@@ -2540,6 +2538,7 @@ static void handle_exception(struct fsg_common *common)
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
+ int i;
/*
* Allow the thread to be killed by a signal, but set the signal mask
@@ -2601,21 +2600,16 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (!common->ops || !common->ops->thread_exits
- || common->ops->thread_exits(common) < 0) {
- int i;
+ /* Eject media from all LUNs */
- down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
- struct fsg_lun *curlun = common->luns[i];
- if (!curlun || !fsg_lun_is_open(curlun))
- continue;
+ down_write(&common->filesem);
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+ struct fsg_lun *curlun = common->luns[i];
+ if (curlun && fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
- curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
- }
- up_write(&common->filesem);
}
+ up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@@ -2805,13 +2799,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops)
-{
- common->ops = ops;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
-
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index d390231..dc05ca0 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
struct fsg_common;
/* FSF callback functions */
-struct fsg_operations {
- /*
- * Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set).
- */
- int (*thread_exits)(struct fsg_common *common);
-};
-
struct fsg_lun_opts {
struct config_group group;
struct fsg_lun *lun;
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
void fsg_common_remove_luns(struct fsg_common *common);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops);
-
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index d2fbed7..98e353d 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1605,10 +1605,57 @@ static struct config_item_type ncm_func_type = {
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+
+struct ncm_setup_desc {
+ struct work_struct work;
+ struct device *device;
+ uint8_t major; // Mirror Link major version
+ uint8_t minor; // Mirror Link minor version
+};
+
+static struct ncm_setup_desc *_ncm_setup_desc;
+
+#define MIRROR_LINK_STRING_LENGTH_MAX 32
+static void ncm_setup_work(struct work_struct *data)
+{
+ char mirror_link_string[MIRROR_LINK_STRING_LENGTH_MAX];
+ char *envp[2] = { mirror_link_string, NULL };
+
+ snprintf(mirror_link_string, MIRROR_LINK_STRING_LENGTH_MAX,
+ "MirrorLink=V%d.%d",
+ _ncm_setup_desc->major, _ncm_setup_desc->minor);
+ kobject_uevent_env(&_ncm_setup_desc->device->kobj, KOBJ_CHANGE, envp);
+}
+
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+
+ if (ctrl->bRequestType == 0x40 && ctrl->bRequest == 0xF0) {
+ _ncm_setup_desc->minor = (uint8_t)(ctrl->wValue >> 8);
+ _ncm_setup_desc->major = (uint8_t)(ctrl->wValue & 0xFF);
+ schedule_work(&_ncm_setup_desc->work);
+ value = 0;
+ }
+
+ return value;
+}
+#endif
+
static void ncm_free_inst(struct usb_function_instance *f)
{
struct f_ncm_opts *opts;
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+ /* release _ncm_setup_desc related resource */
+ device_destroy(_ncm_setup_desc->device->class,
+ _ncm_setup_desc->device->devt);
+ cancel_work(&_ncm_setup_desc->work);
+ kfree(_ncm_setup_desc);
+#endif
+
opts = container_of(f, struct f_ncm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
@@ -1627,6 +1674,14 @@ static struct usb_function_instance *ncm_alloc_inst(void)
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+ _ncm_setup_desc = kzalloc(sizeof(*_ncm_setup_desc), GFP_KERNEL);
+ if (!_ncm_setup_desc)
+ return ERR_PTR(-ENOMEM);
+ INIT_WORK(&_ncm_setup_desc->work, ncm_setup_work);
+ _ncm_setup_desc->device = create_function_device("f_ncm");
+#endif
+
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 41e1b47..56a8e1b 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -920,6 +920,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
free_netdev(opts->net);
}
+ kfree(opts->rndis_interf_group); /* single VLA chunk */
kfree(opts);
}
@@ -928,6 +929,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
struct f_rndis_opts *opts;
struct usb_os_desc *descs[1];
char *names[1];
+ struct config_group *rndis_interf_group;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -948,8 +950,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
names[0] = "rndis";
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
- usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
- names, THIS_MODULE);
+ rndis_interf_group =
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
+ if (IS_ERR(rndis_interf_group)) {
+ rndis_free_inst(&opts->func_inst);
+ return ERR_CAST(rndis_interf_group);
+ }
+ opts->rndis_interf_group = rndis_interf_group;
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index ce0f3a7..b4541e2 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -33,4 +33,8 @@ struct f_ncm_opts {
int refcnt;
};
+extern struct device *create_function_device(char *name);
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl);
+
#endif /* U_NCM_H */
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 4eafd50..4e2ad04 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
bool bound;
bool borrowed_net;
+ struct config_group *rndis_interf_group;
struct usb_os_desc rndis_os_desc;
char rndis_ext_compat_id[16];
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index f959c42..f69dbd4 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -27,7 +27,7 @@
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
-
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -116,6 +116,7 @@ enum ep0_state {
struct dev_data {
spinlock_t lock;
atomic_t count;
+ int udc_usage;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
- spin_unlock(&epdata->dev->lock);
usb_ep_free_request(ep, req);
+ spin_unlock(&epdata->dev->lock);
put_ep(epdata);
}
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
struct usb_request *req = dev->req;
if ((retval = setup_req (ep, req, 0)) == 0) {
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
retval = usb_ep_queue (ep, req, GFP_KERNEL);
spin_lock_irq (&dev->lock);
+ --dev->udc_usage;
}
dev->state = STATE_DEV_CONNECTED;
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
retval = -EIO;
else {
len = min (len, (size_t)dev->req->actual);
-// FIXME don't call this with the spinlock held ...
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
if (copy_to_user (buf, dev->req->buf, len))
retval = -EFAULT;
else
retval = len;
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
clean_req (dev->gadget->ep0, dev->req);
/* NOTE userspace can't yet choose to stall */
}
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
dev->state = STATE_DEV_CONNECTED;
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
if (copy_from_user (dev->req->buf, buf, len))
retval = -EFAULT;
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
GFP_KERNEL);
}
spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
if (retval < 0) {
clean_req (dev->gadget->ep0, dev->req);
} else
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
- if (gadget->ops->ioctl)
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_DEV_OPENED ||
+ dev->state == STATE_DEV_UNBOUND) {
+ /* Not bound to a UDC */
+ } else if (gadget->ops->ioctl) {
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
+
ret = gadget->ops->ioctl (gadget, code, value);
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
+ }
+ spin_unlock_irq(&dev->lock);
+
return ret;
}
@@ -1463,10 +1483,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (value < 0)
break;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
GFP_KERNEL);
spin_lock (&dev->lock);
+ --dev->udc_usage;
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
@@ -1490,8 +1512,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
req->length = value;
req->zero = value < w_length;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+ spin_lock(&dev->lock);
+ --dev->udc_usage;
+ spin_unlock(&dev->lock);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
/* break link to FS */
ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
list_del_init (&ep->epfiles);
+ spin_unlock_irq (&dev->lock);
+
dentry = ep->dentry;
ep->dentry = NULL;
parent = d_inode(dentry->d_parent);
/* break link to controller */
+ mutex_lock(&ep->lock);
if (ep->state == STATE_EP_ENABLED)
(void) usb_ep_disable (ep->ep);
ep->state = STATE_EP_UNBOUND;
usb_ep_free_request (ep->ep, ep->req);
ep->ep = NULL;
+ mutex_unlock(&ep->lock);
+
wake_up (&ep->wait);
put_ep (ep);
- spin_unlock_irq (&dev->lock);
-
/* break link to dcache */
inode_lock(parent);
d_delete (dentry);
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
spin_lock_irq (&dev->lock);
dev->state = STATE_DEV_UNBOUND;
+ while (dev->udc_usage > 0) {
+ spin_unlock_irq(&dev->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dev->lock);
+ }
spin_unlock_irq (&dev->lock);
destroy_ep_files (dev);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index 125974f..fcba597 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
-static unsigned long msg_registered;
-static void msg_cleanup(void);
-
-static int msg_thread_exits(struct fsg_common *common)
-{
- msg_cleanup();
- return 0;
-}
-
static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
static int msg_bind(struct usb_composite_dev *cdev)
{
- static const struct fsg_operations ops = {
- .thread_exits = msg_thread_exits,
- };
struct fsg_opts *opts;
struct fsg_config config;
int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
- fsg_common_set_ops(opts->common, &ops);
-
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@@ -210,7 +196,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
DRIVER_DESC ", version: " DRIVER_VERSION "\n");
- set_bit(0, &msg_registered);
return 0;
fail_otg_desc:
@@ -261,9 +246,8 @@ static int __init msg_init(void)
}
module_init(msg_init);
-static void msg_cleanup(void)
+static void __exit msg_cleanup(void)
{
- if (test_and_clear_bit(0, &msg_registered))
- usb_composite_unregister(&msg_driver);
+ usb_composite_unregister(&msg_driver);
}
module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index a95b3e7..ad84029 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -28,6 +28,8 @@
#include <linux/of_gpio.h>
#include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+ | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
@@ -2172,7 +2174,7 @@ static int usba_udc_probe(struct platform_device *pdev)
IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(udc->vbus_pin), NULL,
- usba_vbus_irq_thread, IRQF_ONESHOT,
+ usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = -ENODEV;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 94c8a9f..b62a3de 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -237,6 +237,8 @@ struct dummy_hcd {
struct usb_device *udev;
struct list_head urbp_list;
+ struct urbp *next_frame_urbp;
+
u32 stream_en_ep;
u8 num_stream[30 / 2];
@@ -253,11 +255,13 @@ struct dummy {
*/
struct dummy_ep ep[DUMMY_ENDPOINTS];
int address;
+ int callback_usage;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf[FIFO_SIZE];
u16 devstatus;
+ unsigned ints_enabled:1;
unsigned udc_suspended:1;
unsigned pullup:1;
@@ -416,6 +420,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
static void set_link_state(struct dummy_hcd *dum_hcd)
{
struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
dum_hcd->active = 0;
if (dum->pullup)
@@ -426,32 +431,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
return;
set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
dum_hcd->active)
dum_hcd->resuming = 0;
/* Currently !connected or in reset */
- if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ if ((dum_hcd->port_status & power_bit) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ unsigned int disconnect = power_bit &
dum_hcd->old_status & (~dum_hcd->port_status);
- unsigned reset = USB_PORT_STAT_RESET &
+ unsigned int reset = USB_PORT_STAT_RESET &
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
- if (dum->driver && (disconnect || reset)) {
+ if (dum->ints_enabled && (disconnect || reset)) {
stop_activity(dum);
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
- } else if (dum_hcd->active != dum_hcd->old_active) {
+ } else if (dum_hcd->active != dum_hcd->old_active &&
+ dum->ints_enabled) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -965,8 +981,11 @@ static int dummy_udc_start(struct usb_gadget *g,
* can't enumerate without help from the driver we're binding.
*/
+ spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
+ dum->ints_enabled = 1;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -977,6 +996,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
+ dum->ints_enabled = 0;
+ stop_activity(dum);
+
+ /* emulate synchronize_irq(): wait for callbacks to finish */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irq(&dum->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dum->lock);
+ }
+
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
@@ -1030,7 +1059,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.max_speed = USB_SPEED_SUPER;
+ if (mod_data.is_super_speed)
+ dum->gadget.max_speed = USB_SPEED_SUPER;
+ else if (mod_data.is_high_speed)
+ dum->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ dum->gadget.max_speed = USB_SPEED_FULL;
dum->gadget.dev.parent = &pdev->dev;
init_dummy_udc_hw(dum);
@@ -1239,6 +1273,8 @@ static int dummy_urb_enqueue(
list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
+ if (!dum_hcd->next_frame_urbp)
+ dum_hcd->next_frame_urbp = urbp;
if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
@@ -1515,6 +1551,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd)))
return NULL;
+ if (!dum->ints_enabled)
+ return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep[0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1756,6 +1794,7 @@ static void dummy_timer(unsigned long _dum_hcd)
spin_unlock_irqrestore(&dum->lock, flags);
return;
}
+ dum_hcd->next_frame_urbp = NULL;
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_info[i].name)
@@ -1772,6 +1811,10 @@ static void dummy_timer(unsigned long _dum_hcd)
int type;
int status = -EINPROGRESS;
+ /* stop when we reach URBs queued after the timer interrupt */
+ if (urbp == dum_hcd->next_frame_urbp)
+ break;
+
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
@@ -1851,10 +1894,12 @@ static void dummy_timer(unsigned long _dum_hcd)
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
+ ++dum->callback_usage;
spin_unlock(&dum->lock);
value = dum->driver->setup(&dum->gadget,
&setup);
spin_lock(&dum->lock);
+ --dum->callback_usage;
if (value >= 0) {
/* no delays (max 64KB data stage) */
@@ -2559,8 +2604,6 @@ static struct hc_driver dummy_hcd = {
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
-
.reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
@@ -2589,8 +2632,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
dum = *((void **)dev_get_platdata(&pdev->dev));
- if (!mod_data.is_super_speed)
+ if (mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+ else if (mod_data.is_high_speed)
dummy_hcd.flags = HCD_USB2;
+ else
+ dummy_hcd.flags = HCD_USB11;
hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hs_hcd)
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index d2cfefa..bb89e24 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -879,7 +879,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
usb3_ep->ep.maxpacket);
u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
u32 tmp = 0;
- bool is_last;
+ bool is_last = !len ? true : false;
if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
return -EBUSY;
@@ -900,7 +900,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
usb3_write(usb3, tmp, fifo_reg);
}
- is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
+ if (!is_last)
+ is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
/* Send the data */
usb3_set_px_con_send(usb3_ep, len, is_last);
@@ -991,7 +992,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
usb3_set_p0_con_for_ctrl_read_data(usb3);
} else {
usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
- usb3_set_p0_con_for_ctrl_write_data(usb3);
+ if (usb3_req->req.length)
+ usb3_set_p0_con_for_ctrl_write_data(usb3);
}
usb3_p0_xfer(usb3_ep, usb3_req);
@@ -1568,7 +1570,16 @@ static u32 usb3_calc_ramarea(int ram_size)
static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
- return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc));
+ int i;
+ const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+ u32 mpkt = PN_RAMMAP_MPKT(1024);
+
+ for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
+ if (usb_endpoint_maxp(desc) <= max_packet_array[i])
+ mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
+ }
+
+ return usb3_ep->rammap_val | mpkt;
}
static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0b80cee3..eb121b2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -45,9 +45,9 @@
If unsure, say N.
config USB_XHCI_MTK
- tristate "xHCI support for Mediatek MT65xx"
+ tristate "xHCI support for Mediatek MT65xx/MT7621"
select MFD_SYSCON
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on (MIPS && SOC_MT7621) || ARCH_MEDIATEK || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
found in Mediatek MT65xx SoCs.
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 58b9685..ee213c5 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
return 0;
- usleep_range(40, 60);
+ udelay(50);
}
dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
- * and then waits 5 seconds for the BIOS to hand over control.
+ * and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
- /* Wait for 5 seconds with 10 microsecond polling interval */
+ /* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
- 0, 5000, 10);
+ 0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
@@ -1100,7 +1100,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
- 5000, 10);
+ 5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 93aa6a0..5434902 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
/* If PSI table exists, add the custom speed attributes from it */
if (usb3_1 && xhci->usb3_rhub.psi_count) {
- u32 ssp_cap_base, bm_attrib, psi;
+ u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
int offset;
ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
psi = xhci->usb3_rhub.psi[i];
psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ psi_exp = XHCI_EXT_PORT_PSIE(psi);
+ psi_mant = XHCI_EXT_PORT_PSIM(psi);
+
+			/* Shift to Gbps and set SSP Link BIT(14) if 10Gbps */
+ for (; psi_exp < 3; psi_exp++)
+ psi_mant /= 1000;
+ if (psi_mant >= 10)
+ psi |= BIT(14);
+
if ((psi & PLT_MASK) == PLT_SYM) {
/* Symmetric, create SSA RX and TX from one PSI entry */
put_unaligned_le32(psi, &buf[offset]);
@@ -357,7 +366,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i])
+ if (!xhci->devs[i] || !xhci->devs[i]->udev)
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
@@ -406,25 +415,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
-
+ ret = -ENOMEM;
+ goto cmd_cleanup;
}
ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
- i, suspend);
+ i, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_free_command(xhci, command);
- goto err_cmd_queue;
+ goto cmd_cleanup;
}
}
}
ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- goto err_cmd_queue;
+ goto cmd_cleanup;
}
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -436,7 +445,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = -ETIME;
}
-err_cmd_queue:
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
@@ -1571,9 +1580,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
- if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
- (hcd->speed < HCD_USB3))
- t2 &= ~PORT_WAKE_BITS;
} else
t2 &= ~PORT_WAKE_BITS;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2383344..c87ef38 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,11 +54,6 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
-#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
-
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
static const char hcd_name[] = "xhci_hcd";
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
- ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
- (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
- xhci->quirks |= XHCI_U2_DISABLE_WAKE;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1660c7c..1332057 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4871,7 +4871,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
hcd->has_tt = 1;
} else {
- if (xhci->sbrn == 0x31) {
+ /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+ if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index db46db4..c11eab1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1509,7 +1509,7 @@ struct xhci_bus_state {
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
return 0;
else
return 1;
@@ -1670,7 +1670,7 @@ struct xhci_hcd {
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-#define XHCI_U2_DISABLE_WAKE (1 << 27)
+/* Reserved. It was XHCI_U2_DISABLE_WAKE */
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
unsigned int num_active_eps;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index a6b6b1c..aac28d9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -890,7 +890,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
if (int_usb & MUSB_INTR_RESET) {
handled = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
@@ -900,9 +900,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
-
- if (is_host_active(musb))
- musb_recover_from_babble(musb);
+ musb_recover_from_babble(musb);
} else {
musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245..3e1f3da 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -313,6 +313,8 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
return 0;
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index aabfb41..44ab6d6 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -368,6 +368,7 @@ struct usbpd {
enum usbpd_state current_state;
bool hard_reset_recvd;
+ ktime_t hard_reset_recvd_time;
struct list_head rx_q;
spinlock_t rx_lock;
struct rx_msg *rx_ext_msg;
@@ -614,6 +615,9 @@ static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
int ret;
u16 hdr;
+ if (pd->hard_reset_recvd)
+ return -EBUSY;
+
hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
pd->tx_msgid, num_data, pd->spec_rev);
@@ -805,11 +809,13 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
return;
}
- usbpd_dbg(&pd->dev, "hard reset received\n");
+ pd->hard_reset_recvd = true;
+ pd->hard_reset_recvd_time = ktime_get();
+
+ usbpd_err(&pd->dev, "hard reset received\n");
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
- pd->hard_reset_recvd = true;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
@@ -1074,6 +1080,9 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
unsigned long flags;
int ret;
+ if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
+ return;
+
usbpd_dbg(&pd->dev, "%s -> %s\n",
usbpd_state_strings[pd->current_state],
usbpd_state_strings[next_state]);
@@ -2044,8 +2053,13 @@ static void usbpd_sm(struct work_struct *w)
if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
} else {
+ s64 delta = ktime_ms_delta(ktime_get(),
+ pd->hard_reset_recvd_time);
pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
- kick_sm(pd, PS_HARD_RESET_TIME);
+ if (delta >= PS_HARD_RESET_TIME)
+ kick_sm(pd, 0);
+ else
+ kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
}
goto sm_done;
@@ -2302,8 +2316,11 @@ static void usbpd_sm(struct work_struct *w)
&val);
/* save the PDOs so userspace can further evaluate */
- memcpy(&pd->received_pdos, rx_msg->payload,
+ memset(&pd->received_pdos, 0,
sizeof(pd->received_pdos));
+ memcpy(&pd->received_pdos, rx_msg->payload,
+ min_t(size_t, rx_msg->data_len,
+ sizeof(pd->received_pdos)));
pd->src_cap_id++;
usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
@@ -2411,8 +2428,11 @@ static void usbpd_sm(struct work_struct *w)
case PE_SNK_READY:
if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
/* save the PDOs so userspace can further evaluate */
- memcpy(&pd->received_pdos, rx_msg->payload,
+ memset(&pd->received_pdos, 0,
sizeof(pd->received_pdos));
+ memcpy(&pd->received_pdos, rx_msg->payload,
+ min_t(size_t, rx_msg->data_len,
+ sizeof(pd->received_pdos)));
pd->src_cap_id++;
usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 6395ca2..8a4f3d4 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -582,6 +582,10 @@ static irqreturn_t pdphy_msg_tx_irq(int irq, void *data)
{
struct usb_pdphy *pdphy = data;
+ /* TX already aborted by received signal */
+ if (pdphy->tx_status != -EINPROGRESS)
+ return IRQ_HANDLED;
+
if (irq == pdphy->msg_tx_irq) {
pdphy->msg_tx_cnt++;
pdphy->tx_status = 0;
@@ -635,6 +639,10 @@ static irqreturn_t pdphy_sig_rx_irq_thread(int irq, void *data)
if (pdphy->signal_cb)
pdphy->signal_cb(pdphy->usbpd, frame_type);
+ if (pdphy->tx_status == -EINPROGRESS) {
+ pdphy->tx_status = -EBUSY;
+ wake_up(&pdphy->tx_waitq);
+ }
done:
return IRQ_HANDLED;
}
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 1f80cde..81c39a3 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -26,6 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/reset.h>
+#include <linux/debugfs.h>
/* QUSB2PHY_PWR_CTRL1 register related bits */
#define PWR_CTRL1_POWR_DOWN BIT(0)
@@ -63,20 +64,13 @@
#define LINESTATE_DP BIT(0)
#define LINESTATE_DM BIT(1)
-/* eud related registers */
-#define EUD_SW_ATTACH_DET 0x1018
-#define EUD_INT1_EN_MASK 0x0024
+#define BIAS_CTRL_2_OVERRIDE_VAL 0x28
-/* EUD interrupt mask bits */
-#define EUD_INT_RX BIT(0)
-#define EUD_INT_TX BIT(1)
-#define EUD_INT_VBUS BIT(2)
-#define EUD_INT_CHGR BIT(3)
-#define EUD_INT_SAFE_MODE BIT(4)
+#define SQ_CTRL1_CHIRP_DISABLE 0x20
+#define SQ_CTRL2_CHIRP_DISABLE 0x80
-unsigned int phy_tune1;
-module_param(phy_tune1, uint, 0644);
-MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
+/* PERIPH_SS_PHY_REFGEN_NORTH_BG_CTRL register bits */
+#define BANDGAP_BYPASS BIT(0)
enum qusb_phy_reg {
PORT_TUNE1,
@@ -85,6 +79,9 @@ enum qusb_phy_reg {
INTR_CTRL,
PLL_CORE_INPUT_OVERRIDE,
TEST1,
+ BIAS_CTRL_2,
+ SQ_CTRL1,
+ SQ_CTRL2,
USB2_PHY_REG_MAX,
};
@@ -92,8 +89,8 @@ struct qusb_phy {
struct usb_phy phy;
struct mutex lock;
void __iomem *base;
- void __iomem *eud_base;
void __iomem *efuse_reg;
+ void __iomem *refgen_north_bg_reg;
struct clk *ref_clk_src;
struct clk *ref_clk;
@@ -125,6 +122,10 @@ struct qusb_phy {
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
+ u32 sq_ctrl1_default;
+ u32 sq_ctrl2_default;
+ bool chirp_disable;
+
/* emulation targets specific */
void __iomem *emu_phy_base;
bool emulation;
@@ -134,6 +135,10 @@ struct qusb_phy {
int phy_pll_reset_seq_len;
int *emu_dcm_reset_seq;
int emu_dcm_reset_seq_len;
+
+ /* override TUNEX registers value */
+ struct dentry *root;
+ u8 tune[5];
};
static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
@@ -415,7 +420,7 @@ static void qusb_phy_host_init(struct usb_phy *phy)
static int qusb_phy_init(struct usb_phy *phy)
{
struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
- int ret;
+ int ret, p_index;
u8 reg;
dev_dbg(phy->dev, "%s\n", __func__);
@@ -470,14 +475,19 @@ static int qusb_phy_init(struct usb_phy *phy)
qphy->base + qphy->phy_reg[PORT_TUNE1]);
}
- /* If phy_tune1 modparam set, override tune1 value */
- if (phy_tune1) {
- pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
- __func__, phy_tune1);
- writel_relaxed(phy_tune1,
- qphy->base + qphy->phy_reg[PORT_TUNE1]);
+ /* if debugfs based tunex params are set, use that value. */
+ for (p_index = 0; p_index < 5; p_index++) {
+ if (qphy->tune[p_index])
+ writel_relaxed(qphy->tune[p_index],
+ qphy->base + qphy->phy_reg[PORT_TUNE1] +
+ (4 * p_index));
}
+ if (qphy->refgen_north_bg_reg)
+ if (readl_relaxed(qphy->refgen_north_bg_reg) & BANDGAP_BYPASS)
+ writel_relaxed(BIAS_CTRL_2_OVERRIDE_VAL,
+ qphy->base + qphy->phy_reg[BIAS_CTRL_2]);
+
/* ensure above writes are completed before re-enabling PHY */
wmb();
@@ -651,6 +661,52 @@ static int qusb_phy_notify_disconnect(struct usb_phy *phy,
return 0;
}
+static int qusb_phy_disable_chirp(struct usb_phy *phy, bool disable)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret = 0;
+
+ dev_dbg(phy->dev, "%s qphy chirp disable %d disable %d\n", __func__,
+ qphy->chirp_disable, disable);
+
+ mutex_lock(&qphy->lock);
+
+ if (qphy->chirp_disable == disable) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ qphy->chirp_disable = disable;
+
+ if (disable) {
+ qphy->sq_ctrl1_default =
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ qphy->sq_ctrl2_default =
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+
+ writel_relaxed(SQ_CTRL1_CHIRP_DISABLE,
+ qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+
+ writel_relaxed(SQ_CTRL1_CHIRP_DISABLE,
+ qphy->base + qphy->phy_reg[SQ_CTRL2]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+
+ goto done;
+ }
+
+ writel_relaxed(qphy->sq_ctrl1_default,
+ qphy->base + qphy->phy_reg[SQ_CTRL1]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL1]);
+
+ writel_relaxed(qphy->sq_ctrl2_default,
+ qphy->base + qphy->phy_reg[SQ_CTRL2]);
+ readl_relaxed(qphy->base + qphy->phy_reg[SQ_CTRL2]);
+done:
+ mutex_unlock(&qphy->lock);
+ return ret;
+}
+
static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -667,22 +723,6 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
return ret;
}
qphy->dpdm_enable = true;
-
- if (qphy->eud_base) {
- if (qphy->cfg_ahb_clk)
- clk_prepare_enable(qphy->cfg_ahb_clk);
- writel_relaxed(BIT(0),
- qphy->eud_base + EUD_SW_ATTACH_DET);
- /* to flush above write before next write */
- wmb();
-
- writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
- qphy->eud_base + EUD_INT1_EN_MASK);
- /* to flush above write before turning off clk */
- wmb();
- if (qphy->cfg_ahb_clk)
- clk_disable_unprepare(qphy->cfg_ahb_clk);
- }
}
return ret;
@@ -697,16 +737,6 @@ static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
__func__, qphy->dpdm_enable);
if (qphy->dpdm_enable) {
- if (qphy->eud_base) {
- if (qphy->cfg_ahb_clk)
- clk_prepare_enable(qphy->cfg_ahb_clk);
- writel_relaxed(0, qphy->eud_base + EUD_SW_ATTACH_DET);
- /* to flush above write before turning off clk */
- wmb();
- if (qphy->cfg_ahb_clk)
- clk_disable_unprepare(qphy->cfg_ahb_clk);
- }
-
ret = qusb_phy_enable_power(qphy, false);
if (ret < 0) {
dev_dbg(qphy->phy.dev,
@@ -762,6 +792,38 @@ static int qusb_phy_regulator_init(struct qusb_phy *qphy)
return 0;
}
+static int qusb_phy_create_debugfs(struct qusb_phy *qphy)
+{
+ struct dentry *file;
+ int ret = 0, i;
+ char name[6];
+
+ qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
+ if (IS_ERR_OR_NULL(qphy->root)) {
+ dev_err(qphy->phy.dev,
+ "can't create debugfs root for %s\n",
+ dev_name(qphy->phy.dev));
+ ret = -ENOMEM;
+ goto create_err;
+ }
+
+ for (i = 0; i < 5; i++) {
+ snprintf(name, sizeof(name), "tune%d", (i + 1));
+ file = debugfs_create_x8(name, 0644, qphy->root,
+ &qphy->tune[i]);
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(qphy->phy.dev,
+ "can't create debugfs entry for %s\n", name);
+ debugfs_remove_recursive(qphy->root);
+			ret = -ENOMEM;
+ goto create_err;
+ }
+ }
+
+create_err:
+ return ret;
+}
+
static int qusb_phy_probe(struct platform_device *pdev)
{
struct qusb_phy *qphy;
@@ -814,15 +876,10 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "eud_base");
- if (res) {
- qphy->eud_base = devm_ioremap(dev, res->start,
- resource_size(res));
- if (IS_ERR(qphy->eud_base)) {
- dev_dbg(dev, "couldn't ioremap eud_base\n");
- qphy->eud_base = NULL;
- }
- }
+ "refgen_north_bg_reg_addr");
+ if (res)
+ qphy->refgen_north_bg_reg = devm_ioremap(dev, res->start,
+ resource_size(res));
/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
@@ -941,7 +998,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
if (qphy->phy_reg) {
qphy->qusb_phy_reg_offset_cnt =
size / sizeof(*qphy->phy_reg);
- if (qphy->qusb_phy_reg_offset_cnt > USB2_PHY_REG_MAX) {
+ if (qphy->qusb_phy_reg_offset_cnt != USB2_PHY_REG_MAX) {
dev_err(dev, "invalid reg offset count\n");
return -EINVAL;
}
@@ -1035,30 +1092,18 @@ static int qusb_phy_probe(struct platform_device *pdev)
qphy->phy.type = USB_PHY_TYPE_USB2;
qphy->phy.notify_connect = qusb_phy_notify_connect;
qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
+ qphy->phy.disable_chirp = qusb_phy_disable_chirp;
ret = usb_add_phy_dev(&qphy->phy);
if (ret)
return ret;
- /* ldo24 is turned on and eud is pet irrespective of cable
- * cable connection status by boot sw. Assume usb cable is not
- * connected and perform detach pet. If usb cable is connected,
- * eud hw will be pet in the dpdm callback.
- */
- if (qphy->eud_base) {
- if (qphy->cfg_ahb_clk)
- clk_prepare_enable(qphy->cfg_ahb_clk);
-
- writel_relaxed(0, qphy->eud_base + EUD_SW_ATTACH_DET);
-
- if (qphy->cfg_ahb_clk)
- clk_disable_unprepare(qphy->cfg_ahb_clk);
- }
-
ret = qusb_phy_regulator_init(qphy);
if (ret)
usb_remove_phy(&qphy->phy);
+ qusb_phy_create_debugfs(qphy);
+
return ret;
}
@@ -1069,6 +1114,7 @@ static int qusb_phy_remove(struct platform_device *pdev)
usb_remove_phy(&qphy->phy);
qusb_phy_enable_clocks(qphy, false);
qusb_phy_enable_power(qphy, false);
+ debugfs_remove_recursive(qphy->root);
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 857e783..6c6a3a8 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -285,11 +285,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ int ret = 0;
- if (!usbhs_pipe_is_dcp(pipe))
- usbhsf_fifo_barrier(priv, fifo);
+ if (!usbhs_pipe_is_dcp(pipe)) {
+ /*
+ * This driver checks the pipe condition first to avoid -EBUSY
+ * from usbhsf_fifo_barrier() with about 10 msec delay in
+ * the interrupt handler if the pipe is RX direction and empty.
+ */
+ if (usbhs_pipe_is_dir_in(pipe))
+ ret = usbhs_pipe_is_accessible(pipe);
+ if (!ret)
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ }
- usbhs_write(priv, fifo->ctr, BCLR);
+ /*
+ * if non-DCP pipe, this driver should set BCLR when
+ * usbhsf_fifo_barrier() returns 0.
+ */
+ if (!ret)
+ usbhs_write(priv, fifo->ctr, BCLR);
}
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
@@ -845,9 +860,9 @@ static void xfer_work(struct work_struct *work)
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
- usbhsf_dma_start(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index b6f1ade..76062ce 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
+ info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 470b17b..11ee55e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -171,6 +171,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1939496..3249f42 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1ce..f9d15bd 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID 0x04B4
+#define CYPRESS_WICED_BT_USB_PID 0x009B
+#define CYPRESS_WICED_WL_USB_PID 0xF900
+
+/*
* Microchip Technology, Inc.
*
* MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 39e6830..45182c6 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 136ff5e..135eb04 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -234,11 +234,16 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, buf, 1, MOS_WDR_TIMEOUT);
- if (status == 1)
+ if (status == 1) {
*data = *buf;
- else if (status < 0)
+ } else {
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d\n", status);
+ if (status >= 0)
+ status = -EIO;
+ *data = 0;
+ }
+
kfree(buf);
return status;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 5c4fc3a..6baacf6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -285,9 +285,15 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+
*val = buf[0];
dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
-
+out:
kfree(buf);
return ret;
}
@@ -353,8 +359,13 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
*val = buf[0];
-
+out:
kfree(buf);
return ret;
}
@@ -1490,10 +1501,10 @@ static int mos7840_tiocmget(struct tty_struct *tty)
return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
- if (status != 1)
+ if (status < 0)
return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
- if (status != 1)
+ if (status < 0)
return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2a99443..db3d34c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 652b433..e1c1e32 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1a59f33..a3ccb89 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -834,13 +834,25 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
if (result == USB_STOR_TRANSPORT_GOOD) {
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
+ }
+
+ /*
+ * ATA-passthru commands use sense data to report
+ * the command completion status, and often devices
+ * return Check Condition status when nothing is
+ * wrong.
+ */
+ else if (srb->cmnd[0] == ATA_16 ||
+ srb->cmnd[0] == ATA_12) {
+ /* leave the data alone */
+ }
/*
* If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
- } else {
+ else {
srb->result = DID_ERROR << 16;
if ((sshdr.response_code & 0x72) == 0x72)
srb->sense_buffer[1] = HARDWARE_ERROR;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index f58caa9..a155cd0 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
-static int uas_find_uas_alt_setting(struct usb_interface *intf)
+static struct usb_host_interface *uas_find_uas_alt_setting(
+ struct usb_interface *intf)
{
int i;
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt))
- return alt->desc.bAlternateSetting;
+ return alt;
}
- return -ENODEV;
+ return NULL;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
- int r, alt;
-
+ struct usb_host_interface *alt;
+ int r;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
+ if (!alt)
return 0;
- r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ r = uas_find_endpoints(alt, eps);
if (r < 0)
return 0;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 5ef014b..9876af4 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
- int alt;
+ struct usb_host_interface *alt;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
- return alt;
+ if (!alt)
+ return -ENODEV;
- return usb_set_interface(udev,
- intf->altsetting[0].desc.bInterfaceNumber, alt);
+ return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 9129f6c..2572fd5 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
+/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+ "Seagate",
+ "External",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT ),
+
UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
"Maxtor",
"USB to SATA",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 35a1e77..9a53912 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
+ return -ENODEV;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index 01c20a2..39dd4ef 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -302,18 +302,22 @@ static int uwbd(void *param)
/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
- rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
- if (rc->uwbd.task == NULL)
+ struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
+ if (IS_ERR(task)) {
+ rc->uwbd.task = NULL;
printk(KERN_ERR "UWB: Cannot start management daemon; "
"UWB won't work\n");
- else
+ } else {
+ rc->uwbd.task = task;
rc->uwbd.pid = rc->uwbd.task->pid;
+ }
}
/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
- kthread_stop(rc->uwbd.task);
+ if (rc->uwbd.task)
+ kthread_stop(rc->uwbd.task);
uwbd_flush(rc);
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 11026e7..81367cf 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1861,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
- struct atyclk clk;
+ struct atyclk clk = { 0 };
union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 5872bc4..df02fb4 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = {
/*
* Turn the hardware cursor off.
*/
-static void __init pmagbafb_erase_cursor(struct fb_info *info)
+static void pmagbafb_erase_cursor(struct fb_info *info)
{
struct pmagbafb_par *par = info->par;
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 8e302d0..3efa295 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
unsigned int timeout)
{
struct kempld_device_data *pld = wdt_data->pld;
- u32 prescaler = kempld_prescaler[PRESCALER_21];
+ u32 prescaler;
u64 stage_timeout64;
u32 stage_timeout;
u32 remainder;
u8 stage_cfg;
+#if GCC_VERSION < 40400
+ /* work around a bug compiling do_div() */
+ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
+#else
+ prescaler = kempld_prescaler[PRESCALER_21];
+#endif
+
if (!stage)
return -EINVAL;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ef2b61..79b8ab4 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1030,6 +1030,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
mutex_unlock(&priv->lock);
if (use_ptemod) {
+ map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1067,7 +1068,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
set_grant_ptes_as_special, NULL);
}
#endif
- map->pages_vm_start = vma->vm_start;
}
return 0;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 26e5e85..9122ba2 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -277,8 +277,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
err = xenbus_transaction_start(&xbt);
if (err)
return;
- if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
- pr_err("Unable to read sysrq code in control/sysrq\n");
+ err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+ if (err < 0) {
+ /*
+ * The Xenstore watch fires directly after registering it and
+ * after a suspend/resume cycle. So ENOENT is no error but
+ * might happen in those cases.
+ */
+ if (err != -ENOENT)
+ pr_err("Error %d reading sysrq code in control/sysrq\n",
+ err);
xenbus_transaction_end(xbt, 1);
return;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 679f79f..b68ced5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -680,3 +680,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only,
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->mmap)
+ return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ dma_addr, size, attrs);
+#endif
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 07e46b7..cb936c9 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -450,10 +450,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
set_page_writeback(page);
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
- if (result)
+ if (result) {
end_page_writeback(page);
- else
+ } else {
+ clean_page_buffers(page);
unlock_page(page);
+ }
blk_queue_exit(bdev->bd_queue);
return result;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8a05fa7..f089d7d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8050,8 +8050,10 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
start += sectorsize;
- if (nr_sectors--) {
+ nr_sectors--;
+ if (nr_sectors) {
pgoff += sectorsize;
+ ASSERT(pgoff < PAGE_SIZE);
goto next_block_or_try_again;
}
}
@@ -8157,8 +8159,10 @@ static int __btrfs_subio_endio_read(struct inode *inode,
ASSERT(nr_sectors);
- if (--nr_sectors) {
+ nr_sectors--;
+ if (nr_sectors) {
pgoff += sectorsize;
+ ASSERT(pgoff < PAGE_SIZE);
goto next_block;
}
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1782804..0fe346c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3052,7 +3052,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
out:
if (ret)
btrfs_cmp_data_free(cmp);
- return 0;
+ return ret;
}
static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
@@ -4082,6 +4082,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
+ if (!is_fstree(new_root->objectid)) {
+ ret = -ENOENT;
+ goto out;
+ }
path = btrfs_alloc_path();
if (!path) {
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2cf5e14..04c61bc 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2367,11 +2367,11 @@ void free_reloc_roots(struct list_head *list)
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
reloc_root->node = NULL;
reloc_root->commit_root = NULL;
- __del_reloc_root(reloc_root);
}
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 71261b4..77f9efc 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1680,6 +1680,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
int ret;
+ if (ino == BTRFS_FIRST_FREE_OBJECTID)
+ return 1;
+
ret = get_cur_inode_state(sctx, ino, gen);
if (ret < 0)
goto out;
@@ -1865,7 +1868,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
* not deleted and then re-created, if it was then we have no overwrite
* and we can just unlink this entry.
*/
- if (sctx->parent_root) {
+ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
NULL, NULL, NULL);
if (ret < 0 && ret != -ENOENT)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 71a60cc..06a77e4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6226,7 +6226,7 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev ||
- (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+ (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
bbio_error(bbio, first_bio, logical);
continue;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 03951f9..3e1c136 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1900,6 +1900,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+ spin_unlock(&ci->i_ceph_lock);
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
}
@@ -1917,8 +1918,10 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
mutex_lock(&session->s_mutex);
goto retry;
}
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ spin_unlock(&ci->i_ceph_lock);
goto out;
+ }
flushing = __mark_caps_flushing(inode, session, true,
&flush_tid, &oldest_flush_tid);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 953275b..4a6df2c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1323,8 +1323,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
ceph_dir_clear_ordered(dir);
dout("d_delete %p\n", dn);
d_delete(dn);
- } else {
- if (have_lease && d_unhashed(dn))
+ } else if (have_lease) {
+ if (d_unhashed(dn))
d_add(dn, NULL);
update_dentry_lease(dn, rinfo->dlease,
session,
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 7d752d5..4c9c72f 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -25,7 +25,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
l.stripe_count = ci->i_layout.stripe_count;
l.object_size = ci->i_layout.object_size;
l.data_pool = ci->i_layout.pool_id;
- l.preferred_osd = (s32)-1;
+ l.preferred_osd = -1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
}
@@ -97,7 +97,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
nl.data_pool = ci->i_layout.pool_id;
/* this is obsolete, and always -1 */
- nl.preferred_osd = le64_to_cpu(-1);
+ nl.preferred_osd = -1;
err = __validate_layout(mdsc, &nl);
if (err)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index e3e1a80..c0f52c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1782,13 +1782,18 @@ static int build_dentry_path(struct dentry *dentry,
int *pfreepath)
{
char *path;
+ struct inode *dir;
- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
- *pino = ceph_ino(d_inode(dentry->d_parent));
+ rcu_read_lock();
+ dir = d_inode_rcu(dentry->d_parent);
+ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dir);
+ rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
return 0;
}
+ rcu_read_unlock();
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
if (IS_ERR(path))
return PTR_ERR(path);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c0c2530..87658f6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1360,7 +1360,7 @@ exit_cifs(void)
exit_cifs_idmap();
#endif
#ifdef CONFIG_CIFS_UPCALL
- unregister_key_type(&cifs_spnego_key_type);
+ exit_cifs_spnego();
#endif
cifs_destroy_request_bufs();
cifs_destroy_mids();
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 48ef401..7b496a4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -365,6 +365,8 @@ struct smb_version_operations {
unsigned int (*calc_smb_size)(void *);
/* check for STATUS_PENDING and process it in a positive case */
bool (*is_status_pending)(char *, struct TCP_Server_Info *, int);
+ /* check for STATUS_NETWORK_SESSION_EXPIRED */
+ bool (*is_session_expired)(char *);
/* send oplock break response */
int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *,
struct cifsInodeInfo *);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 1f91c9d..cc420d6 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1457,6 +1457,13 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return length;
server->total_read += length;
+ if (server->ops->is_session_expired &&
+ server->ops->is_session_expired(buf)) {
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return -1;
+ }
+
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, 0)) {
discard_remaining_data(server);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 1a54569..580b3a4 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -796,6 +796,13 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
+ if (server->ops->is_session_expired &&
+ server->ops->is_session_expired(buf)) {
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return -1;
+ }
+
if (server->ops->is_status_pending &&
server->ops->is_status_pending(buf, server, length))
return -1;
@@ -4071,6 +4078,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
+ if (ses->auth_key.response) {
+ cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
+ ses->auth_key.response);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
+ }
+
if (server->ops->sess_setup)
rc = server->ops->sess_setup(xid, ses, nls_info);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index dd3e236..d9cbda2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;
- if (unlikely(direntry->d_name.len >
+ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+ direntry->d_name.len >
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
@@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
rc = check_name(direntry, tcon);
if (rc)
- goto out_free_xid;
+ goto out;
server = tcon->ses->server;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 3925758..cf192f9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (f_flags & O_SYNC)
+ create_options |= CREATE_WRITE_THROUGH;
+
+ if (f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index b696824..812e488 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1018,6 +1018,18 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length)
return true;
}
+static bool
+smb2_is_session_expired(char *buf)
+{
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+
+ if (hdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
+ return false;
+
+ cifs_dbg(FYI, "Session expired\n");
+ return true;
+}
+
static int
smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
struct cifsInodeInfo *cinode)
@@ -1609,6 +1621,7 @@ struct smb_version_operations smb20_operations = {
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
@@ -1690,6 +1703,7 @@ struct smb_version_operations smb21_operations = {
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
@@ -1773,6 +1787,7 @@ struct smb_version_operations smb30_operations = {
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
@@ -1862,6 +1877,7 @@ struct smb_version_operations smb311_operations = {
.close_dir = smb2_close_dir,
.calc_smb_size = smb2_calc_size,
.is_status_pending = smb2_is_status_pending,
+ .is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
.queryfs = smb2_queryfs,
.mand_lock = smb2_mand_lock,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0437e5f..69b610ad 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -366,7 +366,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
req->NegotiateContextCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
+ inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
+ sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
}
#else
@@ -531,15 +531,22 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
/*
* validation ioctl must be signed, so no point sending this if we
- * can not sign it. We could eventually change this to selectively
+ * can not sign it (ie are not known user). Even if signing is not
+ * required (enabled but not negotiated), in those cases we selectively
* sign just this, the first and only signed request on a connection.
- * This is good enough for now since a user who wants better security
- * would also enable signing on the mount. Having validation of
- * negotiate info for signed connections helps reduce attack vectors
+ * Having validation of negotiate info helps reduce attack vectors.
*/
- if (tcon->ses->server->sign == false)
+ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
return 0; /* validation requires signing */
+ if (tcon->ses->user_name == NULL) {
+ cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
+ return 0; /* validation requires signing */
+ }
+
+ if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+ cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+
vneg_inbuf.Capabilities =
cpu_to_le32(tcon->ses->server->vals->req_capabilities);
memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
@@ -1010,6 +1017,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
while (sess_data->func)
sess_data->func(sess_data);
+ if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
+ cifs_dbg(VFS, "signing requested but authenticated as guest\n");
rc = sess_data->result;
out:
kfree(sess_data);
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index f17684c..facf63c 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
+ccflags-y += -Ifs/ext4
fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61cfcce..5c24071 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -28,6 +28,7 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fscrypto.h>
+#include "ext4_ice.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -406,6 +407,9 @@ static void completion_pages(struct work_struct *work)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
+ if (ext4_is_ice_enabled())
+ SetPageUptodate(page);
+ else {
int ret = fscrypt_decrypt_page(page);
if (ret) {
@@ -414,6 +418,7 @@ static void completion_pages(struct work_struct *work)
} else {
SetPageUptodate(page);
}
+ }
unlock_page(page);
}
fscrypt_release_ctx(ctx);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index bb46063..106e55c 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -11,6 +11,7 @@
#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/fscrypto.h>
+#include "ext4_ice.h"
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
@@ -108,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
goto out;
}
ukp = user_key_payload(keyring_key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ res = -EKEYREVOKED;
+ goto out;
+ }
if (ukp->datalen != sizeof(struct fscrypt_key)) {
res = -EINVAL;
goto out;
@@ -130,13 +136,17 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
}
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
- const char **cipher_str_ret, int *keysize_ret)
+ const char **cipher_str_ret, int *keysize_ret, int *fname)
{
if (S_ISREG(inode->i_mode)) {
if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
*cipher_str_ret = "xts(aes)";
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
+ } else if (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE) {
+ *cipher_str_ret = "bugon";
+ *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+ return 0;
}
pr_warn_once("fscrypto: unsupported contents encryption mode "
"%d for inode %lu\n",
@@ -148,6 +158,7 @@ static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
*cipher_str_ret = "cts(cbc(aes))";
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+ *fname = 1;
return 0;
}
pr_warn_once("fscrypto: unsupported filenames encryption mode "
@@ -167,9 +178,26 @@ static void put_crypt_info(struct fscrypt_info *ci)
return;
crypto_free_skcipher(ci->ci_ctfm);
+ memzero_explicit(ci->ci_raw_key,
+ sizeof(ci->ci_raw_key));
kmem_cache_free(fscrypt_info_cachep, ci);
}
+static int fs_data_encryption_mode(void)
+{
+ return ext4_is_ice_enabled() ? FS_ENCRYPTION_MODE_PRIVATE :
+ FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
+int fs_using_hardware_encryption(struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ return S_ISREG(inode->i_mode) && ci &&
+ ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fs_using_hardware_encryption);
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@@ -177,8 +205,8 @@ int fscrypt_get_encryption_info(struct inode *inode)
struct crypto_skcipher *ctfm;
const char *cipher_str;
int keysize;
- u8 *raw_key = NULL;
int res;
+ int fname = 0;
if (inode->i_crypt_info)
return 0;
@@ -195,7 +223,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (!fscrypt_dummy_context_enabled(inode))
return res;
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.contents_encryption_mode = fs_data_encryption_mode();
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
ctx.flags = 0;
} else if (res != sizeof(ctx)) {
@@ -219,7 +247,8 @@ int fscrypt_get_encryption_info(struct inode *inode)
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
- res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize,
+ &fname);
if (res)
goto out;
@@ -228,24 +257,21 @@ int fscrypt_get_encryption_info(struct inode *inode)
* crypto API as part of key derivation.
*/
res = -ENOMEM;
- raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
- if (!raw_key)
- goto out;
if (fscrypt_dummy_context_enabled(inode)) {
- memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+ memset(crypt_info->ci_raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
goto got_key;
}
- res = validate_user_key(crypt_info, &ctx, raw_key,
+ res = validate_user_key(crypt_info, &ctx, crypt_info->ci_raw_key,
FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
if (res && inode->i_sb->s_cop->key_prefix) {
u8 *prefix = NULL;
int prefix_size, res2;
prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
- res2 = validate_user_key(crypt_info, &ctx, raw_key,
- prefix, prefix_size);
+ res2 = validate_user_key(crypt_info, &ctx,
+ crypt_info->ci_raw_key, prefix, prefix_size);
if (res2) {
if (res2 == -ENOKEY)
res = -ENOKEY;
@@ -255,28 +281,33 @@ int fscrypt_get_encryption_info(struct inode *inode)
goto out;
}
got_key:
- ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
- if (!ctfm || IS_ERR(ctfm)) {
- res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
- printk(KERN_DEBUG
- "%s: error %d (inode %u) allocating crypto tfm\n",
- __func__, res, (unsigned) inode->i_ino);
+ if (crypt_info->ci_data_mode != FS_ENCRYPTION_MODE_PRIVATE || fname) {
+ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+ if (!ctfm || IS_ERR(ctfm)) {
+ res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+ pr_err("%s: error %d inode %u allocating crypto tfm\n",
+ __func__, res, (unsigned int) inode->i_ino);
+ goto out;
+ }
+ crypt_info->ci_ctfm = ctfm;
+ crypto_skcipher_clear_flags(ctfm, ~0);
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key,
+ keysize);
+ if (res)
+ goto out;
+ } else if (!ext4_is_ice_enabled()) {
+ pr_warn("%s: ICE support not available\n",
+ __func__);
+ res = -EINVAL;
goto out;
}
- crypt_info->ci_ctfm = ctfm;
- crypto_skcipher_clear_flags(ctfm, ~0);
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
- if (res)
- goto out;
-
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
out:
if (res == -ENOKEY)
res = 0;
put_crypt_info(crypt_info);
- kzfree(raw_key);
return res;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index c60756e..bf03a92 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -411,6 +411,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
+ bio->bi_dio_inode = dio->inode;
dio->bio_bdev = bio->bi_bdev;
if (sdio->submit_io) {
@@ -424,6 +425,18 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
sdio->logical_offset_in_bio = 0;
}
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+ struct inode *inode = NULL;
+
+ if (bio == NULL)
+ return NULL;
+
+ inode = bio->bi_dio_inode;
+
+ return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
/*
* Release any resources in case of a failure
*/
@@ -835,7 +848,8 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
*/
if (sdio->boundary) {
ret = dio_send_cur_page(dio, sdio, map_bh);
- dio_bio_submit(dio, sdio);
+ if (sdio->bio)
+ dio_bio_submit(dio, sdio);
put_page(sdio->cur_page);
sdio->cur_page = NULL;
}
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 599a292..a896e46 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
static inline struct ecryptfs_auth_tok *
ecryptfs_get_encrypted_key_payload_data(struct key *key)
{
- if (key->type == &key_type_encrypted)
- return (struct ecryptfs_auth_tok *)
- (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
- else
+ struct encrypted_key_payload *payload;
+
+ if (key->type != &key_type_encrypted)
return NULL;
+
+ payload = key->payload.data[0];
+ if (!payload)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)payload->payload_data;
}
static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
ecryptfs_get_key_payload_data(struct key *key)
{
struct ecryptfs_auth_tok *auth_tok;
+ const struct user_key_payload *ukp;
auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
- if (!auth_tok)
- return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
- else
+ if (auth_tok)
return auth_tok;
+
+ ukp = user_key_payload(key);
+ if (!ukp)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)ukp->data;
}
#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546..fa218cd 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ static int ecryptfs_verify_version(u16 version)
* @auth_tok_key: key containing the authentication token
* @auth_tok: authentication token
*
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
*/
static int
ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
int rc = 0;
(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (IS_ERR(*auth_tok)) {
+ rc = PTR_ERR(*auth_tok);
+ *auth_tok = NULL;
+ goto out;
+ }
+
if (ecryptfs_verify_version((*auth_tok)->version)) {
printk(KERN_ERR "Data structure version mismatch. Userspace "
"tools must match eCryptfs kernel module with major "
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..e9232a0 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -109,10 +109,16 @@
decrypted pages in the page cache.
config EXT4_FS_ENCRYPTION
- bool
- default y
+ bool "Ext4 FS Encryption"
+ default n
depends on EXT4_ENCRYPTION
+config EXT4_FS_ICE_ENCRYPTION
+ bool "Ext4 Encryption with ICE support"
+ default n
+ depends on EXT4_FS_ENCRYPTION
+ depends on PFK
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f..d9e563a 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -1,6 +1,7 @@
#
# Makefile for the linux ext4-filesystem routines.
#
+ccflags-y += -Ifs/crypto
obj-$(CONFIG_EXT4_FS) += ext4.o
@@ -12,3 +13,4 @@
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ICE_ENCRYPTION) += ext4_ice.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index dfa5199..dfd01ca 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -192,13 +192,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (error)
- return error;
- inode->i_ctime = ext4_current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
- }
break;
case ACL_TYPE_DEFAULT:
@@ -231,6 +224,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
handle_t *handle;
int error, retries = 0;
+ umode_t mode = inode->i_mode;
+ int update_mode = 0;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR,
@@ -238,7 +233,20 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (IS_ERR(handle))
return PTR_ERR(handle);
+ if ((type == ACL_TYPE_ACCESS) && acl) {
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error)
+ goto out_stop;
+ update_mode = 1;
+ }
+
error = __ext4_set_acl(handle, inode, type, acl);
+ if (!error && update_mode) {
+ inode->i_mode = mode;
+ inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
+ }
+out_stop:
ext4_journal_stop(handle);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 20ee0e4..9b67de7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2352,6 +2352,7 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
+#define fs_using_hardware_encryption fs_notsupp_using_hardware_encryption
#endif
/* dir.c */
diff --git a/fs/ext4/ext4_ice.c b/fs/ext4/ext4_ice.c
new file mode 100644
index 0000000..25f79ae
--- /dev/null
+++ b/fs/ext4/ext4_ice.c
@@ -0,0 +1,107 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ext4_ice.h"
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[ext4_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return 0;
+
+ return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2)
+{
+ char *key1 = NULL;
+ char *key2 = NULL;
+ char *salt1 = NULL;
+ char *salt2 = NULL;
+
+ if (!inode1 || !inode2)
+ return 0;
+
+ if (inode1 == inode2)
+ return 1;
+
+ /* both do not belong to ice, so we don't care, they are equal for us */
+ if (!ext4_should_be_processed_by_ice(inode1) &&
+ !ext4_should_be_processed_by_ice(inode2))
+ return 1;
+
+ /* one belongs to ice, the other does not -> not equal */
+ if (ext4_should_be_processed_by_ice(inode1) ^
+ ext4_should_be_processed_by_ice(inode2))
+ return 0;
+
+ key1 = ext4_get_ice_encryption_key(inode1);
+ key2 = ext4_get_ice_encryption_key(inode2);
+ salt1 = ext4_get_ice_encryption_salt(inode1);
+ salt2 = ext4_get_ice_encryption_salt(inode2);
+
+ /* key and salt should not be null by this point */
+ if (!key1 || !key2 || !salt1 || !salt2 ||
+ (ext4_get_ice_encryption_key_size(inode1) !=
+ ext4_get_ice_encryption_key_size(inode2)) ||
+ (ext4_get_ice_encryption_salt_size(inode1) !=
+ ext4_get_ice_encryption_salt_size(inode2)))
+ return 0;
+
+ return ((memcmp(key1, key2,
+ ext4_get_ice_encryption_key_size(inode1)) == 0) &&
+ (memcmp(salt1, salt2,
+ ext4_get_ice_encryption_salt_size(inode1)) == 0));
+}
diff --git a/fs/ext4/ext4_ice.h b/fs/ext4/ext4_ice.h
new file mode 100644
index 0000000..04e09bf
--- /dev/null
+++ b/fs/ext4/ext4_ice.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _EXT4_ICE_H
+#define _EXT4_ICE_H
+
+#include "ext4.h"
+#include <linux/fscrypto.h>
+
+#ifdef CONFIG_EXT4_FS_ICE_ENCRYPTION
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+ if (!ext4_encrypted_inode((struct inode *)inode))
+ return 0;
+
+ return fs_using_hardware_encryption((struct inode *)inode);
+}
+
+static inline int ext4_is_ice_enabled(void)
+{
+ return 1;
+}
+
+int ext4_is_aes_xts_cipher(const struct inode *inode);
+
+char *ext4_get_ice_encryption_key(const struct inode *inode);
+char *ext4_get_ice_encryption_salt(const struct inode *inode);
+
+int ext4_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2);
+
+static inline size_t ext4_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+#else
+static inline int ext4_should_be_processed_by_ice(const struct inode *inode)
+{
+ return 0;
+}
+static inline int ext4_is_ice_enabled(void)
+{
+ return 0;
+}
+
+static inline char *ext4_get_ice_encryption_key(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline char *ext4_get_ice_encryption_salt(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline size_t ext4_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline size_t ext4_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int ext4_is_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int ext4_is_ice_encryption_info_equal(
+ const struct inode *inode1,
+ const struct inode *inode2)
+{
+ return 0;
+}
+
+static inline int ext4_is_aes_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _EXT4_ICE_H */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d17d12e..510e664 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -527,7 +527,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
inode_lock(inode);
isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset < 0 || offset >= isize) {
inode_unlock(inode);
return -ENXIO;
}
@@ -590,7 +590,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
inode_lock(inode);
isize = i_size_read(inode);
- if (offset >= isize) {
+ if (offset < 0 || offset >= isize) {
inode_unlock(inode);
return -ENXIO;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index de47a29..dcb9669 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,6 +42,7 @@
#include "xattr.h"
#include "acl.h"
#include "truncate.h"
+#include "ext4_ice.h"
#include <trace/events/ext4.h>
#include <trace/events/android_fs.h>
@@ -1152,7 +1153,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode);
+ S_ISREG(inode->i_mode) &&
+ !ext4_is_ice_enabled();
}
}
/*
@@ -2120,15 +2122,29 @@ static int ext4_writepage(struct page *page,
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
int len;
- loff_t size = i_size_read(mpd->inode);
+ loff_t size;
int err;
BUG_ON(page->index != mpd->first_page);
+ clear_page_dirty_for_io(page);
+ /*
+ * We have to be very careful here! Nothing protects writeback path
+ * against i_size changes and the page can be writeably mapped into
+ * page tables. So an application can be growing i_size and writing
+ * data through mmap while writeback runs. clear_page_dirty_for_io()
+ * write-protects our page in page tables and the page cannot get
+ * written to again until we release page lock. So only after
+ * clear_page_dirty_for_io() we are safe to sample i_size for
+ * ext4_bio_write_page() to zero-out tail of the written page. We rely
+ * on the barrier provided by TestClearPageDirty in
+ * clear_page_dirty_for_io() to make sure i_size is really sampled only
+ * after page tables are updated.
+ */
+ size = i_size_read(mpd->inode);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
- clear_page_dirty_for_io(page);
err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
if (!err)
mpd->wbc->nr_to_write--;
@@ -3495,7 +3511,8 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode)) {
@@ -3609,7 +3626,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \
+!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION)
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
@@ -3806,7 +3824,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode)) {
+ ext4_encrypted_inode(inode) &&
+ !fs_using_hardware_encryption(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cec9280..1ddceb6 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -773,10 +773,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case EXT4_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct fscrypt_policy policy;
-
- if (!ext4_has_feature_encrypt(sb))
- return -EOPNOTSUPP;
-
if (copy_from_user(&policy,
(struct fscrypt_policy __user *)arg,
sizeof(policy)))
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index df8168f..e5e99a7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* We search using buddy data only if the order of the request
* is greater than equal to the sbi_s_mb_order2_reqs
* You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
+ * We also support searching for power-of-two requests only for
+ * requests up to the maximum buddy size we have constructed.
*/
- if (i >= sbi->s_mb_order2_reqs) {
+ if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
/*
* This should tell if fe_len is exactly power of 2
*/
@@ -2207,7 +2209,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
}
ac->ac_groups_scanned++;
- if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
+ if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 && sbi->s_stripe &&
!(ac->ac_g_ex.fe_len % sbi->s_stripe))
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 423a21c..00b8a5a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3527,6 +3527,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_I(old_dentry->d_inode)->i_projid)))
return -EXDEV;
+ if ((ext4_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (ext4_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
retval = dquot_initialize(old.dir);
if (retval)
return retval;
@@ -3726,6 +3732,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
u8 new_file_type;
int retval;
+ if ((ext4_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (ext4_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((ext4_encrypted_inode(old_dir) ||
ext4_encrypted_inode(new_dir)) &&
(old_dir != new_dir) &&
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923..d8a0770 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -29,6 +29,7 @@
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "ext4_ice.h"
static struct kmem_cache *io_end_cachep;
@@ -470,6 +471,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
+ if (!fs_using_hardware_encryption(inode))
data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5fa9ba1..1f58179 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2334,6 +2334,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
+ int quota_update = 0;
int i;
#endif
if (!es->s_last_orphan) {
@@ -2372,14 +2373,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+
+ /*
+ * Turn on quotas which were not enabled for read-only mounts if
+ * filesystem has quota feature, so that they are updated correctly.
+ */
+ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ int ret = ext4_enable_quotas(sb);
+
+ if (!ret)
+ quota_update = 1;
+ else
+ ext4_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", ret);
+ }
+
+ /* Turn on journaled quotas used for old style */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
- if (ret < 0)
+
+ if (!ret)
+ quota_update = 1;
+ else
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
- "quota: error %d", ret);
+ "quota: type %d: error %d", i, ret);
}
}
#endif
@@ -2438,10 +2457,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
- dquot_quota_off(sb, i);
+ /* Turn off quotas if they were enabled for orphan cleanup */
+ if (quota_update) {
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (sb_dqopt(sb)->files[i])
+ dquot_quota_off(sb, i);
+ }
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2607,9 +2628,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
ret = sbi->s_stripe;
- else if (stripe_width <= sbi->s_blocks_per_group)
+ else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
ret = stripe_width;
- else if (stride <= sbi->s_blocks_per_group)
+ else if (stride && stride <= sbi->s_blocks_per_group)
ret = stride;
else
ret = 0;
@@ -5365,6 +5386,9 @@ static int ext4_enable_quotas(struct super_block *sb)
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 2c5ae0b..08b3f62 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1630,7 +1630,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
goto fail;
}
repeat:
- page = grab_cache_page_write_begin(mapping, index, flags);
+ /*
+ * Do not use grab_cache_page_write_begin() to avoid deadlock due to
+ * wait_for_stable_page. Will wait that below with our IO control.
+ */
+ page = pagecache_get_page(mapping, index,
+ FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
if (!page) {
err = -ENOMEM;
goto fail;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 489fa0d..08d7dc9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -663,6 +663,12 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err = -ENOENT;
+ if ((f2fs_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (f2fs_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
!fscrypt_has_permitted_context(new_dir, old_inode)) {
err = -EPERM;
@@ -843,6 +849,12 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
int old_nlink = 0, new_nlink = 0;
int err = -ENOENT;
+ if ((f2fs_encrypted_inode(old_dir) &&
+ !fscrypt_has_encryption_key(old_dir)) ||
+ (f2fs_encrypted_inode(new_dir) &&
+ !fscrypt_has_encryption_key(new_dir)))
+ return -ENOKEY;
+
if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
(old_dir != new_dir) &&
(!fscrypt_has_permitted_context(new_dir, old_inode) ||
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 74a2b44..e10f616 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1263,7 +1263,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
+ if (IS_NODESEG(type))
return v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 5d5ddaa..37e0c31d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -330,6 +330,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
rcu_read_lock();
confkey = user_key_payload(key);
+ if (!confkey) {
+ /* key was revoked */
+ rcu_read_unlock();
+ key_put(key);
+ goto no_config;
+ }
+
buf = confkey->data;
for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c7c3c96..1693308 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1361,7 +1361,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
*/
over = !dir_emit(ctx, dirent->name, dirent->namelen,
dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ if (!over)
+ ctx->pos = dirent->off;
}
buf += reclen;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7bff6f4..7a8b1d7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1820,29 +1820,27 @@ void gfs2_glock_exit(void)
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
- do {
- gi->gl = rhashtable_walk_next(&gi->hti);
+ while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
if (IS_ERR(gi->gl)) {
if (PTR_ERR(gi->gl) == -EAGAIN)
continue;
gi->gl = NULL;
+ return;
}
- /* Skip entries for other sb and dead entries */
- } while ((gi->gl) && ((gi->sdp != gi->gl->gl_name.ln_sbd) ||
- __lockref_is_dead(&gi->gl->gl_lockref)));
+ /* Skip entries for other sb and dead entries */
+ if (gi->sdp == gi->gl->gl_name.ln_sbd &&
+ !__lockref_is_dead(&gi->gl->gl_lockref))
+ return;
+ }
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
- int ret;
- if (gi->last_pos <= *pos)
- n = (*pos - gi->last_pos);
-
- ret = rhashtable_walk_start(&gi->hti);
- if (ret)
+ rhashtable_walk_enter(&gl_hash_table, &gi->hti);
+ if (rhashtable_walk_start(&gi->hti) != 0)
return NULL;
do {
@@ -1850,6 +1848,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
} while (gi->gl && n--);
gi->last_pos = *pos;
+
return gi->gl;
}
@@ -1861,6 +1860,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
(*pos)++;
gi->last_pos = *pos;
gfs2_glock_iter_next(gi);
+
return gi->gl;
}
@@ -1870,6 +1870,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
gi->gl = NULL;
rhashtable_walk_stop(&gi->hti);
+ rhashtable_walk_exit(&gi->hti);
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -1932,12 +1933,10 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
- gi->last_pos = 0;
seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
}
return ret;
}
@@ -1948,7 +1947,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
struct gfs2_glock_iter *gi = seq->private;
gi->gl = NULL;
- rhashtable_walk_exit(&gi->hti);
return seq_release_private(inode, file);
}
@@ -1960,12 +1958,10 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
- gi->last_pos = 0;
seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
}
return ret;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 704fa0b..2c2f182 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode->i_mode = S_IFDIR | config->mode;
inode->i_uid = config->uid;
inode->i_gid = config->gid;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- info = HUGETLBFS_I(inode);
- mpol_shared_policy_init(&info->policy, NULL);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode = new_inode(sb);
if (inode) {
- struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inode->i_mapping->private_data = resv_map;
- info = HUGETLBFS_I(inode);
- /*
- * The policy is initialized here even if we are creating a
- * private inode because initialization simply creates an
- * an empty rb tree and calls rwlock_init(), later when we
- * call mpol_free_shared_policy() it will just return because
- * the rb tree will still be empty.
- */
- mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
@@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
+
+ /*
+ * Any time after allocation, hugetlbfs_destroy_inode can be called
+ * for the inode. mpol_free_shared_policy is unconditionally called
+ * as part of hugetlbfs_destroy_inode. So, initialize policy here
+ * in case of a quick call to destroy.
+ *
+ * Note that the policy is initialized even if we are creating a
+ * private inode. This simplifies hugetlbfs_destroy_inode.
+ */
+ mpol_shared_policy_init(&p->policy, NULL);
+
return &p->vfs_inode;
}
diff --git a/fs/mpage.c b/fs/mpage.c
index 1193d43..d4e17c8 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -502,6 +502,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
try_to_free_buffers(page);
}
+/*
+ * For situations where we want to clean all buffers attached to a page.
+ * We don't need to calculate how many buffers are attached to the page,
+ * we just need to specify a number larger than the maximum number of buffers.
+ */
+void clean_page_buffers(struct page *page)
+{
+ clean_buffers(page, ~0U);
+}
+
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
void *data)
{
@@ -640,10 +650,8 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
if (bio == NULL) {
if (first_unmapped == blocks_per_page) {
if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
- page, wbc)) {
- clean_buffers(page, first_unmapped);
+ page, wbc))
goto out;
- }
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
diff --git a/fs/namei.c b/fs/namei.c
index e10895c..2af3818 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2903,6 +2903,11 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
+ if (error)
+ return error;
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3002,10 +3007,16 @@ static inline int open_to_namei_flags(int flag)
static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
{
+ struct user_namespace *s_user_ns;
int error = security_path_mknod(dir, dentry, mode, 0);
if (error)
return error;
+ s_user_ns = dir->dentry->d_sb->s_user_ns;
+ if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
+ !kgid_has_mapping(s_user_ns, current_fsgid()))
+ return -EOVERFLOW;
+
error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -3712,6 +3723,13 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 0a21150..af84a92 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -75,7 +75,10 @@ nfs4_callback_svc(void *vrqstp)
set_freezable();
- while (!kthread_should_stop()) {
+ while (!kthread_freezable_should_stop(NULL)) {
+
+ if (signal_pending(current))
+ flush_signals(current);
/*
* Listen for a request on the socket
*/
@@ -84,6 +87,8 @@ nfs4_callback_svc(void *vrqstp)
continue;
svc_process(rqstp);
}
+ svc_exit_thread(rqstp);
+ module_put_and_exit(0);
return 0;
}
@@ -102,9 +107,10 @@ nfs41_callback_svc(void *vrqstp)
set_freezable();
- while (!kthread_should_stop()) {
- if (try_to_freeze())
- continue;
+ while (!kthread_freezable_should_stop(NULL)) {
+
+ if (signal_pending(current))
+ flush_signals(current);
prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
@@ -120,11 +126,13 @@ nfs41_callback_svc(void *vrqstp)
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
- schedule();
+ if (!kthread_should_stop())
+ schedule();
finish_wait(&serv->sv_cb_waitq, &wq);
}
- flush_signals(current);
}
+ svc_exit_thread(rqstp);
+ module_put_and_exit(0);
return 0;
}
@@ -220,23 +228,23 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
static struct svc_serv_ops nfs40_cb_sv_ops = {
.svo_function = nfs4_callback_svc,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads,
+ .svo_setup = svc_set_num_threads_sync,
.svo_module = THIS_MODULE,
};
#if defined(CONFIG_NFS_V4_1)
static struct svc_serv_ops nfs41_cb_sv_ops = {
.svo_function = nfs41_callback_svc,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads,
+ .svo_setup = svc_set_num_threads_sync,
.svo_module = THIS_MODULE,
};
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = &nfs41_cb_sv_ops,
};
#else
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = NULL,
};
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 211dc2a..3069cd4 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -753,6 +753,14 @@ int set_callback_cred(void)
return 0;
}
+void cleanup_callback_cred(void)
+{
+ if (callback_cred) {
+ put_rpccred(callback_cred);
+ callback_cred = NULL;
+ }
+}
+
static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a0dee8a..d35eb07 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -7012,23 +7012,24 @@ nfs4_state_start(void)
ret = set_callback_cred();
if (ret)
- return -ENOMEM;
+ return ret;
+
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
- goto out_recovery;
+ goto out_cleanup_cred;
}
ret = nfsd4_create_callback_queue();
if (ret)
goto out_free_laundry;
set_max_delegations();
-
return 0;
out_free_laundry:
destroy_workqueue(laundry_wq);
-out_recovery:
+out_cleanup_cred:
+ cleanup_callback_cred();
return ret;
}
@@ -7086,6 +7087,7 @@ nfs4_state_shutdown(void)
{
destroy_workqueue(laundry_wq);
nfsd4_destroy_callback_queue();
+ cleanup_callback_cred();
}
static void
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4516e8b..005c911 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -615,6 +615,7 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
extern int set_callback_cred(void);
+extern void cleanup_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f72712f..06089be 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
static int ocfs2_trim_extent(struct super_block *sb,
struct ocfs2_group_desc *gd,
- u32 start, u32 count)
+ u64 group, u32 start, u32 count)
{
u64 discard, bcount;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
bcount = ocfs2_clusters_to_blocks(sb, count);
- discard = le64_to_cpu(gd->bg_blkno) +
- ocfs2_clusters_to_blocks(sb, start);
+ discard = ocfs2_clusters_to_blocks(sb, start);
+
+ /*
+ * For the first cluster group, the gd->bg_blkno is not at the start
+ * of the group, but at an offset from the start. If we add it while
+ * calculating discard for first group, we will wrongly start fstrim a
+ * few blocks after the desired start block and the range can cross
+ * over into the next cluster group. So, add it only if this is not
+ * the first cluster group.
+ */
+ if (group != osb->first_cluster_group_blkno)
+ discard += le64_to_cpu(gd->bg_blkno);
trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
}
static int ocfs2_trim_group(struct super_block *sb,
- struct ocfs2_group_desc *gd,
+ struct ocfs2_group_desc *gd, u64 group,
u32 start, u32 max, u32 minbits)
{
int ret = 0, count = 0, next;
@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
next = ocfs2_find_next_bit(bitmap, max, start);
if ((next - start) >= minbits) {
- ret = ocfs2_trim_extent(sb, gd,
+ ret = ocfs2_trim_extent(sb, gd, group,
start, next - start);
if (ret < 0) {
mlog_errno(ret);
@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
gd = (struct ocfs2_group_desc *)gd_bh->b_data;
- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+ cnt = ocfs2_trim_group(sb, gd, group,
+ first_bit, last_bit, minlen);
brelse(gd_bh);
gd_bh = NULL;
if (cnt < 0) {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 77d1632..8dce409 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -532,6 +532,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
init_waitqueue_head(&res->l_event);
INIT_LIST_HEAD(&res->l_blocked_list);
INIT_LIST_HEAD(&res->l_mask_waiters);
+ INIT_LIST_HEAD(&res->l_holders);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
@@ -749,6 +750,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
res->l_flags = 0UL;
}
+/*
+ * Keep a list of processes who have interest in a lockres.
+ * Note: this is now only used for checking recursive cluster locking.
+ */
+static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ INIT_LIST_HEAD(&oh->oh_list);
+ oh->oh_owner_pid = get_pid(task_pid(current));
+
+ spin_lock(&lockres->l_lock);
+ list_add_tail(&oh->oh_list, &lockres->l_holders);
+ spin_unlock(&lockres->l_lock);
+}
+
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ spin_lock(&lockres->l_lock);
+ list_del(&oh->oh_list);
+ spin_unlock(&lockres->l_lock);
+
+ put_pid(oh->oh_owner_pid);
+}
+
+static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_lock_holder *oh;
+ struct pid *pid;
+
+ /* look in the list of holders for one with the current task as owner */
+ spin_lock(&lockres->l_lock);
+ pid = task_pid(current);
+ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
+ if (oh->oh_owner_pid == pid) {
+ spin_unlock(&lockres->l_lock);
+ return 1;
+ }
+ }
+ spin_unlock(&lockres->l_lock);
+
+ return 0;
+}
+
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
@@ -2333,8 +2378,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
goto getbh;
}
- if (ocfs2_mount_local(osb))
- goto local;
+ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
+ ocfs2_mount_local(osb))
+ goto update;
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
@@ -2363,7 +2409,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
-local:
+update:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
@@ -2497,6 +2543,59 @@ void ocfs2_inode_unlock(struct inode *inode,
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
}
+/*
+ * These _tracker variants are introduced to deal with the recursive cluster
+ * locking issue. The idea is to keep track of a lock holder on the stack of
+ * the current process. If there's a lock holder on the stack, we know the
+ * task context is already protected by cluster locking. Currently, they're
+ * used in some VFS entry routines.
+ *
+ * return < 0 on error, return == 0 if there's no lock holder on the stack
+ * before this call, return == 1 if this call would be a recursive locking.
+ */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh)
+{
+ int status;
+ int arg_flags = 0, has_locked;
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ has_locked = ocfs2_is_locked_by_me(lockres);
+ /* Just get buffer head if the cluster lock has been taken */
+ if (has_locked)
+ arg_flags = OCFS2_META_LOCK_GETBH;
+
+ if (likely(!has_locked || ret_bh)) {
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+ }
+ if (!has_locked)
+ ocfs2_add_holder(lockres, oh);
+
+ return has_locked;
+}
+
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock)
+{
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ if (!had_lock) {
+ ocfs2_remove_holder(lockres, oh);
+ ocfs2_inode_unlock(inode, ex);
+ }
+}
+
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d293a22..a7fc18b 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
__be32 lvb_os_seqno;
};
+struct ocfs2_lock_holder {
+ struct list_head oh_list;
+ struct pid *oh_owner_pid;
+};
+
/* ocfs2_inode_lock_full() 'arg_flags' flags */
/* don't wait on recovery. */
#define OCFS2_META_LOCK_RECOVERY (0x01)
@@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
#define OCFS2_META_LOCK_NOQUEUE (0x02)
/* don't block waiting for the downconvert thread, instead return -EAGAIN */
#define OCFS2_LOCK_NONBLOCK (0x04)
+/* just get back disk inode bh if we've got cluster lock. */
+#define OCFS2_META_LOCK_GETBH (0x08)
/* Locking subclasses of inode cluster lock */
enum {
@@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
/* To set the locking protocol on module initialization */
void ocfs2_set_locking_protocol(void);
+
+/* The _tracker pair is used to avoid cluster recursive locking */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh);
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock);
+
#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index e63af7d..594575e 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -172,6 +172,7 @@ struct ocfs2_lock_res {
struct list_head l_blocked_list;
struct list_head l_mask_waiters;
+ struct list_head l_holders;
unsigned long l_flags;
char l_name[OCFS2_LOCK_ID_MAX_LEN];
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 7a37544..9409aac 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -61,9 +61,9 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
return acl;
}
-int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl,
+ int type)
{
- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
int error = 0;
void *value = NULL;
size_t size = 0;
@@ -72,22 +72,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
- if (acl) {
- umode_t mode;
-
- error = posix_acl_update_mode(inode, &mode, &acl);
- if (error) {
- gossip_err("%s: posix_acl_update_mode err: %d\n",
- __func__,
- error);
- return error;
- }
-
- if (inode->i_mode != mode)
- SetModeFlag(orangefs_inode);
- inode->i_mode = mode;
- mark_inode_dirty_sync(inode);
- }
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
@@ -132,6 +116,29 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
}
+int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int error;
+
+ if (type == ACL_TYPE_ACCESS && acl) {
+ umode_t mode;
+
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error) {
+ gossip_err("%s: posix_acl_update_mode err: %d\n",
+ __func__,
+ error);
+ return error;
+ }
+
+ if (inode->i_mode != mode)
+ SetModeFlag(ORANGEFS_I(inode));
+ inode->i_mode = mode;
+ mark_inode_dirty_sync(inode);
+ }
+ return __orangefs_set_acl(inode, acl, type);
+}
+
int orangefs_init_acl(struct inode *inode, struct inode *dir)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
@@ -146,13 +153,14 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
return error;
if (default_acl) {
- error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ error = __orangefs_set_acl(inode, default_acl,
+ ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
- error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 81818ad..c932ec4 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -60,6 +60,7 @@
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
+#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
@@ -416,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* esp and eip are intentionally zeroed out. There is no
* non-racy way to read them without freezing the task.
* Programs that need reliable values can use ptrace(2).
+ *
+ * The only exception is if the task is core dumping because
+ * a program is not able to use ptrace(2) in that case. It is
+ * safe because the task has stopped executing permanently.
*/
+ if (permitted && (task->flags & PF_DUMPCORE)) {
+ eip = KSTK_EIP(task);
+ esp = KSTK_ESP(task);
+ }
}
get_task_comm(tcomm, task);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18f7612..1370a4e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2896,6 +2896,52 @@ static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */
+#ifdef CONFIG_DETECT_HUNG_TASK
+static ssize_t proc_hung_task_detection_enabled_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file_inode(file));
+ char buffer[PROC_NUMBUF];
+ size_t len;
+ bool hang_detection_enabled;
+
+ if (!task)
+ return -ESRCH;
+ hang_detection_enabled = task->hang_detection_enabled;
+ put_task_struct(task);
+
+ len = snprintf(buffer, sizeof(buffer), "%d\n", hang_detection_enabled);
+
+ return simple_read_from_buffer(buf, sizeof(buffer), ppos, buffer, len);
+}
+
+static ssize_t proc_hung_task_detection_enabled_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ bool hang_detection_enabled;
+ int rv;
+
+ rv = kstrtobool_from_user(buf, count, &hang_detection_enabled);
+ if (rv < 0)
+ return rv;
+
+ task = get_proc_task(file_inode(file));
+ if (!task)
+ return -ESRCH;
+ task->hang_detection_enabled = hang_detection_enabled;
+ put_task_struct(task);
+
+ return count;
+}
+
+static const struct file_operations proc_hung_task_detection_enabled_operations = {
+ .read = proc_hung_task_detection_enabled_read,
+ .write = proc_hung_task_detection_enabled_write,
+ .llseek = generic_file_llseek,
+};
+#endif
+
#ifdef CONFIG_USER_NS
static int proc_id_map_open(struct inode *inode, struct file *file,
const struct seq_operations *seq_ops)
@@ -3138,6 +3184,10 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_HARDWALL
ONE("hardwall", S_IRUGO, proc_pid_hardwall),
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+ REG("hang_detection_enabled", 0666,
+ proc_hung_task_detection_enabled_operations),
+#endif
#ifdef CONFIG_USER_NS
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
@@ -3526,6 +3576,10 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_HARDWALL
ONE("hardwall", S_IRUGO, proc_pid_hardwall),
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+ REG("hang_detection_enabled", 0666,
+ proc_hung_task_detection_enabled_operations),
+#endif
#ifdef CONFIG_USER_NS
REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
diff --git a/fs/read_write.c b/fs/read_write.c
index e479e24..ba28059 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -114,7 +114,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
* In the generic case the entire file is data, so as long as
* offset isn't at the end of the file then the offset is data.
*/
- if (offset >= eof)
+ if ((unsigned long long)offset >= eof)
return -ENXIO;
break;
case SEEK_HOLE:
@@ -122,7 +122,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
* There is a virtual hole at the end of the file, so as long as
* offset isn't i_size or larger, return i_size.
*/
- if (offset >= eof)
+ if ((unsigned long long)offset >= eof)
return -ENXIO;
offset = eof;
break;
@@ -1518,6 +1518,11 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
if (flags != 0)
return -EINVAL;
+ if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+ return -EISDIR;
+ if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+ return -EINVAL;
+
ret = rw_verify_area(READ, file_in, &pos_in, len);
if (unlikely(ret))
return ret;
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index ffb093e..6dd158a 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -26,34 +26,6 @@
If unsure, say N.
choice
- prompt "File decompression options"
- depends on SQUASHFS
- help
- Squashfs now supports two options for decompressing file
- data. Traditionally Squashfs has decompressed into an
- intermediate buffer and then memcopied it into the page cache.
- Squashfs now supports the ability to decompress directly into
- the page cache.
-
- If unsure, select "Decompress file data into an intermediate buffer"
-
-config SQUASHFS_FILE_CACHE
- bool "Decompress file data into an intermediate buffer"
- help
- Decompress file data into an intermediate buffer and then
- memcopy it into the page cache.
-
-config SQUASHFS_FILE_DIRECT
- bool "Decompress files directly into the page cache"
- help
- Directly decompress file data into the page cache.
- Doing so can significantly improve performance because
- it eliminates a memcpy and it also removes the lock contention
- on the single buffer.
-
-endchoice
-
-choice
prompt "Decompressor parallelisation options"
depends on SQUASHFS
help
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 246a6f3..fe51f15 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,8 +5,7 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
-squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
-squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
+squashfs-y += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index ce62a38..7077476 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -28,9 +28,12 @@
#include <linux/fs.h>
#include <linux/vfs.h>
+#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/pagemap.h>
#include <linux/buffer_head.h>
+#include <linux/workqueue.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -38,45 +41,382 @@
#include "decompressor.h"
#include "page_actor.h"
-/*
- * Read the metadata block length, this is stored in the first two
- * bytes of the metadata block.
- */
-static struct buffer_head *get_block_length(struct super_block *sb,
- u64 *cur_index, int *offset, int *length)
+static struct workqueue_struct *squashfs_read_wq;
+
+struct squashfs_read_request {
+ struct super_block *sb;
+ u64 index;
+ int length;
+ int compressed;
+ int offset;
+ u64 read_end;
+ struct squashfs_page_actor *output;
+ enum {
+ SQUASHFS_COPY,
+ SQUASHFS_DECOMPRESS,
+ SQUASHFS_METADATA,
+ } data_processing;
+ bool synchronous;
+
+ /*
+ * If the read is synchronous, it is possible to retrieve information
+ * about the request by setting these pointers.
+ */
+ int *res;
+ int *bytes_read;
+ int *bytes_uncompressed;
+
+ int nr_buffers;
+ struct buffer_head **bh;
+ struct work_struct offload;
+};
+
+struct squashfs_bio_request {
+ struct buffer_head **bh;
+ int nr_buffers;
+};
+
+static int squashfs_bio_submit(struct squashfs_read_request *req);
+
+int squashfs_init_read_wq(void)
{
- struct squashfs_sb_info *msblk = sb->s_fs_info;
- struct buffer_head *bh;
+ squashfs_read_wq = create_workqueue("SquashFS read wq");
+ return !!squashfs_read_wq;
+}
- bh = sb_bread(sb, *cur_index);
- if (bh == NULL)
- return NULL;
+void squashfs_destroy_read_wq(void)
+{
+ flush_workqueue(squashfs_read_wq);
+ destroy_workqueue(squashfs_read_wq);
+}
- if (msblk->devblksize - *offset == 1) {
- *length = (unsigned char) bh->b_data[*offset];
- put_bh(bh);
- bh = sb_bread(sb, ++(*cur_index));
- if (bh == NULL)
- return NULL;
- *length |= (unsigned char) bh->b_data[0] << 8;
- *offset = 1;
- } else {
- *length = (unsigned char) bh->b_data[*offset] |
- (unsigned char) bh->b_data[*offset + 1] << 8;
- *offset += 2;
+static void free_read_request(struct squashfs_read_request *req, int error)
+{
+ if (!req->synchronous)
+ squashfs_page_actor_free(req->output, error);
+ if (req->res)
+ *(req->res) = error;
+ kfree(req->bh);
+ kfree(req);
+}
- if (*offset == msblk->devblksize) {
- put_bh(bh);
- bh = sb_bread(sb, ++(*cur_index));
- if (bh == NULL)
- return NULL;
- *offset = 0;
+static void squashfs_process_blocks(struct squashfs_read_request *req)
+{
+ int error = 0;
+ int bytes, i, length;
+ struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+ struct squashfs_page_actor *actor = req->output;
+ struct buffer_head **bh = req->bh;
+ int nr_buffers = req->nr_buffers;
+
+ for (i = 0; i < nr_buffers; ++i) {
+ if (!bh[i])
+ continue;
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ error = -EIO;
+ }
+ if (error)
+ goto cleanup;
+
+ if (req->data_processing == SQUASHFS_METADATA) {
+ /* Extract the length of the metadata block */
+ if (req->offset != msblk->devblksize - 1) {
+ length = le16_to_cpup((__le16 *)
+ (bh[0]->b_data + req->offset));
+ } else {
+ length = (unsigned char)bh[0]->b_data[req->offset];
+ length |= (unsigned char)bh[1]->b_data[0] << 8;
+ }
+ req->compressed = SQUASHFS_COMPRESSED(length);
+ req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
+ : SQUASHFS_COPY;
+ length = SQUASHFS_COMPRESSED_SIZE(length);
+ if (req->index + length + 2 > req->read_end) {
+ for (i = 0; i < nr_buffers; ++i)
+ put_bh(bh[i]);
+ kfree(bh);
+ req->length = length;
+ req->index += 2;
+ squashfs_bio_submit(req);
+ return;
+ }
+ req->length = length;
+ req->offset = (req->offset + 2) % PAGE_SIZE;
+ if (req->offset < 2) {
+ put_bh(bh[0]);
+ ++bh;
+ --nr_buffers;
+ }
+ }
+ if (req->bytes_read)
+ *(req->bytes_read) = req->length;
+
+ if (req->data_processing == SQUASHFS_COPY) {
+ squashfs_bh_to_actor(bh, nr_buffers, req->output, req->offset,
+ req->length, msblk->devblksize);
+ } else if (req->data_processing == SQUASHFS_DECOMPRESS) {
+ req->length = squashfs_decompress(msblk, bh, nr_buffers,
+ req->offset, req->length, actor);
+ if (req->length < 0) {
+ error = -EIO;
+ goto cleanup;
}
}
- return bh;
+ /* Last page may have trailing bytes not filled */
+ bytes = req->length % PAGE_SIZE;
+ if (bytes && actor->page[actor->pages - 1])
+ zero_user_segment(actor->page[actor->pages - 1], bytes,
+ PAGE_SIZE);
+
+cleanup:
+ if (req->bytes_uncompressed)
+ *(req->bytes_uncompressed) = req->length;
+ if (error) {
+ for (i = 0; i < nr_buffers; ++i)
+ if (bh[i])
+ put_bh(bh[i]);
+ }
+ free_read_request(req, error);
}
+static void read_wq_handler(struct work_struct *work)
+{
+ squashfs_process_blocks(container_of(work,
+ struct squashfs_read_request, offload));
+}
+
+static void squashfs_bio_end_io(struct bio *bio)
+{
+ int i;
+ int error = bio->bi_error;
+ struct squashfs_bio_request *bio_req = bio->bi_private;
+
+ bio_put(bio);
+
+ for (i = 0; i < bio_req->nr_buffers; ++i) {
+ if (!bio_req->bh[i])
+ continue;
+ if (!error)
+ set_buffer_uptodate(bio_req->bh[i]);
+ else
+ clear_buffer_uptodate(bio_req->bh[i]);
+ unlock_buffer(bio_req->bh[i]);
+ }
+ kfree(bio_req);
+}
+
+static int bh_is_optional(struct squashfs_read_request *req, int idx)
+{
+ int start_idx, end_idx;
+ struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+
+ start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT;
+ end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT;
+ if (start_idx >= req->output->pages)
+ return 1;
+ if (start_idx < 0)
+ start_idx = end_idx;
+ if (end_idx >= req->output->pages)
+ end_idx = start_idx;
+ return !req->output->page[start_idx] && !req->output->page[end_idx];
+}
+
+static int actor_getblks(struct squashfs_read_request *req, u64 block)
+{
+ int i;
+
+ req->bh = kmalloc_array(req->nr_buffers, sizeof(*(req->bh)), GFP_NOIO);
+ if (!req->bh)
+ return -ENOMEM;
+
+ for (i = 0; i < req->nr_buffers; ++i) {
+ /*
+ * When dealing with an uncompressed block, the actor may
+ * contain NULL pages. There's no need to read the buffers
+ * associated with these pages.
+ */
+ if (!req->compressed && bh_is_optional(req, i)) {
+ req->bh[i] = NULL;
+ continue;
+ }
+ req->bh[i] = sb_getblk(req->sb, block + i);
+ if (!req->bh[i]) {
+ while (--i) {
+ if (req->bh[i])
+ put_bh(req->bh[i]);
+ }
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int squashfs_bio_submit(struct squashfs_read_request *req)
+{
+ struct bio *bio = NULL;
+ struct buffer_head *bh;
+ struct squashfs_bio_request *bio_req = NULL;
+ int b = 0, prev_block = 0;
+ struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+
+ u64 read_start = round_down(req->index, msblk->devblksize);
+ u64 read_end = round_up(req->index + req->length, msblk->devblksize);
+ sector_t block = read_start >> msblk->devblksize_log2;
+ sector_t block_end = read_end >> msblk->devblksize_log2;
+ int offset = read_start - round_down(req->index, PAGE_SIZE);
+ int nr_buffers = block_end - block;
+ int blksz = msblk->devblksize;
+ int bio_max_pages = nr_buffers > BIO_MAX_PAGES ? BIO_MAX_PAGES
+ : nr_buffers;
+
+ /* Setup the request */
+ req->read_end = read_end;
+ req->offset = req->index - read_start;
+ req->nr_buffers = nr_buffers;
+ if (actor_getblks(req, block) < 0)
+ goto getblk_failed;
+
+ /* Create and submit the BIOs */
+ for (b = 0; b < nr_buffers; ++b, offset += blksz) {
+ bh = req->bh[b];
+ if (!bh || !trylock_buffer(bh))
+ continue;
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+ offset %= PAGE_SIZE;
+
+ /* Append the buffer to the current BIO if it is contiguous */
+ if (bio && bio_req && prev_block + 1 == b) {
+ if (bio_add_page(bio, bh->b_page, blksz, offset)) {
+ bio_req->nr_buffers += 1;
+ prev_block = b;
+ continue;
+ }
+ }
+
+ /* Otherwise, submit the current BIO and create a new one */
+ if (bio)
+ submit_bio(bio);
+ bio_req = kcalloc(1, sizeof(struct squashfs_bio_request),
+ GFP_NOIO);
+ if (!bio_req)
+ goto req_alloc_failed;
+ bio_req->bh = &req->bh[b];
+ bio = bio_alloc(GFP_NOIO, bio_max_pages);
+ if (!bio)
+ goto bio_alloc_failed;
+ bio->bi_bdev = req->sb->s_bdev;
+ bio->bi_iter.bi_sector = (block + b)
+ << (msblk->devblksize_log2 - 9);
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_private = bio_req;
+ bio->bi_end_io = squashfs_bio_end_io;
+
+ bio_add_page(bio, bh->b_page, blksz, offset);
+ bio_req->nr_buffers += 1;
+ prev_block = b;
+ }
+ if (bio)
+ submit_bio(bio);
+
+ if (req->synchronous)
+ squashfs_process_blocks(req);
+ else {
+ INIT_WORK(&req->offload, read_wq_handler);
+ schedule_work(&req->offload);
+ }
+ return 0;
+
+bio_alloc_failed:
+ kfree(bio_req);
+req_alloc_failed:
+ unlock_buffer(bh);
+ while (--nr_buffers >= b)
+ if (req->bh[nr_buffers])
+ put_bh(req->bh[nr_buffers]);
+ while (--b >= 0)
+ if (req->bh[b])
+ wait_on_buffer(req->bh[b]);
+getblk_failed:
+ free_read_request(req, -ENOMEM);
+ return -ENOMEM;
+}
+
+static int read_metadata_block(struct squashfs_read_request *req,
+ u64 *next_index)
+{
+ int ret, error, bytes_read = 0, bytes_uncompressed = 0;
+ struct squashfs_sb_info *msblk = req->sb->s_fs_info;
+
+ if (req->index + 2 > msblk->bytes_used) {
+ free_read_request(req, -EINVAL);
+ return -EINVAL;
+ }
+ req->length = 2;
+
+ /* Do not read beyond the end of the device */
+ if (req->index + req->length > msblk->bytes_used)
+ req->length = msblk->bytes_used - req->index;
+ req->data_processing = SQUASHFS_METADATA;
+
+ /*
+ * Reading metadata is always synchronous because we don't know the
+ * length in advance and the function is expected to update
+ * 'next_index' and return the length.
+ */
+ req->synchronous = true;
+ req->res = &error;
+ req->bytes_read = &bytes_read;
+ req->bytes_uncompressed = &bytes_uncompressed;
+
+ TRACE("Metadata block @ 0x%llx, %scompressed size %d, src size %d\n",
+ req->index, req->compressed ? "" : "un", bytes_read,
+ req->output->length);
+
+ ret = squashfs_bio_submit(req);
+ if (ret)
+ return ret;
+ if (error)
+ return error;
+ if (next_index)
+ *next_index += 2 + bytes_read;
+ return bytes_uncompressed;
+}
+
+static int read_data_block(struct squashfs_read_request *req, int length,
+ u64 *next_index, bool synchronous)
+{
+ int ret, error = 0, bytes_uncompressed = 0, bytes_read = 0;
+
+ req->compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+ req->length = length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+ req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS
+ : SQUASHFS_COPY;
+
+ req->synchronous = synchronous;
+ if (synchronous) {
+ req->res = &error;
+ req->bytes_read = &bytes_read;
+ req->bytes_uncompressed = &bytes_uncompressed;
+ }
+
+ TRACE("Data block @ 0x%llx, %scompressed size %d, src size %d\n",
+ req->index, req->compressed ? "" : "un", req->length,
+ req->output->length);
+
+ ret = squashfs_bio_submit(req);
+ if (ret)
+ return ret;
+ if (synchronous)
+ ret = error ? error : bytes_uncompressed;
+ if (next_index)
+ *next_index += length;
+ return ret;
+}
/*
* Read and decompress a metadata block or datablock. Length is non-zero
@@ -87,128 +427,50 @@ static struct buffer_head *get_block_length(struct super_block *sb,
* generated a larger block - this does occasionally happen with compression
* algorithms).
*/
-int squashfs_read_data(struct super_block *sb, u64 index, int length,
- u64 *next_index, struct squashfs_page_actor *output)
+static int __squashfs_read_data(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output, bool sync)
{
- struct squashfs_sb_info *msblk = sb->s_fs_info;
- struct buffer_head **bh;
- int offset = index & ((1 << msblk->devblksize_log2) - 1);
- u64 cur_index = index >> msblk->devblksize_log2;
- int bytes, compressed, b = 0, k = 0, avail, i;
+ struct squashfs_read_request *req;
- bh = kcalloc(((output->length + msblk->devblksize - 1)
- >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
- if (bh == NULL)
+ req = kcalloc(1, sizeof(struct squashfs_read_request), GFP_KERNEL);
+ if (!req) {
+ if (!sync)
+ squashfs_page_actor_free(output, -ENOMEM);
return -ENOMEM;
-
- if (length) {
- /*
- * Datablock.
- */
- bytes = -offset;
- compressed = SQUASHFS_COMPRESSED_BLOCK(length);
- length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
- if (next_index)
- *next_index = index + length;
-
- TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
- index, compressed ? "" : "un", length, output->length);
-
- if (length < 0 || length > output->length ||
- (index + length) > msblk->bytes_used)
- goto read_failure;
-
- for (b = 0; bytes < length; b++, cur_index++) {
- bh[b] = sb_getblk(sb, cur_index);
- if (bh[b] == NULL)
- goto block_release;
- bytes += msblk->devblksize;
- }
- ll_rw_block(REQ_OP_READ, 0, b, bh);
- } else {
- /*
- * Metadata block.
- */
- if ((index + 2) > msblk->bytes_used)
- goto read_failure;
-
- bh[0] = get_block_length(sb, &cur_index, &offset, &length);
- if (bh[0] == NULL)
- goto read_failure;
- b = 1;
-
- bytes = msblk->devblksize - offset;
- compressed = SQUASHFS_COMPRESSED(length);
- length = SQUASHFS_COMPRESSED_SIZE(length);
- if (next_index)
- *next_index = index + length + 2;
-
- TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
- compressed ? "" : "un", length);
-
- if (length < 0 || length > output->length ||
- (index + length) > msblk->bytes_used)
- goto block_release;
-
- for (; bytes < length; b++) {
- bh[b] = sb_getblk(sb, ++cur_index);
- if (bh[b] == NULL)
- goto block_release;
- bytes += msblk->devblksize;
- }
- ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
}
- for (i = 0; i < b; i++) {
- wait_on_buffer(bh[i]);
- if (!buffer_uptodate(bh[i]))
- goto block_release;
+ req->sb = sb;
+ req->index = index;
+ req->output = output;
+
+ if (next_index)
+ *next_index = index;
+
+ if (length)
+ length = read_data_block(req, length, next_index, sync);
+ else
+ length = read_metadata_block(req, next_index);
+
+ if (length < 0) {
+ ERROR("squashfs_read_data failed to read block 0x%llx\n",
+ (unsigned long long)index);
+ return -EIO;
}
- if (compressed) {
- length = squashfs_decompress(msblk, bh, b, offset, length,
- output);
- if (length < 0)
- goto read_failure;
- } else {
- /*
- * Block is uncompressed.
- */
- int in, pg_offset = 0;
- void *data = squashfs_first_page(output);
-
- for (bytes = length; k < b; k++) {
- in = min(bytes, msblk->devblksize - offset);
- bytes -= in;
- while (in) {
- if (pg_offset == PAGE_SIZE) {
- data = squashfs_next_page(output);
- pg_offset = 0;
- }
- avail = min_t(int, in, PAGE_SIZE -
- pg_offset);
- memcpy(data + pg_offset, bh[k]->b_data + offset,
- avail);
- in -= avail;
- pg_offset += avail;
- offset += avail;
- }
- offset = 0;
- put_bh(bh[k]);
- }
- squashfs_finish_page(output);
- }
-
- kfree(bh);
return length;
+}
-block_release:
- for (; k < b; k++)
- put_bh(bh[k]);
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output)
+{
+ return __squashfs_read_data(sb, index, length, next_index, output,
+ true);
+}
-read_failure:
- ERROR("squashfs_read_data failed to read block 0x%llx\n",
- (unsigned long long) index);
- kfree(bh);
- return -EIO;
+int squashfs_read_data_async(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output)
+{
+
+ return __squashfs_read_data(sb, index, length, next_index, output,
+ false);
}
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 23813c0..05e4244 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -209,17 +209,14 @@ void squashfs_cache_put(struct squashfs_cache_entry *entry)
*/
void squashfs_cache_delete(struct squashfs_cache *cache)
{
- int i, j;
+ int i;
if (cache == NULL)
return;
for (i = 0; i < cache->entries; i++) {
- if (cache->entry[i].data) {
- for (j = 0; j < cache->pages; j++)
- kfree(cache->entry[i].data[j]);
- kfree(cache->entry[i].data);
- }
+ if (cache->entry[i].page)
+ free_page_array(cache->entry[i].page, cache->pages);
kfree(cache->entry[i].actor);
}
@@ -236,7 +233,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
int block_size)
{
- int i, j;
+ int i;
struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL) {
@@ -268,22 +265,13 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries,
init_waitqueue_head(&cache->entry[i].wait_queue);
entry->cache = cache;
entry->block = SQUASHFS_INVALID_BLK;
- entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
- if (entry->data == NULL) {
+ entry->page = alloc_page_array(cache->pages, GFP_KERNEL);
+ if (!entry->page) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
}
-
- for (j = 0; j < cache->pages; j++) {
- entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (entry->data[j] == NULL) {
- ERROR("Failed to allocate %s buffer\n", name);
- goto cleanup;
- }
- }
-
- entry->actor = squashfs_page_actor_init(entry->data,
- cache->pages, 0);
+ entry->actor = squashfs_page_actor_init(entry->page,
+ cache->pages, 0, NULL);
if (entry->actor == NULL) {
ERROR("Failed to allocate %s cache entry\n", name);
goto cleanup;
@@ -314,18 +302,20 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
return min(length, entry->length - offset);
while (offset < entry->length) {
- void *buff = entry->data[offset / PAGE_SIZE]
- + (offset % PAGE_SIZE);
+ void *buff = kmap_atomic(entry->page[offset / PAGE_SIZE])
+ + (offset % PAGE_SIZE);
int bytes = min_t(int, entry->length - offset,
PAGE_SIZE - (offset % PAGE_SIZE));
if (bytes >= remaining) {
memcpy(buffer, buff, remaining);
+ kunmap_atomic(buff);
remaining = 0;
break;
}
memcpy(buffer, buff, bytes);
+ kunmap_atomic(buff);
buffer += bytes;
remaining -= bytes;
offset += bytes;
@@ -416,43 +406,38 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
- int i, res;
- void *table, *buffer, **data;
+ struct page **page;
+ void *buff;
+ int res;
struct squashfs_page_actor *actor;
- table = buffer = kmalloc(length, GFP_KERNEL);
- if (table == NULL)
+ page = alloc_page_array(pages, GFP_KERNEL);
+ if (!page)
return ERR_PTR(-ENOMEM);
- data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
- if (data == NULL) {
+ actor = squashfs_page_actor_init(page, pages, length, NULL);
+ if (actor == NULL) {
res = -ENOMEM;
goto failed;
}
- actor = squashfs_page_actor_init(data, pages, length);
- if (actor == NULL) {
- res = -ENOMEM;
- goto failed2;
- }
-
- for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
- data[i] = buffer;
-
res = squashfs_read_data(sb, block, length |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
- kfree(data);
- kfree(actor);
-
if (res < 0)
- goto failed;
+ goto failed2;
- return table;
+ buff = kmalloc(length, GFP_KERNEL);
+ if (!buff)
+ goto failed2;
+ squashfs_actor_to_buf(actor, buff, length);
+ squashfs_page_actor_free(actor, 0);
+ free_page_array(page, pages);
+ return buff;
failed2:
- kfree(data);
+ squashfs_page_actor_free(actor, 0);
failed:
- kfree(table);
+ free_page_array(page, pages);
return ERR_PTR(res);
}
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index d2bc136..7de35bf 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -24,7 +24,8 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
+#include <linux/highmem.h>
+#include <linux/fs.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -94,40 +95,44 @@ const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- void *buffer = NULL, *comp_opts;
+ void *comp_opts, *buffer = NULL;
+ struct page *page;
struct squashfs_page_actor *actor = NULL;
int length = 0;
+ if (!SQUASHFS_COMP_OPTS(flags))
+ return squashfs_comp_opts(msblk, buffer, length);
+
/*
* Read decompressor specific options from file system if present
*/
- if (SQUASHFS_COMP_OPTS(flags)) {
- buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (buffer == NULL) {
- comp_opts = ERR_PTR(-ENOMEM);
- goto out;
- }
- actor = squashfs_page_actor_init(&buffer, 1, 0);
- if (actor == NULL) {
- comp_opts = ERR_PTR(-ENOMEM);
- goto out;
- }
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
- length = squashfs_read_data(sb,
- sizeof(struct squashfs_super_block), 0, NULL, actor);
-
- if (length < 0) {
- comp_opts = ERR_PTR(length);
- goto out;
- }
+ actor = squashfs_page_actor_init(&page, 1, 0, NULL);
+ if (actor == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto actor_error;
}
- comp_opts = squashfs_comp_opts(msblk, buffer, length);
+ length = squashfs_read_data(sb,
+ sizeof(struct squashfs_super_block), 0, NULL, actor);
-out:
- kfree(actor);
- kfree(buffer);
+ if (length < 0) {
+ comp_opts = ERR_PTR(length);
+ goto read_error;
+ }
+
+ buffer = kmap_atomic(page);
+ comp_opts = squashfs_comp_opts(msblk, buffer, length);
+ kunmap_atomic(buffer);
+
+read_error:
+ squashfs_page_actor_free(actor, 0);
+actor_error:
+ __free_page(page);
return comp_opts;
}
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 13d8094..bb2e77e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -47,6 +47,7 @@
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
+#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -438,6 +439,21 @@ static int squashfs_readpage_fragment(struct page *page)
return res;
}
+static int squashfs_readpages_fragment(struct page *page,
+ struct list_head *readahead_pages, struct address_space *mapping)
+{
+ if (!page) {
+ page = lru_to_page(readahead_pages);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+ put_page(page);
+ return 0;
+ }
+ }
+ return squashfs_readpage_fragment(page);
+}
+
static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
{
struct inode *inode = page->mapping->host;
@@ -450,54 +466,105 @@ static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
return 0;
}
-static int squashfs_readpage(struct file *file, struct page *page)
+static int squashfs_readpages_sparse(struct page *page,
+ struct list_head *readahead_pages, int index, int file_end,
+ struct address_space *mapping)
{
- struct inode *inode = page->mapping->host;
+ if (!page) {
+ page = lru_to_page(readahead_pages);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ mapping_gfp_constraint(mapping, GFP_KERNEL))) {
+ put_page(page);
+ return 0;
+ }
+ }
+ return squashfs_readpage_sparse(page, index, file_end);
+}
+
+static int __squashfs_readpages(struct file *file, struct page *page,
+ struct list_head *readahead_pages, unsigned int nr_pages,
+ struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
- int index = page->index >> (msblk->block_log - PAGE_SHIFT);
int file_end = i_size_read(inode) >> msblk->block_log;
int res;
- void *pageaddr;
+
+ do {
+ struct page *cur_page = page ? page
+ : lru_to_page(readahead_pages);
+ int page_index = cur_page->index;
+ int index = page_index >> (msblk->block_log - PAGE_SHIFT);
+
+ if (page_index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT))
+ return 1;
+
+ if (index < file_end || squashfs_i(inode)->fragment_block ==
+ SQUASHFS_INVALID_BLK) {
+ u64 block = 0;
+ int bsize = read_blocklist(inode, index, &block);
+
+ if (bsize < 0)
+ return -1;
+
+ if (bsize == 0) {
+ res = squashfs_readpages_sparse(page,
+ readahead_pages, index, file_end,
+ mapping);
+ } else {
+ res = squashfs_readpages_block(page,
+ readahead_pages, &nr_pages, mapping,
+ page_index, block, bsize);
+ }
+ } else {
+ res = squashfs_readpages_fragment(page,
+ readahead_pages, mapping);
+ }
+ if (res)
+ return 0;
+ page = NULL;
+ } while (readahead_pages && !list_empty(readahead_pages));
+
+ return 0;
+}
+
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+ int ret;
TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
- page->index, squashfs_i(inode)->start);
+ page->index, squashfs_i(page->mapping->host)->start);
- if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
- PAGE_SHIFT))
- goto out;
+ get_page(page);
- if (index < file_end || squashfs_i(inode)->fragment_block ==
- SQUASHFS_INVALID_BLK) {
- u64 block = 0;
- int bsize = read_blocklist(inode, index, &block);
- if (bsize < 0)
- goto error_out;
-
- if (bsize == 0)
- res = squashfs_readpage_sparse(page, index, file_end);
+ ret = __squashfs_readpages(file, page, NULL, 1, page->mapping);
+ if (ret) {
+ flush_dcache_page(page);
+ if (ret < 0)
+ SetPageError(page);
else
- res = squashfs_readpage_block(page, block, bsize);
- } else
- res = squashfs_readpage_fragment(page);
+ SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ unlock_page(page);
+ put_page(page);
+ }
- if (!res)
- return 0;
+ return 0;
+}
-error_out:
- SetPageError(page);
-out:
- pageaddr = kmap_atomic(page);
- memset(pageaddr, 0, PAGE_SIZE);
- kunmap_atomic(pageaddr);
- flush_dcache_page(page);
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
-
+static int squashfs_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ TRACE("Entered squashfs_readpages, %u pages, first page index %lx\n",
+ nr_pages, lru_to_page(pages)->index);
+ __squashfs_readpages(file, NULL, pages, nr_pages, mapping);
return 0;
}
const struct address_space_operations squashfs_aops = {
- .readpage = squashfs_readpage
+ .readpage = squashfs_readpage,
+ .readpages = squashfs_readpages,
};
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
deleted file mode 100644
index f2310d2..0000000
--- a/fs/squashfs/file_cache.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2013
- * Phillip Lougher <phillip@squashfs.org.uk>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-#include <linux/fs.h>
-#include <linux/vfs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/pagemap.h>
-#include <linux/mutex.h>
-
-#include "squashfs_fs.h"
-#include "squashfs_fs_sb.h"
-#include "squashfs_fs_i.h"
-#include "squashfs.h"
-
-/* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize)
-{
- struct inode *i = page->mapping->host;
- struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
- block, bsize);
- int res = buffer->error;
-
- if (res)
- ERROR("Unable to read page, block %llx, size %x\n", block,
- bsize);
- else
- squashfs_copy_cache(page, buffer, buffer->length, 0);
-
- squashfs_cache_put(buffer);
- return res;
-}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index cb485d8..dc87f77 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
+#include <linux/mm_inline.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -20,157 +21,136 @@
#include "squashfs.h"
#include "page_actor.h"
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
- int pages, struct page **page);
-
-/* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
-
+static void release_actor_pages(struct page **page, int pages, int error)
{
- struct inode *inode = target_page->mapping->host;
- struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int i;
- int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
- int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- int start_index = target_page->index & ~mask;
- int end_index = start_index | mask;
- int i, n, pages, missing_pages, bytes, res = -ENOMEM;
+ for (i = 0; i < pages; i++) {
+ if (!page[i])
+ continue;
+ flush_dcache_page(page[i]);
+ if (!error)
+ SetPageUptodate(page[i]);
+ else {
+ SetPageError(page[i]);
+ zero_user_segment(page[i], 0, PAGE_SIZE);
+ }
+ unlock_page(page[i]);
+ put_page(page[i]);
+ }
+ kfree(page);
+}
+
+/*
+ * Create a "page actor" which will kmap and kunmap the
+ * page cache pages appropriately within the decompressor
+ */
+static struct squashfs_page_actor *actor_from_page_cache(
+ unsigned int actor_pages, struct page *target_page,
+ struct list_head *rpages, unsigned int *nr_pages, int start_index,
+ struct address_space *mapping)
+{
struct page **page;
struct squashfs_page_actor *actor;
- void *pageaddr;
+ int i, n;
+ gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
- if (end_index > file_end)
- end_index = file_end;
+ page = kmalloc_array(actor_pages, sizeof(void *), GFP_KERNEL);
+ if (!page)
+ return NULL;
- pages = end_index - start_index + 1;
+ for (i = 0, n = start_index; i < actor_pages; i++, n++) {
+ if (target_page == NULL && rpages && !list_empty(rpages)) {
+ struct page *cur_page = lru_to_page(rpages);
- page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
- if (page == NULL)
- return res;
+ if (cur_page->index < start_index + actor_pages) {
+ list_del(&cur_page->lru);
+ --(*nr_pages);
+ if (add_to_page_cache_lru(cur_page, mapping,
+ cur_page->index, gfp))
+ put_page(cur_page);
+ else
+ target_page = cur_page;
+ } else
+ rpages = NULL;
+ }
- /*
- * Create a "page actor" which will kmap and kunmap the
- * page cache pages appropriately within the decompressor
- */
- actor = squashfs_page_actor_init_special(page, pages, 0);
- if (actor == NULL)
- goto out;
-
- /* Try to grab all the pages covered by the Squashfs block */
- for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
- page[i] = (n == target_page->index) ? target_page :
- grab_cache_page_nowait(target_page->mapping, n);
-
- if (page[i] == NULL) {
- missing_pages++;
- continue;
+ if (target_page && target_page->index == n) {
+ page[i] = target_page;
+ target_page = NULL;
+ } else {
+ page[i] = grab_cache_page_nowait(mapping, n);
+ if (page[i] == NULL)
+ continue;
}
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
page[i] = NULL;
- missing_pages++;
}
}
- if (missing_pages) {
- /*
- * Couldn't get one or more pages, this page has either
- * been VM reclaimed, but others are still in the page cache
- * and uptodate, or we're racing with another thread in
- * squashfs_readpage also trying to grab them. Fall back to
- * using an intermediate buffer.
- */
- res = squashfs_read_cache(target_page, block, bsize, pages,
- page);
- if (res < 0)
- goto mark_errored;
-
- goto out;
+ actor = squashfs_page_actor_init(page, actor_pages, 0,
+ release_actor_pages);
+ if (!actor) {
+ release_actor_pages(page, actor_pages, -ENOMEM);
+ kfree(page);
+ return NULL;
}
-
- /* Decompress directly into the page cache buffers */
- res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
- if (res < 0)
- goto mark_errored;
-
- /* Last page may have trailing bytes not filled */
- bytes = res % PAGE_SIZE;
- if (bytes) {
- pageaddr = kmap_atomic(page[pages - 1]);
- memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
- kunmap_atomic(pageaddr);
- }
-
- /* Mark pages as uptodate, unlock and release */
- for (i = 0; i < pages; i++) {
- flush_dcache_page(page[i]);
- SetPageUptodate(page[i]);
- unlock_page(page[i]);
- if (page[i] != target_page)
- put_page(page[i]);
- }
-
- kfree(actor);
- kfree(page);
-
- return 0;
-
-mark_errored:
- /* Decompression failed, mark pages as errored. Target_page is
- * dealt with by the caller
- */
- for (i = 0; i < pages; i++) {
- if (page[i] == NULL || page[i] == target_page)
- continue;
- flush_dcache_page(page[i]);
- SetPageError(page[i]);
- unlock_page(page[i]);
- put_page(page[i]);
- }
-
-out:
- kfree(actor);
- kfree(page);
- return res;
+ return actor;
}
+int squashfs_readpages_block(struct page *target_page,
+ struct list_head *readahead_pages,
+ unsigned int *nr_pages,
+ struct address_space *mapping,
+ int page_index, u64 block, int bsize)
-static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
- int pages, struct page **page)
{
- struct inode *i = target_page->mapping->host;
- struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
- block, bsize);
- int bytes = buffer->length, res = buffer->error, n, offset = 0;
- void *pageaddr;
+ struct squashfs_page_actor *actor;
+ struct inode *inode = mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int start_index, end_index, file_end, actor_pages, res;
+ int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
- if (res) {
- ERROR("Unable to read page, block %llx, size %x\n", block,
- bsize);
- goto out;
+ /*
+ * If readpage() is called on an uncompressed datablock, we can just
+ * read the pages instead of fetching the whole block.
+ * This greatly improves the performance when a process keep doing
+ * random reads because we only fetch the necessary data.
+ * The readahead algorithm will take care of doing speculative reads
+ * if necessary.
+ * We can't read more than 1 block even if readahead provides use more
+ * pages because we don't know yet if the next block is compressed or
+ * not.
+ */
+ if (bsize && !SQUASHFS_COMPRESSED_BLOCK(bsize)) {
+ u64 block_end = block + msblk->block_size;
+
+ block += (page_index & mask) * PAGE_SIZE;
+ actor_pages = (block_end - block) / PAGE_SIZE;
+ if (*nr_pages < actor_pages)
+ actor_pages = *nr_pages;
+ start_index = page_index;
+ bsize = min_t(int, bsize, (PAGE_SIZE * actor_pages)
+ | SQUASHFS_COMPRESSED_BIT_BLOCK);
+ } else {
+ file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ start_index = page_index & ~mask;
+ end_index = start_index | mask;
+ if (end_index > file_end)
+ end_index = file_end;
+ actor_pages = end_index - start_index + 1;
}
- for (n = 0; n < pages && bytes > 0; n++,
- bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
- int avail = min_t(int, bytes, PAGE_SIZE);
+ actor = actor_from_page_cache(actor_pages, target_page,
+ readahead_pages, nr_pages, start_index,
+ mapping);
+ if (!actor)
+ return -ENOMEM;
- if (page[n] == NULL)
- continue;
-
- pageaddr = kmap_atomic(page[n]);
- squashfs_copy_data(pageaddr, buffer, offset, avail);
- memset(pageaddr + avail, 0, PAGE_SIZE - avail);
- kunmap_atomic(pageaddr);
- flush_dcache_page(page[n]);
- SetPageUptodate(page[n]);
- unlock_page(page[n]);
- if (page[n] != target_page)
- put_page(page[n]);
- }
-
-out:
- squashfs_cache_put(buffer);
- return res;
+ res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
+ actor);
+ return res < 0 ? res : 0;
}
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index ff4468b..df4fa3c 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -94,39 +94,17 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
- struct squashfs_lz4 *stream = strm;
- void *buff = stream->input, *data;
- int avail, i, bytes = length, res;
+ int res;
size_t dest_len = output->length;
+ struct squashfs_lz4 *stream = strm;
- for (i = 0; i < b; i++) {
- avail = min(bytes, msblk->devblksize - offset);
- memcpy(buff, bh[i]->b_data + offset, avail);
- buff += avail;
- bytes -= avail;
- offset = 0;
- put_bh(bh[i]);
- }
-
+ squashfs_bh_to_buf(bh, b, stream->input, offset, length,
+ msblk->devblksize);
res = lz4_decompress_unknownoutputsize(stream->input, length,
stream->output, &dest_len);
if (res)
return -EIO;
-
- bytes = dest_len;
- data = squashfs_first_page(output);
- buff = stream->output;
- while (data) {
- if (bytes <= PAGE_SIZE) {
- memcpy(data, buff, bytes);
- break;
- }
- memcpy(data, buff, PAGE_SIZE);
- buff += PAGE_SIZE;
- bytes -= PAGE_SIZE;
- data = squashfs_next_page(output);
- }
- squashfs_finish_page(output);
+ squashfs_buf_to_actor(stream->output, output, dest_len);
return dest_len;
}
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 934c17e..2c844d5 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -79,45 +79,19 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
- struct squashfs_lzo *stream = strm;
- void *buff = stream->input, *data;
- int avail, i, bytes = length, res;
+ int res;
size_t out_len = output->length;
+ struct squashfs_lzo *stream = strm;
- for (i = 0; i < b; i++) {
- avail = min(bytes, msblk->devblksize - offset);
- memcpy(buff, bh[i]->b_data + offset, avail);
- buff += avail;
- bytes -= avail;
- offset = 0;
- put_bh(bh[i]);
- }
-
+ squashfs_bh_to_buf(bh, b, stream->input, offset, length,
+ msblk->devblksize);
res = lzo1x_decompress_safe(stream->input, (size_t)length,
stream->output, &out_len);
if (res != LZO_E_OK)
- goto failed;
+ return -EIO;
+ squashfs_buf_to_actor(stream->output, output, out_len);
- res = bytes = (int)out_len;
- data = squashfs_first_page(output);
- buff = stream->output;
- while (data) {
- if (bytes <= PAGE_SIZE) {
- memcpy(data, buff, bytes);
- break;
- } else {
- memcpy(data, buff, PAGE_SIZE);
- buff += PAGE_SIZE;
- bytes -= PAGE_SIZE;
- data = squashfs_next_page(output);
- }
- }
- squashfs_finish_page(output);
-
- return res;
-
-failed:
- return -EIO;
+ return out_len;
}
const struct squashfs_decompressor squashfs_lzo_comp_ops = {
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 9b7b1b6..e348f56 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -9,79 +9,11 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/buffer_head.h>
#include "page_actor.h"
-/*
- * This file contains implementations of page_actor for decompressing into
- * an intermediate buffer, and for decompressing directly into the
- * page cache.
- *
- * Calling code should avoid sleeping between calls to squashfs_first_page()
- * and squashfs_finish_page().
- */
-
-/* Implementation of page_actor for decompressing into intermediate buffer */
-static void *cache_first_page(struct squashfs_page_actor *actor)
-{
- actor->next_page = 1;
- return actor->buffer[0];
-}
-
-static void *cache_next_page(struct squashfs_page_actor *actor)
-{
- if (actor->next_page == actor->pages)
- return NULL;
-
- return actor->buffer[actor->next_page++];
-}
-
-static void cache_finish_page(struct squashfs_page_actor *actor)
-{
- /* empty */
-}
-
-struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
- int pages, int length)
-{
- struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
-
- if (actor == NULL)
- return NULL;
-
- actor->length = length ? : pages * PAGE_SIZE;
- actor->buffer = buffer;
- actor->pages = pages;
- actor->next_page = 0;
- actor->squashfs_first_page = cache_first_page;
- actor->squashfs_next_page = cache_next_page;
- actor->squashfs_finish_page = cache_finish_page;
- return actor;
-}
-
-/* Implementation of page_actor for decompressing directly into page cache. */
-static void *direct_first_page(struct squashfs_page_actor *actor)
-{
- actor->next_page = 1;
- return actor->pageaddr = kmap_atomic(actor->page[0]);
-}
-
-static void *direct_next_page(struct squashfs_page_actor *actor)
-{
- if (actor->pageaddr)
- kunmap_atomic(actor->pageaddr);
-
- return actor->pageaddr = actor->next_page == actor->pages ? NULL :
- kmap_atomic(actor->page[actor->next_page++]);
-}
-
-static void direct_finish_page(struct squashfs_page_actor *actor)
-{
- if (actor->pageaddr)
- kunmap_atomic(actor->pageaddr);
-}
-
-struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
- int pages, int length)
+struct squashfs_page_actor *squashfs_page_actor_init(struct page **page,
+ int pages, int length, void (*release_pages)(struct page **, int, int))
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
@@ -93,8 +25,129 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
actor->pages = pages;
actor->next_page = 0;
actor->pageaddr = NULL;
- actor->squashfs_first_page = direct_first_page;
- actor->squashfs_next_page = direct_next_page;
- actor->squashfs_finish_page = direct_finish_page;
+ actor->release_pages = release_pages;
return actor;
}
+
+void squashfs_page_actor_free(struct squashfs_page_actor *actor, int error)
+{
+ if (!actor)
+ return;
+
+ if (actor->release_pages)
+ actor->release_pages(actor->page, actor->pages, error);
+ kfree(actor);
+}
+
+void squashfs_actor_to_buf(struct squashfs_page_actor *actor, void *buf,
+ int length)
+{
+ void *pageaddr;
+ int pos = 0, avail, i;
+
+ for (i = 0; i < actor->pages && pos < length; ++i) {
+ avail = min_t(int, length - pos, PAGE_SIZE);
+ if (actor->page[i]) {
+ pageaddr = kmap_atomic(actor->page[i]);
+ memcpy(buf + pos, pageaddr, avail);
+ kunmap_atomic(pageaddr);
+ }
+ pos += avail;
+ }
+}
+
+void squashfs_buf_to_actor(void *buf, struct squashfs_page_actor *actor,
+ int length)
+{
+ void *pageaddr;
+ int pos = 0, avail, i;
+
+ for (i = 0; i < actor->pages && pos < length; ++i) {
+ avail = min_t(int, length - pos, PAGE_SIZE);
+ if (actor->page[i]) {
+ pageaddr = kmap_atomic(actor->page[i]);
+ memcpy(pageaddr, buf + pos, avail);
+ kunmap_atomic(pageaddr);
+ }
+ pos += avail;
+ }
+}
+
+void squashfs_bh_to_actor(struct buffer_head **bh, int nr_buffers,
+ struct squashfs_page_actor *actor, int offset, int length, int blksz)
+{
+ void *kaddr = NULL;
+ int bytes = 0, pgoff = 0, b = 0, p = 0, avail, i;
+
+ while (bytes < length) {
+ if (actor->page[p]) {
+ kaddr = kmap_atomic(actor->page[p]);
+ while (pgoff < PAGE_SIZE && bytes < length) {
+ avail = min_t(int, blksz - offset,
+ PAGE_SIZE - pgoff);
+ memcpy(kaddr + pgoff, bh[b]->b_data + offset,
+ avail);
+ pgoff += avail;
+ bytes += avail;
+ offset = (offset + avail) % blksz;
+ if (!offset) {
+ put_bh(bh[b]);
+ ++b;
+ }
+ }
+ kunmap_atomic(kaddr);
+ pgoff = 0;
+ } else {
+ for (i = 0; i < PAGE_SIZE / blksz; ++i) {
+ if (bh[b])
+ put_bh(bh[b]);
+ ++b;
+ }
+ bytes += PAGE_SIZE;
+ }
+ ++p;
+ }
+}
+
+void squashfs_bh_to_buf(struct buffer_head **bh, int nr_buffers, void *buf,
+ int offset, int length, int blksz)
+{
+ int i, avail, bytes = 0;
+
+ for (i = 0; i < nr_buffers && bytes < length; ++i) {
+ avail = min_t(int, length - bytes, blksz - offset);
+ if (bh[i]) {
+ memcpy(buf + bytes, bh[i]->b_data + offset, avail);
+ put_bh(bh[i]);
+ }
+ bytes += avail;
+ offset = 0;
+ }
+}
+
+void free_page_array(struct page **page, int nr_pages)
+{
+ int i;
+
+ for (i = 0; i < nr_pages; ++i)
+ __free_page(page[i]);
+ kfree(page);
+}
+
+struct page **alloc_page_array(int nr_pages, int gfp_mask)
+{
+ int i;
+ struct page **page;
+
+ page = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
+ if (!page)
+ return NULL;
+ for (i = 0; i < nr_pages; ++i) {
+ page[i] = alloc_page(gfp_mask);
+ if (!page[i]) {
+ free_page_array(page, i);
+ return NULL;
+ }
+ }
+ return page;
+}
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 98537ea..aa1ed79 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -5,77 +5,61 @@
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
+ * the COPYING file in the top-level directory.
*/
-#ifndef CONFIG_SQUASHFS_FILE_DIRECT
struct squashfs_page_actor {
- void **page;
+ struct page **page;
+ void *pageaddr;
int pages;
int length;
int next_page;
+ void (*release_pages)(struct page **, int, int);
};
-static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
- int pages, int length)
-{
- struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+extern struct squashfs_page_actor *squashfs_page_actor_init(struct page **,
+ int, int, void (*)(struct page **, int, int));
+extern void squashfs_page_actor_free(struct squashfs_page_actor *, int);
- if (actor == NULL)
- return NULL;
+extern void squashfs_actor_to_buf(struct squashfs_page_actor *, void *, int);
+extern void squashfs_buf_to_actor(void *, struct squashfs_page_actor *, int);
+extern void squashfs_bh_to_actor(struct buffer_head **, int,
+ struct squashfs_page_actor *, int, int, int);
+extern void squashfs_bh_to_buf(struct buffer_head **, int, void *, int, int,
+ int);
- actor->length = length ? : pages * PAGE_SIZE;
- actor->page = page;
- actor->pages = pages;
- actor->next_page = 0;
- return actor;
-}
-
+/*
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
- return actor->page[0];
+ return actor->pageaddr = actor->page[0] ? kmap_atomic(actor->page[0])
+ : NULL;
}
static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
{
- return actor->next_page == actor->pages ? NULL :
- actor->page[actor->next_page++];
+ if (!IS_ERR_OR_NULL(actor->pageaddr))
+ kunmap_atomic(actor->pageaddr);
+
+ if (actor->next_page == actor->pages)
+ return actor->pageaddr = ERR_PTR(-ENODATA);
+
+ actor->pageaddr = actor->page[actor->next_page] ?
+ kmap_atomic(actor->page[actor->next_page]) : NULL;
+ ++actor->next_page;
+ return actor->pageaddr;
}
static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
- /* empty */
+ if (!IS_ERR_OR_NULL(actor->pageaddr))
+ kunmap_atomic(actor->pageaddr);
}
-#else
-struct squashfs_page_actor {
- union {
- void **buffer;
- struct page **page;
- };
- void *pageaddr;
- void *(*squashfs_first_page)(struct squashfs_page_actor *);
- void *(*squashfs_next_page)(struct squashfs_page_actor *);
- void (*squashfs_finish_page)(struct squashfs_page_actor *);
- int pages;
- int length;
- int next_page;
-};
-extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
-extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
- **, int, int);
-static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
-{
- return actor->squashfs_first_page(actor);
-}
-static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
-{
- return actor->squashfs_next_page(actor);
-}
-static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
-{
- actor->squashfs_finish_page(actor);
-}
-#endif
+extern struct page **alloc_page_array(int, int);
+extern void free_page_array(struct page **, int);
+
#endif
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 887d6d2..f4faab5 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -28,8 +28,12 @@
#define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
/* block.c */
+extern int squashfs_init_read_wq(void);
+extern void squashfs_destroy_read_wq(void);
extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
struct squashfs_page_actor *);
+extern int squashfs_read_data_async(struct super_block *, u64, int, u64 *,
+ struct squashfs_page_actor *);
/* cache.c */
extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
@@ -70,8 +74,9 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
int);
-/* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int);
+/* file_direct.c */
+extern int squashfs_readpages_block(struct page *, struct list_head *,
+ unsigned int *, struct address_space *, int, u64, int);
/* id.c */
extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 1da565c..8a6995d 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -49,7 +49,7 @@ struct squashfs_cache_entry {
int num_waiters;
wait_queue_head_t wait_queue;
struct squashfs_cache *cache;
- void **data;
+ struct page **page;
struct squashfs_page_actor *actor;
};
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index cf01e15..e2a0a73 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -444,9 +444,15 @@ static int __init init_squashfs_fs(void)
if (err)
return err;
+ if (!squashfs_init_read_wq()) {
+ destroy_inodecache();
+ return -ENOMEM;
+ }
+
err = register_filesystem(&squashfs_fs_type);
if (err) {
destroy_inodecache();
+ squashfs_destroy_read_wq();
return err;
}
@@ -460,6 +466,7 @@ static void __exit exit_squashfs_fs(void)
{
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
+ squashfs_destroy_read_wq();
}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 6bfaef7..2f7be1f 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -55,7 +55,7 @@ static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
struct comp_opts *opts;
int err = 0, n;
- opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+ opts = kmalloc(sizeof(*opts), GFP_ATOMIC);
if (opts == NULL) {
err = -ENOMEM;
goto out2;
@@ -136,6 +136,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
enum xz_ret xz_err;
int avail, total = 0, k = 0;
struct squashfs_xz *stream = strm;
+ void *buf = NULL;
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
@@ -156,12 +157,20 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
- if (stream->buf.out != NULL) {
+ if (!IS_ERR(stream->buf.out)) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
}
+ if (!stream->buf.out) {
+ if (!buf) {
+ buf = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf)
+ goto out;
+ }
+ stream->buf.out = buf;
+ }
xz_err = xz_dec_run(stream->state, &stream->buf);
if (stream->buf.in_pos == stream->buf.in_size && k < b)
@@ -173,11 +182,13 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (xz_err != XZ_STREAM_END || k < b)
goto out;
+ kfree(buf);
return total + stream->buf.out_pos;
out:
for (; k < b; k++)
put_bh(bh[k]);
+ kfree(buf);
return -EIO;
}
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 2ec24d1..d917c72 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -66,6 +66,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
+ void *buf = NULL;
int zlib_err, zlib_init = 0, k = 0;
z_stream *stream = strm;
@@ -84,10 +85,19 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
- if (stream->next_out != NULL)
+ if (!IS_ERR(stream->next_out))
stream->avail_out = PAGE_SIZE;
}
+ if (!stream->next_out) {
+ if (!buf) {
+ buf = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf)
+ goto out;
+ }
+ stream->next_out = buf;
+ }
+
if (!zlib_init) {
zlib_err = zlib_inflateInit(stream);
if (zlib_err != Z_OK) {
@@ -115,11 +125,13 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (k < b)
goto out;
+ kfree(buf);
return stream->total_out;
out:
for (; k < b; k++)
put_bh(bh[k]);
+ kfree(buf);
return -EIO;
}
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 7ec77f8..ab8dd15 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -50,8 +50,7 @@ static DEFINE_SPINLOCK(cancel_lock);
static inline bool isalarm(struct timerfd_ctx *ctx)
{
return ctx->clockid == CLOCK_REALTIME_ALARM ||
- ctx->clockid == CLOCK_BOOTTIME_ALARM ||
- ctx->clockid == CLOCK_POWEROFF_ALARM;
+ ctx->clockid == CLOCK_BOOTTIME_ALARM;
}
/*
@@ -143,8 +142,7 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
{
spin_lock(&ctx->cancel_lock);
if ((ctx->clockid == CLOCK_REALTIME ||
- ctx->clockid == CLOCK_REALTIME_ALARM ||
- ctx->clockid == CLOCK_POWEROFF_ALARM) &&
+ ctx->clockid == CLOCK_REALTIME_ALARM) &&
(flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
if (!ctx->might_cancel) {
ctx->might_cancel = true;
@@ -176,7 +174,6 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
enum hrtimer_mode htmode;
ktime_t texp;
int clockid = ctx->clockid;
- enum alarmtimer_type type;
htmode = (flags & TFD_TIMER_ABSTIME) ?
HRTIMER_MODE_ABS: HRTIMER_MODE_REL;
@@ -187,8 +184,10 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
ctx->tintv = timespec_to_ktime(ktmr->it_interval);
if (isalarm(ctx)) {
- type = clock2alarm(ctx->clockid);
- alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
+ alarm_init(&ctx->t.alarm,
+ ctx->clockid == CLOCK_REALTIME_ALARM ?
+ ALARM_REALTIME : ALARM_BOOTTIME,
+ timerfd_alarmproc);
} else {
hrtimer_init(&ctx->t.tmr, clockid, htmode);
hrtimer_set_expires(&ctx->t.tmr, texp);
@@ -388,7 +387,6 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
{
int ufd;
struct timerfd_ctx *ctx;
- enum alarmtimer_type type;
/* Check the TFD_* constants for consistency. */
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
@@ -399,8 +397,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
clockid != CLOCK_REALTIME &&
clockid != CLOCK_REALTIME_ALARM &&
clockid != CLOCK_BOOTTIME &&
- clockid != CLOCK_BOOTTIME_ALARM &&
- clockid != CLOCK_POWEROFF_ALARM))
+ clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
if (!capable(CAP_WAKE_ALARM) &&
@@ -416,12 +413,13 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
spin_lock_init(&ctx->cancel_lock);
ctx->clockid = clockid;
- if (isalarm(ctx)) {
- type = clock2alarm(ctx->clockid);
- alarm_init(&ctx->t.alarm, type, timerfd_alarmproc);
- } else {
+ if (isalarm(ctx))
+ alarm_init(&ctx->t.alarm,
+ ctx->clockid == CLOCK_REALTIME_ALARM ?
+ ALARM_REALTIME : ALARM_BOOTTIME,
+ timerfd_alarmproc);
+ else
hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
- }
ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 });
@@ -498,10 +496,6 @@ static int do_timerfd_settime(int ufd, int flags,
ret = timerfd_setup(ctx, flags, new);
spin_unlock_irq(&ctx->wqh.lock);
-
- if (ctx->clockid == CLOCK_POWEROFF_ALARM)
- set_power_on_alarm();
-
fdput(f);
return ret;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index ed8c374..932b906 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -249,7 +249,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
}
memcpy(value, buffer, len);
out:
- security_release_secctx(buffer, len);
+ kfree(buffer);
out_noalloc:
return len;
}
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 339c696..bb2beae 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -24,24 +24,6 @@
#include "kmem.h"
#include "xfs_message.h"
-/*
- * Greedy allocation. May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
- void *ptr;
- size_t kmsize = maxsize;
-
- while (!(ptr = vzalloc(kmsize))) {
- if ((kmsize >>= 1) <= minsize)
- kmsize = minsize;
- }
- if (ptr)
- *size = kmsize;
- return ptr;
-}
-
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 689f746..f0fc84f 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -69,8 +69,6 @@ static inline void kmem_free(const void *ptr)
}
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 33db69b..eed8f58 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -157,7 +157,8 @@ __xfs_ag_resv_free(
trace_xfs_ag_resv_free(pag, type, 0);
resv = xfs_perag_resv(pag, type);
- pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+ if (pag->pag_agno == 0)
+ pag->pag_mount->m_ag_max_usable += resv->ar_asked;
/*
* AGFL blocks are always considered "free", so whatever
* was reserved at mount time must be given back at umount.
@@ -217,7 +218,14 @@ __xfs_ag_resv_init(
return error;
}
- mp->m_ag_max_usable -= ask;
+ /*
+ * Reduce the maximum per-AG allocation length by however much we're
+ * trying to reserve for an AG. Since this is a filesystem-wide
+ * counter, we only make the adjustment for AG 0. This assumes that
+ * there aren't any AGs hungrier for per-AG reservation than AG 0.
+ */
+ if (pag->pag_agno == 0)
+ mp->m_ag_max_usable -= ask;
resv = xfs_perag_resv(pag, type);
resv->ar_asked = ask;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 9f06a21..c3702cd 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1579,6 +1579,10 @@ xfs_alloc_ag_vextent_small(
bp = xfs_btree_get_bufs(args->mp, args->tp,
args->agno, fbno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
xfs_trans_binval(args->tp, bp);
}
args->len = 1;
@@ -2136,6 +2140,10 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
+ if (!bp) {
+ error = -EFSCORRUPTED;
+ goto out_agbp_relse;
+ }
xfs_trans_binval(tp, bp);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index d2f4ab1..7eb9970 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4057,6 +4057,17 @@ xfs_trim_extent(
}
}
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+ struct xfs_bmbt_irec *irec,
+ struct xfs_inode *ip)
+
+{
+ xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+ i_size_read(VFS_I(ip))));
+}
+
/*
* Trim the returned map to the required bounds
*/
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index db53ac7f..f1446d1 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -196,6 +196,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
+void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 083cdd6..ce6958b 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
__uint32_t ilf_fields; /* flags for fields logged */
__uint16_t ilf_asize; /* size of attr d/ext/root */
__uint16_t ilf_dsize; /* size of data/ext/root */
+ __uint32_t ilf_pad; /* pad for 64 bit boundary */
__uint64_t ilf_ino; /* inode number */
union {
__uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -280,7 +281,12 @@ typedef struct xfs_inode_log_format {
__int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;
-typedef struct xfs_inode_log_format_32 {
+/*
+ * Old 32 bit systems will log in this format without the 64 bit
+ * alignment padding. Recovery will detect this and convert it to the
+ * correct format.
+ */
+struct xfs_inode_log_format_32 {
__uint16_t ilf_type; /* inode log item type */
__uint16_t ilf_size; /* size of this item */
__uint32_t ilf_fields; /* flags for fields logged */
@@ -294,24 +300,7 @@ typedef struct xfs_inode_log_format_32 {
__int64_t ilf_blkno; /* blkno of inode buffer */
__int32_t ilf_len; /* len of inode buffer */
__int32_t ilf_boffset; /* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
- __uint16_t ilf_type; /* inode log item type */
- __uint16_t ilf_size; /* size of this item */
- __uint32_t ilf_fields; /* flags for fields logged */
- __uint16_t ilf_asize; /* size of attr d/ext/root */
- __uint16_t ilf_dsize; /* size of data/ext/root */
- __uint32_t ilf_pad; /* pad for 64 bit boundary */
- __uint64_t ilf_ino; /* inode number */
- union {
- __uint32_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
- } ilf_u;
- __int64_t ilf_blkno; /* blkno of inode buffer */
- __int32_t ilf_len; /* len of inode buffer */
- __int32_t ilf_boffset; /* off of inode in buffer */
-} xfs_inode_log_format_64_t;
+} __attribute__((packed));
/*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b2d55a3..710a131 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
int
xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
+ umode_t mode;
+ bool set_mode = false;
int error = 0;
if (!acl)
@@ -257,18 +259,27 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
if (type == ACL_TYPE_ACCESS) {
- umode_t mode;
struct posix_acl *old_acl = acl;
error = posix_acl_update_mode(inode, &mode, &acl);
if (!acl)
posix_acl_release(old_acl);
if (error)
return error;
- error = xfs_set_mode(inode, mode);
- if (error)
- return error;
+ set_mode = true;
}
set_acl:
- return __xfs_set_acl(inode, acl, type);
+ error = __xfs_set_acl(inode, acl, type);
+ if (error)
+ return error;
+
+ /*
+ * We set the mode after successfully updating the ACL xattr because the
+ * xattr update can fail at ENOSPC and we don't want to change the mode
+ * if the ACL update hasn't been applied.
+ */
+ if (set_mode)
+ error = xfs_set_mode(inode, mode);
+
+ return error;
}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d23889e..d31cd1e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -335,7 +335,8 @@ xfs_end_io(
error = xfs_reflink_end_cow(ip, offset, size);
break;
case XFS_IO_UNWRITTEN:
- error = xfs_iomap_write_unwritten(ip, offset, size);
+ /* writeback should never update isize */
+ error = xfs_iomap_write_unwritten(ip, offset, size, false);
break;
default:
ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -437,6 +438,19 @@ xfs_imap_valid(
{
offset >>= inode->i_blkbits;
+ /*
+ * We have to make sure the cached mapping is within EOF to protect
+ * against eofblocks trimming on file release leaving us with a stale
+ * mapping. Otherwise, a page for a subsequent file extending buffered
+ * write could get picked up by this writeback cycle and written to the
+ * wrong blocks.
+ *
+ * Note that what we really want here is a generic mapping invalidation
+ * mechanism to protect us from arbitrary extent modifying contexts, not
+ * just eofblocks.
+ */
+ xfs_trim_extent_eof(imap, XFS_I(inode));
+
return offset >= imap->br_startoff &&
offset < imap->br_startoff + imap->br_blockcount;
}
@@ -725,6 +739,14 @@ xfs_vm_invalidatepage(
{
trace_xfs_invalidatepage(page->mapping->host, page, offset,
length);
+
+ /*
+ * If we are invalidating the entire page, clear the dirty state from it
+ * so that we can check for attempts to release dirty cached pages in
+ * xfs_vm_releasepage().
+ */
+ if (offset == 0 && length >= PAGE_SIZE)
+ cancel_dirty_page(page);
block_invalidatepage(page, offset, length);
}
@@ -1180,25 +1202,27 @@ xfs_vm_releasepage(
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
* ->releasepage() via shrink_active_list(). Conversely,
- * block_invalidatepage() can send pages that are still marked dirty
- * but otherwise have invalidated buffers.
+ * block_invalidatepage() can send pages that are still marked dirty but
+ * otherwise have invalidated buffers.
*
* We want to release the latter to avoid unnecessary buildup of the
- * LRU, skip the former and warn if we've left any lingering
- * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
- * or unwritten buffers and warn if the page is not dirty. Otherwise
- * try to release the buffers.
+ * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+ * that are entirely invalidated and need to be released. Hence the
+ * only time we should get dirty pages here is through
+ * shrink_active_list() and so we can simply skip those now.
+ *
+ * warn if we've left any lingering delalloc/unwritten buffers on clean
+ * or invalidated pages we are about to release.
*/
+ if (PageDirty(page))
+ return 0;
+
xfs_count_page_state(page, &delalloc, &unwritten);
- if (delalloc) {
- WARN_ON_ONCE(!PageDirty(page));
+ if (WARN_ON_ONCE(delalloc))
return 0;
- }
- if (unwritten) {
- WARN_ON_ONCE(!PageDirty(page));
+ if (WARN_ON_ONCE(unwritten))
return 0;
- }
return try_to_free_buffers(page);
}
@@ -1532,6 +1556,21 @@ xfs_end_io_direct_write(
return 0;
}
+ if (flags & XFS_DIO_FLAG_COW)
+ error = xfs_reflink_end_cow(ip, offset, size);
+
+ /*
+ * Unwritten conversion updates the in-core isize after extent
+ * conversion but before updating the on-disk size. Updating isize any
+ * earlier allows a racing dio read to find unwritten extents before
+ * they are converted.
+ */
+ if (flags & XFS_DIO_FLAG_UNWRITTEN) {
+ trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
+
+ return xfs_iomap_write_unwritten(ip, offset, size, true);
+ }
+
/*
* We need to update the in-core inode size here so that we don't end up
* with the on-disk inode size being outside the in-core inode size. We
@@ -1548,13 +1587,6 @@ xfs_end_io_direct_write(
i_size_write(inode, offset + size);
spin_unlock(&ip->i_flags_lock);
- if (flags & XFS_DIO_FLAG_COW)
- error = xfs_reflink_end_cow(ip, offset, size);
- if (flags & XFS_DIO_FLAG_UNWRITTEN) {
- trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
-
- error = xfs_iomap_write_unwritten(ip, offset, size);
- }
if (flags & XFS_DIO_FLAG_APPEND) {
trace_xfs_end_io_direct_write_append(ip, offset, size);
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index be0b79d..c664300 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
&bp, XFS_ATTR_FORK);
if (error)
return error;
+ node = bp->b_addr;
+ btree = dp->d_ops->node_tree_p(node);
child_fsb = be32_to_cpu(btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5ffefac..cb62871 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
GFP_NOFS, true);
}
+#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -195,6 +196,7 @@ xfs_bmap_rtalloc(
}
return 0;
}
+#endif /* CONFIG_XFS_RT */
/*
* Check if the endoff is outside the last extent. If so the caller will grow
@@ -1445,7 +1447,19 @@ xfs_shift_file_space(
return error;
/*
- * The extent shiting code works on extent granularity. So, if
+ * Clean out anything hanging around in the cow fork now that
+ * we've flushed all the dirty data out to disk to avoid having
+ * CoW extents at the wrong offsets.
+ */
+ if (xfs_is_reflink_inode(ip)) {
+ error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
+ true);
+ if (error)
+ return error;
+ }
+
+ /*
+ * The extent shifting code works on extent granularity. So, if
* stop_fsb is not the starting block of extent, we need to split
* the extent at stop_fsb.
*/
@@ -2094,11 +2108,31 @@ xfs_swap_extents(
ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
+ }
+
+ /* Swap the cow forks. */
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ xfs_extnum_t extnum;
+
+ ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+
+ extnum = ip->i_cnextents;
+ ip->i_cnextents = tip->i_cnextents;
+ tip->i_cnextents = extnum;
+
cowfp = ip->i_cowfp;
ip->i_cowfp = tip->i_cowfp;
tip->i_cowfp = cowfp;
- xfs_inode_set_cowblocks_tag(ip);
- xfs_inode_set_cowblocks_tag(tip);
+
+ if (ip->i_cowfp && ip->i_cnextents)
+ xfs_inode_set_cowblocks_tag(ip);
+ else
+ xfs_inode_clear_cowblocks_tag(ip);
+ if (tip->i_cowfp && tip->i_cnextents)
+ xfs_inode_set_cowblocks_tag(tip);
+ else
+ xfs_inode_clear_cowblocks_tag(tip);
}
xfs_trans_log_inode(tp, ip, src_log_flags);
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index f100539..ce330f0 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
struct xfs_trans;
struct xfs_bmalloca;
+#ifdef CONFIG_XFS_RT
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+#else /* !CONFIG_XFS_RT */
+/*
+ * Attempting to allocate RT extents when RT is disabled indicates corruption and
+ * should trigger a shutdown.
+ */
+static inline int
+xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
+{
+ return -EFSCORRUPTED;
+}
+#endif /* CONFIG_XFS_RT */
+
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index ed7ee4e..bcf7297 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -167,7 +167,7 @@ xfs_verifier_error(
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
+ xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
__return_address, bp->b_ops->name, bp->b_bn);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 586b398..362c6b4 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -92,7 +92,7 @@ xfs_zero_range(
xfs_off_t count,
bool *did_zero)
{
- return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
+ return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
}
int
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 9e795ab..fe9a9a1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1632,10 +1632,12 @@ xfs_itruncate_extents(
goto out;
/*
- * Clear the reflink flag if we truncated everything.
+ * Clear the reflink flag if there are no data fork blocks and
+ * there are no extents staged in the cow fork.
*/
- if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
- ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+ if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
+ if (ip->i_d.di_nblocks == 0)
+ ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
xfs_inode_clear_cowblocks_tag(ip);
}
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9491574..d0a3c4b 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
to->di_dmstate = from->di_dmstate;
to->di_flags = from->di_flags;
+ /* log a dummy value to ensure log structure is fully initialised */
+ to->di_next_unlinked = NULLAGINO;
+
if (from->di_version == 3) {
to->di_changecount = inode->i_version;
to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
* the second with the on-disk inode structure, and a possible third and/or
* fourth with the inode data/extents/b-tree root and inode attributes
* data/extents/b-tree root.
+ *
+ * Note: Always use the 64 bit inode log format structure so we don't
+ * leave an uninitialised hole in the format item on 64 bit systems. Log
+ * recovery on 32 bit systems handles this just fine, so there's no reason
+ * for not using and initialising the properly padded structure all the time.
*/
STATIC void
xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
- struct xfs_inode_log_format *ilf;
struct xfs_log_iovec *vecp = NULL;
+ struct xfs_inode_log_format *ilf;
ASSERT(ip->i_d.di_version > 1);
@@ -425,7 +433,17 @@ xfs_inode_item_format(
ilf->ilf_boffset = ip->i_imap.im_boffset;
ilf->ilf_fields = XFS_ILOG_CORE;
ilf->ilf_size = 2; /* format + core */
- xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
+
+ /*
+ * make sure we don't leak uninitialised data into the log in the case
+ * when we don't log every field in the inode.
+ */
+ ilf->ilf_dsize = 0;
+ ilf->ilf_asize = 0;
+ ilf->ilf_pad = 0;
+ memset(&ilf->ilf_u.ilfu_uuid, 0, sizeof(ilf->ilf_u.ilfu_uuid));
+
+ xlog_finish_iovec(lv, vecp, sizeof(*ilf));
xfs_inode_item_format_core(ip, lv, &vecp);
xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -745,7 +763,7 @@ xfs_iflush_done(
*/
iip = INODE_ITEM(blip);
if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
- lip->li_flags & XFS_LI_FAILED)
+ (blip->li_flags & XFS_LI_FAILED))
need_ail++;
blip = next;
@@ -855,48 +873,30 @@ xfs_istale_done(
}
/*
- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
- * (which can have different field alignments) to the native version
+ * convert an xfs_inode_log_format struct from the old 32 bit version
+ * (which can have different field alignments) to the native 64 bit version
*/
int
xfs_inode_item_format_convert(
- xfs_log_iovec_t *buf,
- xfs_inode_log_format_t *in_f)
+ struct xfs_log_iovec *buf,
+ struct xfs_inode_log_format *in_f)
{
- if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
- xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
+ struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
- in_f->ilf_type = in_f32->ilf_type;
- in_f->ilf_size = in_f32->ilf_size;
- in_f->ilf_fields = in_f32->ilf_fields;
- in_f->ilf_asize = in_f32->ilf_asize;
- in_f->ilf_dsize = in_f32->ilf_dsize;
- in_f->ilf_ino = in_f32->ilf_ino;
- /* copy biggest field of ilf_u */
- memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
- in_f32->ilf_u.ilfu_uuid.__u_bits,
- sizeof(uuid_t));
- in_f->ilf_blkno = in_f32->ilf_blkno;
- in_f->ilf_len = in_f32->ilf_len;
- in_f->ilf_boffset = in_f32->ilf_boffset;
- return 0;
- } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
- xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
+ if (buf->i_len != sizeof(*in_f32))
+ return -EFSCORRUPTED;
- in_f->ilf_type = in_f64->ilf_type;
- in_f->ilf_size = in_f64->ilf_size;
- in_f->ilf_fields = in_f64->ilf_fields;
- in_f->ilf_asize = in_f64->ilf_asize;
- in_f->ilf_dsize = in_f64->ilf_dsize;
- in_f->ilf_ino = in_f64->ilf_ino;
- /* copy biggest field of ilf_u */
- memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
- in_f64->ilf_u.ilfu_uuid.__u_bits,
- sizeof(uuid_t));
- in_f->ilf_blkno = in_f64->ilf_blkno;
- in_f->ilf_len = in_f64->ilf_len;
- in_f->ilf_boffset = in_f64->ilf_boffset;
- return 0;
- }
- return -EFSCORRUPTED;
+ in_f->ilf_type = in_f32->ilf_type;
+ in_f->ilf_size = in_f32->ilf_size;
+ in_f->ilf_fields = in_f32->ilf_fields;
+ in_f->ilf_asize = in_f32->ilf_asize;
+ in_f->ilf_dsize = in_f32->ilf_dsize;
+ in_f->ilf_ino = in_f32->ilf_ino;
+ /* copy biggest field of ilf_u */
+ memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
+ in_f32->ilf_u.ilfu_uuid.__u_bits, sizeof(uuid_t));
+ in_f->ilf_blkno = in_f32->ilf_blkno;
+ in_f->ilf_len = in_f32->ilf_len;
+ in_f->ilf_boffset = in_f32->ilf_boffset;
+ return 0;
}
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bce2e26..6c95812 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1085,6 +1085,7 @@ xfs_ioctl_setattr_dax_invalidate(
int *join_flags)
{
struct inode *inode = VFS_I(ip);
+ struct super_block *sb = inode->i_sb;
int error;
*join_flags = 0;
@@ -1097,7 +1098,7 @@ xfs_ioctl_setattr_dax_invalidate(
if (fa->fsx_xflags & FS_XFLAG_DAX) {
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
return -EINVAL;
- if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
+ if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
return -EINVAL;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 65740d1..f286f63 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -836,7 +836,8 @@ int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
xfs_off_t offset,
- xfs_off_t count)
+ xfs_off_t count,
+ bool update_isize)
{
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb;
@@ -847,6 +848,7 @@ xfs_iomap_write_unwritten(
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
struct xfs_defer_ops dfops;
+ struct inode *inode = VFS_I(ip);
xfs_fsize_t i_size;
uint resblks;
int error;
@@ -906,7 +908,8 @@ xfs_iomap_write_unwritten(
i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
if (i_size > offset + count)
i_size = offset + count;
-
+ if (update_isize && i_size > i_size_read(inode))
+ i_size_write(inode, i_size);
i_size = xfs_new_eof(ip, i_size);
if (i_size) {
ip->i_d.di_size = i_size;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 6d45cf0..d71703a 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
struct xfs_bmbt_irec *);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index d8a77db..26d67ce 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -361,7 +361,6 @@ xfs_bulkstat(
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
- size_t irbsize; /* size of irec buffer in bytes */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
int nirbuf; /* size of irbuf */
int ubcount; /* size of user's buffer */
@@ -388,11 +387,10 @@ xfs_bulkstat(
*ubcountp = 0;
*done = 0;
- irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
if (!irbuf)
return -ENOMEM;
-
- nirbuf = irbsize / sizeof(*irbuf);
+ nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
/*
* Loop over the allocation groups, starting from the last
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d7..0492436 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
- XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
}
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 93a7aaf..cecd375 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -279,7 +279,7 @@ xfs_fs_commit_blocks(
(end - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(error);
- error = xfs_iomap_write_unwritten(ip, start, length);
+ error = xfs_iomap_write_unwritten(ip, start, length, false);
if (error)
goto out_drop_iolock;
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 0015c19..17d3c96 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -767,7 +767,13 @@ xfs_reflink_end_cow(
/* If there is a hole at end_fsb - 1 go to the previous extent */
if (eof || got.br_startoff > end_fsb) {
- ASSERT(idx > 0);
+ /*
+ * In case of racing, overlapping AIO writes no COW extents
+ * might be left by the time I/O completes for the loser of
+ * the race. In that case we are done.
+ */
+ if (idx <= 0)
+ goto out_cancel;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
}
@@ -841,6 +847,7 @@ xfs_reflink_end_cow(
out_defer:
xfs_defer_cancel(&dfops);
+out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0504ef8..976f8ac 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -115,15 +115,35 @@ do { \
(__ret); \
})
-#define this_cpu_generic_read(pcp) \
+#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
typeof(pcp) __ret; \
preempt_disable_notrace(); \
- __ret = raw_cpu_generic_read(pcp); \
+ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
__ret; \
})
+#define __this_cpu_generic_read_noirq(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __ret = raw_cpu_generic_read(pcp); \
+ raw_local_irq_restore(__flags); \
+ __ret; \
+})
+
+#define this_cpu_generic_read(pcp) \
+({ \
+ typeof(pcp) __ret; \
+ if (__native_word(pcp)) \
+ __ret = __this_cpu_generic_read_nopreempt(pcp); \
+ else \
+ __ret = __this_cpu_generic_read_noirq(pcp); \
+ __ret; \
+})
+
#define this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long __flags; \
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 453a63f..50810be 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -491,7 +491,9 @@
# define DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT 0x2
# define DP_TEST_PHY_PATTERN_PRBS7 0x3
# define DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN 0x4
-# define DP_TEST_PHY_PATTERN_HBR2_CTS_EYE_PATTERN 0x5
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_1 0x5
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_2 0x6
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_3 0x7
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index f5678aa..0dbddb3 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -23,6 +23,8 @@ struct mipi_dsi_device;
#define MIPI_DSI_MSG_USE_LPM BIT(1)
/* read mipi_dsi_msg.ctrl and unicast to only that ctrls */
#define MIPI_DSI_MSG_UNICAST BIT(2)
+/* Stack all commands until lastcommand bit and trigger all in one go */
+#define MIPI_DSI_MSG_LASTCOMMAND BIT(3)
/**
* struct mipi_dsi_msg - read/write DSI buffer
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 4fa6bb2..be39d23 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -771,7 +771,10 @@
#define CLK_PCLK_DECON 113
-#define DISP_NR_CLK 114
+#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
+#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
+
+#define DISP_NR_CLK 116
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/include/dt-bindings/clock/qcom,cpu-a7.h
similarity index 61%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to include/dt-bindings/clock/qcom,cpu-a7.h
index c06b806..9b89030 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/include/dt-bindings/clock/qcom,cpu-a7.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,14 +11,11 @@
* GNU General Public License for more details.
*/
+#ifndef _DT_BINDINGS_CLK_MSM_CPU_A7_H
+#define _DT_BINDINGS_CLK_MSM_CPU_A7_H
-/dts-v1/;
+#define SYS_APC0_AUX_CLK 0
+#define APCS_CPU_PLL 1
+#define APCS_CLK 2
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index e773848..950811f 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -48,59 +48,60 @@
#define GCC_CPUSS_AHB_CLK 30
#define GCC_CPUSS_AHB_CLK_SRC 31
#define GCC_CPUSS_GNOC_CLK 32
-#define GCC_CPUSS_GPLL0_CLK_SRC 33
-#define GCC_CPUSS_RBCPR_CLK 34
-#define GCC_CPUSS_RBCPR_CLK_SRC 35
-#define GCC_GP1_CLK 36
-#define GCC_GP1_CLK_SRC 37
-#define GCC_GP2_CLK 38
-#define GCC_GP2_CLK_SRC 39
-#define GCC_GP3_CLK 40
-#define GCC_GP3_CLK_SRC 41
-#define GCC_MSS_CFG_AHB_CLK 42
-#define GCC_MSS_GPLL0_DIV_CLK_SRC 43
-#define GCC_MSS_SNOC_AXI_CLK 44
-#define GCC_PCIE_AUX_CLK 45
-#define GCC_PCIE_AUX_PHY_CLK_SRC 46
-#define GCC_PCIE_CFG_AHB_CLK 47
-#define GCC_PCIE_0_CLKREF_EN 48
-#define GCC_PCIE_MSTR_AXI_CLK 49
-#define GCC_PCIE_PHY_REFGEN_CLK 50
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 51
-#define GCC_PCIE_PIPE_CLK 52
-#define GCC_PCIE_SLEEP_CLK 53
-#define GCC_PCIE_SLV_AXI_CLK 54
-#define GCC_PCIE_SLV_Q2A_AXI_CLK 55
-#define GCC_PDM2_CLK 56
-#define GCC_PDM2_CLK_SRC 57
-#define GCC_PDM_AHB_CLK 58
-#define GCC_PDM_XO4_CLK 59
-#define GCC_PRNG_AHB_CLK 60
-#define GCC_SDCC1_AHB_CLK 61
-#define GCC_SDCC1_APPS_CLK 62
-#define GCC_SDCC1_APPS_CLK_SRC 63
-#define GCC_SPMI_FETCHER_AHB_CLK 64
-#define GCC_SPMI_FETCHER_CLK 65
-#define GCC_SPMI_FETCHER_CLK_SRC 66
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 67
-#define GCC_SYS_NOC_USB3_CLK 68
-#define GCC_USB30_MASTER_CLK 69
-#define GCC_USB30_MASTER_CLK_SRC 70
-#define GCC_USB30_MOCK_UTMI_CLK 71
-#define GCC_USB30_MOCK_UTMI_CLK_SRC 72
-#define GCC_USB30_SLEEP_CLK 73
-#define GCC_USB3_PRIM_CLKREF_CLK 74
-#define GCC_USB3_PHY_AUX_CLK 75
-#define GCC_USB3_PHY_AUX_CLK_SRC 76
-#define GCC_USB3_PHY_PIPE_CLK 77
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK 78
-#define GCC_XO_DIV4_CLK 79
-#define GPLL0 80
-#define GPLL0_OUT_EVEN 81
-
-/* GDSCs */
-#define PCIE_GDSC 0
-#define USB30_GDSC 1
+#define GCC_CPUSS_RBCPR_CLK 33
+#define GCC_CPUSS_RBCPR_CLK_SRC 34
+#define GCC_EMAC_CLK_SRC 35
+#define GCC_EMAC_PTP_CLK_SRC 36
+#define GCC_ETH_AXI_CLK 37
+#define GCC_ETH_PTP_CLK 38
+#define GCC_ETH_RGMII_CLK 39
+#define GCC_ETH_SLAVE_AHB_CLK 40
+#define GCC_GP1_CLK 41
+#define GCC_GP1_CLK_SRC 42
+#define GCC_GP2_CLK 43
+#define GCC_GP2_CLK_SRC 44
+#define GCC_GP3_CLK 45
+#define GCC_GP3_CLK_SRC 46
+#define GCC_MSS_CFG_AHB_CLK 47
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 48
+#define GCC_MSS_SNOC_AXI_CLK 49
+#define GCC_PCIE_AUX_CLK 50
+#define GCC_PCIE_AUX_PHY_CLK_SRC 51
+#define GCC_PCIE_CFG_AHB_CLK 52
+#define GCC_PCIE_MSTR_AXI_CLK 53
+#define GCC_PCIE_PHY_REFGEN_CLK 54
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 55
+#define GCC_PCIE_PIPE_CLK 56
+#define GCC_PCIE_SLEEP_CLK 57
+#define GCC_PCIE_SLV_AXI_CLK 58
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 59
+#define GCC_PDM2_CLK 60
+#define GCC_PDM2_CLK_SRC 61
+#define GCC_PDM_AHB_CLK 62
+#define GCC_PDM_XO4_CLK 63
+#define GCC_PRNG_AHB_CLK 64
+#define GCC_SDCC1_AHB_CLK 65
+#define GCC_SDCC1_APPS_CLK 66
+#define GCC_SDCC1_APPS_CLK_SRC 67
+#define GCC_SPMI_FETCHER_AHB_CLK 68
+#define GCC_SPMI_FETCHER_CLK 69
+#define GCC_SPMI_FETCHER_CLK_SRC 70
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 71
+#define GCC_SYS_NOC_USB3_CLK 72
+#define GCC_USB30_MASTER_CLK 73
+#define GCC_USB30_MASTER_CLK_SRC 74
+#define GCC_USB30_MOCK_UTMI_CLK 75
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 76
+#define GCC_USB30_SLEEP_CLK 77
+#define GCC_USB3_PHY_AUX_CLK 78
+#define GCC_USB3_PHY_AUX_CLK_SRC 79
+#define GCC_USB3_PHY_PIPE_CLK 80
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 81
+#define GPLL0 82
+#define GPLL0_OUT_EVEN 83
+#define GPLL4 84
+#define GPLL4_OUT_EVEN 85
+#define GCC_USB3_PRIM_CLKREF_CLK 86
/* CPU clocks */
#define CLOCK_A7SS 0
@@ -125,5 +126,6 @@
#define GCC_USB3PHY_PHY_BCR 16
#define GCC_QUSB2PHY_BCR 17
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 18
+#define GCC_EMAC_BCR 19
#endif
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/include/dt-bindings/soc/qcom,dcc_v2.h
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm845-qvr.dts
copy to include/dt-bindings/soc/qcom,dcc_v2.h
index c06b806..fb4ed6d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/include/dt-bindings/soc/qcom,dcc_v2.h
@@ -10,14 +10,11 @@
* GNU General Public License for more details.
*/
+#ifndef __DT_BINDINGS_QCOM_DCC_V2_H
+#define __DT_BINDINGS_QCOM_DCC_V2_H
-/dts-v1/;
+#define DCC_READ 0
+#define DCC_WRITE 1
+#define DCC_LOOP 2
-#include "sdm845-v2.dtsi"
-#include "sdm845-qvr.dtsi"
-
-/ {
- model = "Qualcomm Technologies, Inc. SDM845 QVR";
- compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
- qcom,board-id = <0x01000B 0x20>;
-};
+#endif
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 8a30cb5..9d80312 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -5,12 +5,10 @@
#include <linux/hrtimer.h>
#include <linux/timerqueue.h>
#include <linux/rtc.h>
-#include <linux/types.h>
enum alarmtimer_type {
ALARM_REALTIME,
ALARM_BOOTTIME,
- ALARM_POWEROFF_REALTIME,
ALARM_NUMTYPE,
};
@@ -50,9 +48,6 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
-void set_power_on_alarm(void);
-void power_on_alarm_init(void);
-enum alarmtimer_type clock2alarm(clockid_t clockid);
u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
@@ -60,8 +55,5 @@ ktime_t alarm_expires_remaining(const struct alarm *alarm);
/* Provide way to access the rtc device being used by alarmtimers */
struct rtc_device *alarmtimer_get_rtcdev(void);
-#ifdef CONFIG_RTC_DRV_QPNP
-extern bool poweron_alarm;
-#endif
#endif
diff --git a/include/linux/arm-smmu-errata.h b/include/linux/arm-smmu-errata.h
new file mode 100644
index 0000000..3d36a52
--- /dev/null
+++ b/include/linux/arm-smmu-errata.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARM_SMMU_ERRATA_H__
+#define __ARM_SMMU_ERRATA_H__
+
+#define ARM_SMMU_MIN_IOVA_ALIGN SZ_16K
+#define ARM_SMMU_GUARD_PROT (IOMMU_READ | IOMMU_WRITE | IOMMU_GUARD)
+
+#ifdef CONFIG_ARM_SMMU
+
+struct page *arm_smmu_errata_get_guard_page(int vmid);
+#else
+
+static inline struct page *arm_smmu_errata_get_guard_page(
+ int vmid)
+{
+ return NULL;
+}
+#endif
+#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9d4443f..2be99b2 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -387,6 +387,20 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
return __audit_socketcall(nargs, args);
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ unsigned long a[AUDITSC_ARGS];
+ int i;
+
+ if (audit_dummy_context())
+ return 0;
+
+ for (i = 0; i < nargs; i++)
+ a[i] = (unsigned long)args[i];
+ return __audit_socketcall(nargs, a);
+}
+
static inline int audit_sockaddr(int len, void *addr)
{
if (unlikely(!audit_dummy_context()))
@@ -513,6 +527,12 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
{
return 0;
}
+
+static inline int audit_socketcall_compat(int nargs, u32 *args)
+{
+ return 0;
+}
+
static inline void audit_fd_pair(int fd1, int fd2)
{ }
static inline int audit_sockaddr(int len, void *addr)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2b8b6e0..8a7a15c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -81,6 +81,12 @@ struct bio {
struct bio_set *bi_pool;
/*
+	 * When using direct-io (O_DIRECT), we can't get the inode from a bio
+ * by walking bio->bi_io_vec->bv_page->mapping->host
+ * since the page is anon.
+ */
+ struct inode *bi_dio_inode;
+ /*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index ebbacd1..447a915 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -226,6 +226,7 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index edc5d04..1cfe5ef 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -222,6 +222,7 @@ enum ccp_xts_aes_unit_size {
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e0aa720..3484287 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -28,21 +28,6 @@ struct cpu {
struct device dev;
};
-struct cpu_pstate_pwr {
- unsigned int freq;
- uint32_t power;
-};
-
-struct cpu_pwr_stats {
- int cpu;
- long temp;
- struct cpu_pstate_pwr *ptable;
- bool throttling;
- int len;
-};
-
-extern struct cpu_pwr_stats *get_cpu_pwr_stats(void);
-
extern void boot_cpu_init(void);
extern void boot_cpu_state_init(void);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7a2ae2f..d921206 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_KMAP_DYING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_GICV3_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index cd32a49..d807fa9 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -55,7 +55,9 @@ static inline void cpuset_dec(void)
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -168,11 +170,15 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_force_rebuild(void) { }
+
static inline void cpuset_update_active_cpus(bool cpu_online)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void cpuset_wait_for_hotplug(void) { }
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7..8dce6fd 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index 64ae548..e9dabab 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -25,6 +25,9 @@ struct dma_fast_smmu_mapping {
size_t size;
size_t num_4k_pages;
+ u32 min_iova_align;
+ struct page *guard_page;
+
unsigned int bitmap_size;
unsigned long *bitmap;
unsigned long next_start;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 18bd249..4f6ec47 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2925,6 +2925,8 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+struct inode *dio_bio_get_inode(struct bio *bio);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index f6dfc29..9b57c19 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -34,6 +34,7 @@
#define FS_ENCRYPTION_MODE_AES_256_GCM 2
#define FS_ENCRYPTION_MODE_AES_256_CBC 3
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+#define FS_ENCRYPTION_MODE_PRIVATE 127
/**
* Encryption context for inode
@@ -80,6 +81,7 @@ struct fscrypt_info {
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+ u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
@@ -176,7 +178,8 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
{
- return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS ||
+ mode == FS_ENCRYPTION_MODE_PRIVATE);
}
static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
@@ -257,6 +260,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *,
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+extern int fs_using_hardware_encryption(struct inode *inode);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -354,6 +358,11 @@ static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
return;
}
+static inline int fs_notsupp_using_hardware_encryption(struct inode *inode)
+{
+ return -EOPNOTSUPP;
+}
+
/* fname.c */
static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
const struct qstr *iname,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 46cd745..16ef407 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -189,7 +189,7 @@ struct vm_area_struct;
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 61aff32..657b565 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -41,6 +41,7 @@ void kmap_flush_unused(void);
#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
void kmap_atomic_flush_unused(void);
+int kmap_remove_unused_cpu(unsigned int cpu);
#else
static inline void kmap_atomic_flush_unused(void) { }
#endif
@@ -91,6 +92,10 @@ static inline void __kunmap_atomic(void *addr)
#endif /* CONFIG_HIGHMEM */
+#if !defined(CONFIG_HIGHMEM) || !defined(CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH)
+static inline int kmap_remove_unused_cpu(unsigned int cpu) { return 0; }
+#endif
+
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DECLARE_PER_CPU(int, __kmap_atomic_idx);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d596a07..8cc99de 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1521,11 +1521,11 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cur_write_sz = hv_get_bytes_to_write(rbi);
- if (cur_write_sz < pending_sz)
+ if (cur_write_sz <= pending_sz)
return;
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
+ if (cached_write_sz <= pending_sz)
vmbus_setevent(channel);
return;
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index e7fdec4..6cc48ac 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length);
+
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 99eb77a..f25acfc 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,12 +19,12 @@
#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -32,6 +32,7 @@
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
#define IOMMU_PRIV (1 << 5)
+#define IOMMU_GUARD (1 << 28) /* Guard Page */
/* Use upstream device's bus attribute */
#define IOMMU_USE_UPSTREAM_HINT (1 << 6)
@@ -87,6 +88,8 @@ struct iommu_pgtbl_info {
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+
+#define IOMMU_DOMAIN_NAME_LEN 32
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
@@ -95,6 +98,7 @@ struct iommu_domain {
void *handler_token;
struct iommu_domain_geometry geometry;
void *iova_cookie;
+ char name[IOMMU_DOMAIN_NAME_LEN];
};
enum iommu_cap {
@@ -143,6 +147,7 @@ enum iommu_attr {
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
DOMAIN_ATTR_CB_STALL_DISABLE,
DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
+ DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
DOMAIN_ATTR_MAX,
};
@@ -239,10 +244,6 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
- unsigned long (*reg_read)(struct iommu_domain *domain,
- unsigned long offset);
- void (*reg_write)(struct iommu_domain *domain, unsigned long val,
- unsigned long offset);
void (*tlbi_domain)(struct iommu_domain *domain);
int (*enable_config_clocks)(struct iommu_domain *domain);
void (*disable_config_clocks)(struct iommu_domain *domain);
@@ -279,6 +280,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
@@ -338,58 +342,9 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
dma_addr_t iova);
-/**
- * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
- * @domain: the iommu domain where the fault has happened
- * @dev: the device where the fault has happened
- * @iova: the faulting address
- * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
- *
- * This function should be called by the low-level IOMMU implementations
- * whenever IOMMU faults happen, to allow high-level users, that are
- * interested in such events, to know about them.
- *
- * This event may be useful for several possible use cases:
- * - mere logging of the event
- * - dynamic TLB/PTE loading
- * - if restarting of the faulting device is required
- *
- * Returns 0 on success and an appropriate error code otherwise (if dynamic
- * PTE/TLB loading will one day be supported, implementations will be able
- * to tell whether it succeeded or not according to this return value).
- *
- * Specifically, -ENOSYS is returned if a fault handler isn't installed
- * (though fault handlers can also return -ENOSYS, in case they want to
- * elicit the default behavior of the IOMMU drivers).
- * Client fault handler returns -EBUSY to signal to the IOMMU driver
- * that the client will take responsibility for any further fault
- * handling, including clearing fault status registers or retrying
- * the faulting transaction.
- */
-static inline int report_iommu_fault(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags)
-{
- int ret = -ENOSYS;
-
- /*
- * if upper layers showed interest and installed a fault handler,
- * invoke it.
- */
- if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags,
- domain->handler_token);
-
- trace_io_page_fault(dev, iova, flags);
- return ret;
-}
-
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
+extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags);
extern void iommu_trigger_fault(struct iommu_domain *domain,
unsigned long flags);
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index f8e7e8c..dd6849d 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -40,8 +40,8 @@ enum ipa_nat_en_type {
};
/**
-* enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point
-*/
+ * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point
+ */
enum ipa_ipv6ct_en_type {
IPA_BYPASS_IPV6CT,
IPA_ENABLE_IPV6CT,
@@ -130,7 +130,7 @@ struct ipa_ep_cfg_nat {
* struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in
* IPA end-point
* @conn_track_en: Defines speculative conn_track action, means if specific
- * pipe needs to have UL/DL IPv6 Connection Tracking or Bybass
+ * pipe needs to have UL/DL IPv6 Connection Tracking or Bypass
* IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking
* 1: IPv6 UL/DL Connection Tracking.
* Valid for Input Pipes only (IPA consumer)
@@ -407,8 +407,8 @@ struct ipa_ep_cfg_seq {
/**
* struct ipa_ep_cfg - configuration of IPA end-point
- * @nat: NAT parmeters
- * @conn_track: IPv6CT parmeters
+ * @nat: NAT parameters
+ * @conn_track: IPv6CT parameters
* @hdr: Header parameters
* @hdr_ext: Extended header parameters
* @mode: Mode parameters
@@ -1165,6 +1165,16 @@ struct ipa_gsi_ep_config {
int ee;
};
+/**
+ * struct ipa_tz_unlock_reg_info - Used in order to unlock regions of memory by TZ
+ * @reg_addr - Physical address of the start of the region
+ * @size - Size of the region in bytes
+ */
+struct ipa_tz_unlock_reg_info {
+ u64 reg_addr;
+ u64 size;
+};
+
#if defined CONFIG_IPA || defined CONFIG_IPA3
/*
@@ -1282,15 +1292,24 @@ int ipa_commit_flt(enum ipa_ip_type ip);
int ipa_reset_flt(enum ipa_ip_type ip);
/*
- * NAT
+ * NAT\IPv6CT
*/
-int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+int ipa_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
+int ipa_allocate_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init);
int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
+int ipa_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
+
+int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
/*
* Messaging
@@ -1530,6 +1549,21 @@ typedef void (*ipa_ready_cb)(void *user_data);
int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
void *user_data);
+/**
+ * ipa_tz_unlock_reg - Unlocks memory regions so that they become accessible
+ * from AP.
+ * @reg_info - Pointer to array of memory regions to unlock
+ * @num_regs - Number of elements in the array
+ *
+ * Converts the input array of regions to a struct that TZ understands and
+ * issues an SCM call.
+ * Also flushes the memory cache to DDR in order to make sure that TZ sees the
+ * correct data structure.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
+
#else /* (CONFIG_IPA || CONFIG_IPA3) */
/*
@@ -1776,29 +1810,64 @@ static inline int ipa_reset_flt(enum ipa_ip_type ip)
/*
* NAT
*/
-static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+static inline int ipa_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
{
return -EPERM;
}
+static inline int ipa_allocate_nat_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ return -EPERM;
+}
+
+static inline int ipa_allocate_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc)
+{
+ return -EPERM;
+}
static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
{
return -EPERM;
}
+static inline int ipa_ipv6ct_init_cmd(struct ipa_ioc_ipv6ct_init *init)
+{
+ return -EPERM;
+}
static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
{
return -EPERM;
}
+static inline int ipa_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ return -EPERM;
+}
static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
{
return -EPERM;
}
+static inline int ipa_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_ipv6ct_table(
+ struct ipa_ioc_nat_ipv6ct_table_del *del)
+{
+ return -EPERM;
+}
+
+static inline int ipa_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+{
+ return -EPERM;
+}
+
/*
* Messaging
*/
@@ -2276,6 +2345,12 @@ static inline int ipa_register_ipa_ready_cb(
return -EPERM;
}
+static inline int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info,
+ u16 num_regs)
+{
+ return -EPERM;
+}
+
#endif /* (CONFIG_IPA || CONFIG_IPA3) */
#endif /* _IPA_H_ */
diff --git a/include/linux/key.h b/include/linux/key.h
index 7229147..ed9b44f 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -126,6 +126,11 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
return (unsigned long) key_ref & 1UL;
}
+enum key_state {
+ KEY_IS_UNINSTANTIATED,
+ KEY_IS_POSITIVE, /* Positively instantiated */
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -157,6 +162,7 @@ struct key {
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
+ short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
@@ -165,17 +171,16 @@ struct key {
#endif
unsigned long flags; /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
-#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
+#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -201,7 +206,6 @@ struct key {
struct list_head name_link;
struct assoc_array keys;
};
- int reject_error;
};
/* This is set on a keyring to restrict the addition of a link to a key
@@ -235,6 +239,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -341,17 +346,27 @@ extern void key_set_timeout(struct key *, unsigned);
#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
#define KEY_NEED_ALL 0x3f /* All the above permissions */
+static inline short key_read_state(const struct key *key)
+{
+ /* Barrier versus mark_key_instantiated(). */
+ return smp_load_acquire(&key->state);
+}
+
/**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
{
- return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
+{
+ return key_read_state(key) < 0;
}
#define rcu_dereference_key(KEY) \
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f5af30..580cc10 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1419,6 +1419,8 @@ union security_list_options {
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
+ int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1706,6 +1708,7 @@ struct security_hook_heads {
struct list_head inode_free_security;
struct list_head inode_init_security;
struct list_head inode_create;
+ struct list_head inode_post_create;
struct list_head inode_link;
struct list_head inode_unlink;
struct list_head inode_symlink;
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 2931aa4..f70420e 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
struct mbus_dram_window {
u8 cs_index;
u8 mbus_attr;
- u32 base;
- u32 size;
+ u64 base;
+ u64 size;
} cs[4];
};
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b718105..c5a4a25 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -180,6 +180,7 @@ extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *host);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
@@ -233,7 +234,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
bool lock_needed);
extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
bool lock_needed, bool is_cmdq_dcmd);
-extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
+extern int mmc_recovery_fallback_lower_speed(struct mmc_host *host);
/**
* mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index d0a69e7..f563bcf 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -61,7 +61,7 @@ struct sdio_func {
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
- u8 tmpbuf[4]; /* DMA:able scratch buffer */
+ u8 *tmpbuf; /* DMA:able scratch buffer */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a1a210d..25c0dc3 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -419,6 +419,11 @@ extern void mmu_notifier_synchronize(void);
#else /* CONFIG_MMU_NOTIFIER */
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
diff --git a/include/linux/msm_drm_notify.h b/include/linux/msm_drm_notify.h
new file mode 100644
index 0000000..924ba85
--- /dev/null
+++ b/include/linux/msm_drm_notify.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_DRM_NOTIFY_H_
+#define _MSM_DRM_NOTIFY_H_
+
+#include <linux/notifier.h>
+
+/* A hardware display blank change occurred */
+#define MSM_DRM_EVENT_BLANK 0x01
+/* A hardware display blank early change occurred */
+#define MSM_DRM_EARLY_EVENT_BLANK 0x02
+
+enum {
+ /* panel: power on */
+ MSM_DRM_BLANK_UNBLANK,
+ /* panel: power off */
+ MSM_DRM_BLANK_POWERDOWN,
+};
+
+enum msm_drm_display_id {
+ /* primary display */
+ MSM_DRM_PRIMARY_DISPLAY,
+ /* external display */
+ MSM_DRM_EXTERNAL_DISPLAY,
+ MSM_DRM_DISPLAY_MAX
+};
+
+struct msm_drm_notifier {
+ enum msm_drm_display_id id;
+ void *data;
+};
+
+int msm_drm_register_client(struct notifier_block *nb);
+int msm_drm_unregister_client(struct notifier_block *nb);
+#endif
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 08e0def..e34f468 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -117,6 +117,7 @@ struct msm_ext_disp_intf_ops {
* @get_intf_id: id of connected interface
* @teardown_done: audio session teardown done by qdsp
* @acknowledge: acknowledge audio status received by user modules
+ * @ready: notify audio when codec driver is ready.
*/
struct msm_ext_disp_audio_codec_ops {
int (*audio_info_setup)(struct platform_device *pdev,
@@ -127,6 +128,7 @@ struct msm_ext_disp_audio_codec_ops {
int (*get_intf_id)(struct platform_device *pdev);
void (*teardown_done)(struct platform_device *pdev);
int (*acknowledge)(struct platform_device *pdev, u32 ack);
+ int (*ready)(struct platform_device *pdev);
};
/**
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index ebca446..6e0b439 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -82,6 +82,8 @@ enum gsi_intr_type {
* @irq: IRQ number
* @phys_addr: physical address of GSI block
* @size: register size of GSI block
+ * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
+ * @mhi_er_id_limits: MHI event ring start and end ids
* @notify_cb: general notification callback
* @req_clk_cb: callback to request peripheral clock
* granted should be set to true if request is completed
@@ -105,6 +107,8 @@ struct gsi_per_props {
unsigned int irq;
phys_addr_t phys_addr;
unsigned long size;
+ bool mhi_er_id_limits_valid;
+ uint32_t mhi_er_id_limits[2];
void (*notify_cb)(struct gsi_per_notify *notify);
void (*req_clk_cb)(void *user_data, bool *granted);
int (*rel_clk_cb)(void *user_data);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 266471e..c92ed22 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2450,6 +2450,7 @@ void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_list(struct sk_buff *skb);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -3363,6 +3364,10 @@ int dev_change_xdp_fd(struct net_device *dev, int fd);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
+struct sk_buff *dev_hard_start_xmit_list(struct sk_buff *skb,
+ struct net_device *dev,
+ struct netdev_queue *txq,
+ int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
diff --git a/include/linux/pfk.h b/include/linux/pfk.h
new file mode 100644
index 0000000..82ee741
--- /dev/null
+++ b/include/linux/pfk.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_H_
+#define PFK_H_
+
+#include <linux/bio.h>
+
+struct ice_crypto_setting;
+
+#ifdef CONFIG_PFK
+
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool);
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe);
+int pfk_remove_key(const unsigned char *key, size_t key_size);
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2);
+void pfk_clear_on_reset(void);
+
+#else
+static inline int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ return -ENODEV;
+}
+
+static inline int pfk_remove_key(const unsigned char *key, size_t key_size)
+{
+ return -ENODEV;
+}
+
+static inline bool pfk_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2)
+{
+ return true;
+}
+
+static inline void pfk_clear_on_reset(void)
+{}
+
+#endif /* CONFIG_PFK */
+
+#endif /* PFK_H_ */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 8431c8c..a04d69a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -142,11 +142,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -602,7 +598,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b6c8c92..d253ca6 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -103,6 +103,9 @@ enum {
POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
POWER_SUPPLY_DP_DM_ICL_UP = 12,
+ POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+ POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+ POWER_SUPPLY_DP_DM_FORCE_12V = 15,
};
enum {
@@ -112,6 +115,11 @@ enum {
POWER_SUPPLY_PL_USBMID_USBMID,
};
+enum {
+ POWER_SUPPLY_CONNECTOR_TYPEC,
+ POWER_SUPPLY_CONNECTOR_MICRO_USB,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
@@ -257,6 +265,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONNECTOR_TYPE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30..7eeceac 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
+ * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 77a46bd..fc02ece 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -547,6 +547,24 @@ void se_config_packing(void __iomem *base, int bpw, int pack_words,
bool msb_to_lsb);
/**
+ * se_geni_clks_off() - Turn off clocks associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_off(struct se_geni_rsc *rsc);
+
+/**
+ * se_geni_clks_on() - Turn on clocks associated with the serial
+ * engine
+ * @rsc: Handle to resources associated with the serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int se_geni_clks_on(struct se_geni_rsc *rsc);
+
+/**
* se_geni_resources_off() - Turn off resources associated with the serial
* engine
* @rsc: Handle to resources associated with the serial engine.
@@ -842,6 +860,16 @@ static inline void se_config_packing(void __iomem *base, int bpw,
{
}
+static inline int se_geni_clks_on(struct se_geni_rsc *rsc)
+{
+ return -ENXIO;
+}
+
+static inline int se_geni_clks_off(struct se_geni_rsc *rsc)
+{
+ return -ENXIO;
+}
+
static inline int se_geni_resources_on(struct se_geni_rsc *rsc)
{
return -ENXIO;
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 0e4586f..3e060d9 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -968,6 +968,7 @@ enum qpnp_state_request {
* @PMIC_THERM inputs the units in millidegC.
*/
struct qpnp_adc_tm_btm_param {
+ uint32_t full_scale_code;
int32_t high_temp;
int32_t low_temp;
int32_t high_thr;
@@ -1026,13 +1027,13 @@ struct qpnp_vadc_scaling_ratio {
/**
* struct qpnp_adc_properties - Represent the ADC properties.
* @adc_reference: Reference voltage for QPNP ADC.
- * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @full_scale_code: Full scale value with intrinsic offset removed.
* @biploar: Polarity for QPNP ADC.
* @adc_hc: Represents using HC variant of the ADC controller.
*/
struct qpnp_adc_properties {
uint32_t adc_vdd_reference;
- uint32_t bitresolution;
+ uint32_t full_scale_code;
bool bipolar;
bool adc_hc;
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67860f3..0d4035a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,6 +184,8 @@ extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
unsigned int *big_max_nr);
extern unsigned int sched_get_cpu_util(int cpu);
extern u64 sched_get_cpu_last_busy_time(int cpu);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
#else
static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
{
@@ -201,6 +203,15 @@ static inline u64 sched_get_cpu_last_busy_time(int cpu)
{
return 0;
}
+static inline u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+ return 0;
+}
+static inline int sched_set_wake_up_idle(struct task_struct *p,
+ int wake_up_idle)
+{
+ return 0;
+}
#endif
extern void calc_global_load(unsigned long ticks);
@@ -1946,6 +1957,7 @@ struct task_struct {
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
unsigned long last_switch_count;
+ bool hang_detection_enabled;
#endif
/* filesystem information */
struct fs_struct *fs;
@@ -2698,6 +2710,7 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+extern bool cpupri_check_rt(void);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2710,6 +2723,10 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
+static inline bool cpupri_check_rt(void)
+{
+ return false;
+}
#endif
struct sched_load {
@@ -2718,9 +2735,6 @@ struct sched_load {
unsigned long predicted_load;
};
-extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
-extern u32 sched_get_wake_up_idle(struct task_struct *p);
-
struct cpu_cycle_counter_cb {
u64 (*get_cpu_cycle_counter)(int cpu);
};
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3597d55..12bd032 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -6,6 +6,7 @@ extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
+extern int sysctl_hung_task_selective_monitoring;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index 19e76db..cda2654 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -259,7 +259,8 @@ static inline void sde_rsc_client_destroy(struct sde_rsc_client *client)
static inline int sde_rsc_client_state_update(struct sde_rsc_client *client,
enum sde_rsc_state state,
- struct sde_rsc_cmd_config *config, int crtc_id)
+ struct sde_rsc_cmd_config *config, int crtc_id,
+ int *wait_vblank_crtc_id)
{
return 0;
}
diff --git a/include/linux/security.h b/include/linux/security.h
index c2125e9..02e05de 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -256,6 +257,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -304,6 +307,7 @@ int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
+
int security_task_create(unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -637,6 +641,13 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 32810f2..b1a09c5 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -738,7 +738,11 @@ struct sk_buff {
#endif
__u8 ipvs_property:1;
__u8 inner_protocol_type:1;
+ __u8 fast_forwarded:1;
__u8 remcsum_offload:1;
+
+ /* 4 or 6 bit hole */
+
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
#endif
diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h
index f1b1a7f..53af941 100644
--- a/include/linux/slimbus/slimbus.h
+++ b/include/linux/slimbus/slimbus.h
@@ -684,6 +684,7 @@ struct slim_pending_ch {
* first time it has reported present.
* @dev_list: List of devices on a controller
* @wd: Work structure associated with workqueue for presence notification
+ * @device_reset: Work structure for device reset notification
* @sldev_reconf: Mutex to protect the pending data-channel lists.
* @pending_msgsl: Message bandwidth reservation request by this client in
* slots that's pending reconfiguration.
@@ -706,6 +707,7 @@ struct slim_device {
bool notified;
struct list_head dev_list;
struct work_struct wd;
+ struct work_struct device_reset;
struct mutex sldev_reconf;
u32 pending_msgsl;
u32 cur_msgsl;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7321ae9..102c84d 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -470,6 +470,7 @@ void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
void svc_shutdown_net(struct svc_serv *, struct net *);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 41d81fb..751a510 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -275,6 +275,7 @@ struct trace_event_call {
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
+ struct perf_event *bpf_prog_owner;
int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index c28dd52..d43837f 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 00d2324..b0fad11 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -83,6 +83,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
+#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index ffb6393..092c32e 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -138,6 +138,7 @@ struct usb_phy {
/* reset the PHY clocks */
int (*reset)(struct usb_phy *x);
+ int (*disable_chirp)(struct usb_phy *x, bool disable);
};
/**
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
index 86c2fcc..b8a7cfd 100644
--- a/include/net/cnss_nl.h
+++ b/include/net/cnss_nl.h
@@ -23,12 +23,16 @@
* @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
* attribute.
* @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ * @CLD80211_ATTR_META_DATA: Embed meta data for above data. This will help
+ * wlan driver to peek into request message packet without opening up definition
+ * of complete request message.
*
* Any new message in future can be added as another attribute
*/
enum cld80211_attr {
CLD80211_ATTR_VENDOR_DATA = 1,
CLD80211_ATTR_DATA,
+ CLD80211_ATTR_META_DATA,
/* add new attributes above here */
__CLD80211_ATTR_AFTER_LAST,
diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h
index 6ff0fd0..77d14d1 100644
--- a/include/net/cnss_utils.h
+++ b/include/net/cnss_utils.h
@@ -33,6 +33,9 @@ extern int cnss_utils_get_driver_load_cnt(struct device *dev);
extern void cnss_utils_increment_driver_load_cnt(struct device *dev);
extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len);
extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern int cnss_utils_set_wlan_derived_mac_address(const u8 *in, uint32_t len);
+extern u8 *cnss_utils_get_wlan_derived_mac_address(struct device *dev,
+ uint32_t *num);
extern void cnss_utils_set_cc_source(struct device *dev,
enum cnss_utils_cc_src cc_source);
extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index e2dba93..2c7d876 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -902,21 +902,10 @@ struct ieee80211_tx_info {
unsigned long jiffies;
};
/* NB: vif can be NULL for injected frames */
- union {
- /* NB: vif can be NULL for injected frames */
- struct ieee80211_vif *vif;
-
- /* When packets are enqueued on txq it's easy
- * to re-construct the vif pointer. There's no
- * more space in tx_info so it can be used to
- * store the necessary enqueue time for packet
- * sojourn time computation.
- */
- codel_time_t enqueue_time;
- };
+ struct ieee80211_vif *vif;
struct ieee80211_key_conf *hw_key;
u32 flags;
- /* 4 bytes free */
+ codel_time_t enqueue_time;
} control;
struct {
u64 cookie;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 7815545..4260d3c 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -71,6 +71,11 @@ struct nf_conn_help {
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+/* Handle NATTYPE Stuff,only if NATTYPE module was defined */
+#ifdef CONFIG_IP_NF_TARGET_NATTYPE_MODULE
+#include <linux/netfilter_ipv4/ipt_NATTYPE.h>
+#endif
+
struct nf_conn {
/* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
@@ -120,6 +125,12 @@ struct nf_conn {
/* Extensions */
struct nf_ct_ext *ext;
+ void *sfe_entry;
+
+#ifdef CONFIG_IP_NF_TARGET_NATTYPE_MODULE
+ unsigned long nattype_entry;
+#endif
+
/* Storage reserved for other modules, must be the last member */
union nf_conntrack_proto proto;
};
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 62e17d1..a0043c7 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -50,6 +50,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
+extern void (*delete_sfe_entry)(struct nf_conn *ct);
+extern bool (*nattype_refresh_timer)(unsigned long nattype);
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d150b50..97983d1 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
+ unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
unsigned short frag_off;
nft_set_pktinfo(pkt, skb, state);
- protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
nft_set_pktinfo_proto_unspec(pkt, skb);
return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
struct ipv6hdr *ip6h, _ip6h;
unsigned int thoff = 0;
unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
if (pkt_len + sizeof(*ip6h) > skb->len)
return -1;
- protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0)
return -1;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 254a0fc..42adccd 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -756,7 +756,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
*/
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
- return nla_put(skb, attrtype, sizeof(u8), &value);
+ /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+ u8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u8), &tmp);
}
/**
@@ -767,7 +770,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
*/
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
- return nla_put(skb, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u16), &tmp);
}
/**
@@ -778,7 +783,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
*/
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put(skb, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}
/**
@@ -789,7 +796,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be16 tmp = value;
+
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -800,7 +809,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
- return nla_put(skb, attrtype, sizeof(__le16), &value);
+ __le16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}
/**
@@ -811,7 +822,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
*/
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
- return nla_put(skb, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u32), &tmp);
}
/**
@@ -822,7 +835,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
*/
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put(skb, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}
/**
@@ -833,7 +848,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be32 tmp = value;
+
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -844,7 +861,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
- return nla_put(skb, attrtype, sizeof(__le32), &value);
+ __le32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
/**
@@ -857,7 +876,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
u64 value, int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
+ u64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
@@ -870,7 +891,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
+ __be64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
}
/**
@@ -883,7 +906,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
int padattr)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+ __be64 tmp = value;
+
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
padattr);
}
@@ -897,7 +922,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
+ __le64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
}
/**
@@ -908,7 +935,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
*/
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
- return nla_put(skb, attrtype, sizeof(s8), &value);
+ s8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s8), &tmp);
}
/**
@@ -919,7 +948,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
*/
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
- return nla_put(skb, attrtype, sizeof(s16), &value);
+ s16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s16), &tmp);
}
/**
@@ -930,7 +961,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
*/
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
- return nla_put(skb, attrtype, sizeof(s32), &value);
+ s32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
/**
@@ -943,7 +976,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
int padattr)
{
- return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
+ s64 tmp = value;
+
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
}
/**
@@ -993,7 +1028,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
- return nla_put_be32(skb, attrtype, addr);
+ __be32 tmp = addr;
+
+ return nla_put_be32(skb, attrtype, tmp);
}
/**
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 2c098cd..231df4f 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -141,8 +141,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
struct sctp_event_subscribe *mask)
{
+ int offset = sn_type - SCTP_SN_TYPE_BASE;
char *amask = (char *) mask;
- return amask[sn_type - SCTP_SN_TYPE_BASE];
+
+ if (offset >= sizeof(struct sctp_event_subscribe))
+ return 0;
+ return amask[offset];
}
/* Given an event subscription, is this event enabled? */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 835c30e..9b6e6a4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -155,6 +155,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
+ u32 output_mark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -284,10 +285,12 @@ struct xfrm_policy_afinfo {
struct dst_entry *(*dst_lookup)(struct net *net,
int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr);
+ const xfrm_address_t *daddr,
+ u32 mark);
int (*get_saddr)(struct net *net, int oif,
xfrm_address_t *saddr,
- xfrm_address_t *daddr);
+ xfrm_address_t *daddr,
+ u32 mark);
void (*decode_session)(struct sk_buff *skb,
struct flowi *fl,
int reverse);
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index e2c72d1..3c2aff3 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -110,17 +110,18 @@ static inline u32 cmd_db_get_addr(const char *resource_id)
return 0;
}
-bool cmd_db_get_priority(u32 addr, u8 drv_id)
+static inline bool cmd_db_get_priority(u32 addr, u8 drv_id)
{
return false;
}
-int cmd_db_get_aux_data(const char *resource_id, u8 *data, int len)
+static inline int cmd_db_get_aux_data(const char *resource_id,
+ u8 *data, int len)
{
return -ENODEV;
}
-int cmd_db_get_aux_data_len(const char *resource_id)
+static inline int cmd_db_get_aux_data_len(const char *resource_id)
{
return -ENODEV;
}
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index e67ee0e..b4733d7 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -83,6 +83,7 @@ enum msm_dump_data_ids {
MSM_DUMP_DATA_RPM = 0xEA,
MSM_DUMP_DATA_SCANDUMP = 0xEB,
MSM_DUMP_DATA_RPMH = 0xEC,
+ MSM_DUMP_DATA_FCM = 0xEE,
MSM_DUMP_DATA_POWER_REGS = 0xED,
MSM_DUMP_DATA_TMC_ETF = 0xF0,
MSM_DUMP_DATA_TMC_REG = 0x100,
@@ -122,12 +123,19 @@ struct msm_dump_entry {
#ifdef CONFIG_QCOM_MEMORY_DUMP_V2
extern int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry);
+
+extern void *get_msm_dump_ptr(enum msm_dump_data_ids id);
#else
static inline int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry)
{
return -EINVAL;
}
+
+static inline void *get_msm_dump_ptr(enum msm_dump_data_ids id)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/soc/qcom/msm-core.h b/include/soc/qcom/msm-core.h
deleted file mode 100644
index f1c06a6..0000000
--- a/include/soc/qcom/msm-core.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_CORE_H
-#define __ARCH_ARM_MACH_MSM_CORE_H
-#ifdef CONFIG_APSS_CORE_EA
-void set_cpu_throttled(struct cpumask *mask, bool throttling);
-struct blocking_notifier_head *get_power_update_notifier(void);
-void trigger_cpu_pwr_stats_calc(void);
-struct cpu_pwr_stats *get_cpu_pwr_stats(void);
-#else
-static inline void set_cpu_throttled(struct cpumask *mask, bool throttling) {}
-struct blocking_notifier_head *get_power_update_notifier(void) {return NULL; }
-static inline void trigger_cpu_pwr_stats_calc(void) {}
-struct cpu_pwr_stats *get_cpu_pwr_stats(void) {return NULL; }
-#endif
-#endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 71bd075..9e91e4b 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -106,6 +106,10 @@
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs605")
#define early_machine_is_sda670() \
of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
+#define early_machine_is_msm8953() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
+#define early_machine_is_sdm450() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
#else
#define of_board_is_sim() 0
#define of_board_is_rumi() 0
@@ -148,6 +152,8 @@
#define early_machine_is_sdm670() 0
#define early_machine_is_qcs605() 0
#define early_machine_is_sda670() 0
+#define early_machine_is_msm8953() 0
+#define early_machine_is_sdm450() 0
#endif
#define PLATFORM_SUBTYPE_MDM 1
@@ -212,6 +218,8 @@ enum msm_cpu {
MSM_CPU_SDM670,
MSM_CPU_QCS605,
MSM_CPU_SDA670,
+ MSM_CPU_8953,
+ MSM_CPU_SDM450,
};
struct msm_soc_info {
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index feb58d4..4b9ee30 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index a03acd0..695257a 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
int port; /* created/attached port */
unsigned int flags; /* SNDRV_VIRMIDI_* */
rwlock_t filelist_lock;
+ struct rw_semaphore filelist_sem;
struct list_head filelist;
};
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 9652037..255e228 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -12,8 +12,10 @@
#include <linux/tracepoint.h>
#include <linux/pci.h>
+#include <linux/iommu.h>
struct device;
+struct iommu_domain;
DECLARE_EVENT_CLASS(iommu_group_event,
@@ -85,47 +87,84 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TRACE_EVENT(map,
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot),
- TP_ARGS(iova, paddr, size),
+ TP_ARGS(domain, iova, paddr, size, prot),
TP_STRUCT__entry(
+ __string(name, domain->name)
__field(u64, iova)
__field(u64, paddr)
__field(size_t, size)
+ __field(int, prot)
),
TP_fast_assign(
+ __assign_str(name, domain->name);
__entry->iova = iova;
__entry->paddr = paddr;
__entry->size = size;
+ __entry->prot = prot;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x",
+ __get_str(name), __entry->iova, __entry->paddr,
+ __entry->size, __entry->prot
)
);
TRACE_EVENT(unmap,
- TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+ size_t unmapped_size),
- TP_ARGS(iova, size, unmapped_size),
+ TP_ARGS(domain, iova, size, unmapped_size),
TP_STRUCT__entry(
+ __string(name, domain->name)
__field(u64, iova)
__field(size_t, size)
__field(size_t, unmapped_size)
),
TP_fast_assign(
+ __assign_str(name, domain->name);
__entry->iova = iova;
__entry->size = size;
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx",
+ __get_str(name), __entry->iova, __entry->size,
+ __entry->unmapped_size
+ )
+);
+
+TRACE_EVENT(map_sg,
+
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+ int prot),
+
+ TP_ARGS(domain, iova, size, prot),
+
+ TP_STRUCT__entry(
+ __string(name, domain->name)
+ __field(u64, iova)
+ __field(size_t, size)
+ __field(int, prot)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, domain->name);
+ __entry->iova = iova;
+ __entry->size = size;
+ __entry->prot = prot;
+ ),
+
+ TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x",
+ __get_str(name), __entry->iova, __entry->size,
+ __entry->prot
)
);
@@ -217,6 +256,24 @@ DEFINE_EVENT(iommu_errata_tlbi, errata_failed,
TP_ARGS(dev, time)
);
+
+TRACE_EVENT(smmu_init,
+
+ TP_PROTO(u64 time),
+
+ TP_ARGS(time),
+
+ TP_STRUCT__entry(
+ __field(u64, time)
+ ),
+
+ TP_fast_assign(
+ __entry->time = time;
+ ),
+
+ TP_printk("ARM SMMU init latency: %lld us", __entry->time)
+);
+
#endif /* _TRACE_IOMMU_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b4bcedf..a3b01c6 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -262,9 +262,9 @@ TRACE_EVENT(sched_update_history,
TRACE_EVENT(sched_get_task_cpu_cycles,
- TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+ TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p),
- TP_ARGS(cpu, event, cycles, exec_time),
+ TP_ARGS(cpu, event, cycles, exec_time, p),
TP_STRUCT__entry(
__field(int, cpu )
@@ -273,6 +273,8 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
__field(u64, exec_time )
__field(u32, freq )
__field(u32, legacy_freq )
+ __field(pid_t, pid )
+ __array(char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
@@ -282,11 +284,13 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
__entry->exec_time = exec_time;
__entry->freq = cpu_cycles_to_freq(cycles, exec_time);
__entry->legacy_freq = cpu_cur_freq(cpu);
+ __entry->pid = p->pid;
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
),
- TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
+ TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u task=%d (%s)",
__entry->cpu, __entry->event, __entry->cycles,
- __entry->exec_time, __entry->freq, __entry->legacy_freq)
+ __entry->exec_time, __entry->freq, __entry->legacy_freq, __entry->pid, __entry->comm)
);
TRACE_EVENT(sched_update_task_ravg,
diff --git a/include/trace/events/trace_msm_core.h b/include/trace/events/trace_msm_core.h
deleted file mode 100644
index 45747f7..0000000
--- a/include/trace/events/trace_msm_core.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM msm_core
-
-#if !defined(_TRACE_MSM_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_MSM_CORE_H
-
-#include <linux/tracepoint.h>
-#include <linux/thermal.h>
-
-TRACE_EVENT(cpu_stats,
-
- TP_PROTO(unsigned int cpu, long temp,
- uint64_t min_power, uint64_t max_power),
-
- TP_ARGS(cpu, temp, min_power, max_power),
-
- TP_STRUCT__entry(
- __field(unsigned int, cpu)
- __field(long, temp)
- __field(uint64_t, min_power)
- __field(uint64_t, max_power)
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->temp = temp;
- __entry->min_power = min_power;
- __entry->max_power = max_power;
- ),
-
- TP_printk("Cpu%d: temp:%ld power@minfreq:%llu power@maxfreq:%llu",
- __entry->cpu, __entry->temp, __entry->min_power,
- __entry->max_power)
-);
-
-TRACE_EVENT(temp_threshold,
-
- TP_PROTO(unsigned int cpu, long temp,
- long hi_thresh, long low_thresh),
-
- TP_ARGS(cpu, temp, hi_thresh, low_thresh),
-
- TP_STRUCT__entry(
- __field(unsigned int, cpu)
- __field(long, temp)
- __field(long, hi_thresh)
- __field(long, low_thresh)
- ),
-
- TP_fast_assign(
- __entry->cpu = cpu;
- __entry->temp = temp;
- __entry->hi_thresh = hi_thresh;
- __entry->low_thresh = low_thresh;
- ),
-
- TP_printk("Cpu%d: temp:%ld hi_thresh:%ld low_thresh:%ld",
- __entry->cpu, __entry->temp, __entry->hi_thresh,
- __entry->low_thresh)
-);
-
-TRACE_EVENT(temp_notification,
-
- TP_PROTO(unsigned int sensor_id, enum thermal_trip_type type,
- int temp, int prev_temp),
-
- TP_ARGS(sensor_id, type, temp, prev_temp),
-
- TP_STRUCT__entry(
- __field(unsigned int, sensor_id)
- __field(enum thermal_trip_type, type)
- __field(int, temp)
- __field(int, prev_temp)
- ),
-
- TP_fast_assign(
- __entry->sensor_id = sensor_id;
- __entry->type = type;
- __entry->temp = temp;
- __entry->prev_temp = prev_temp;
- ),
-
- TP_printk("Sensor_id%d: %s threshold triggered temp:%d(previous:%d)",
- __entry->sensor_id,
- __entry->type == THERMAL_TRIP_CONFIGURABLE_HI ? "High" : "Low",
- __entry->temp, __entry->prev_temp)
-);
-
-#endif
-#define TRACE_INCLUDE_FILE trace_msm_core
-#include <trace/define_trace.h>
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 6dc4735..aaa5cf7 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -225,10 +225,10 @@ TRACE_EVENT(ufshcd_command,
),
TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->tag,
- __entry->doorbell, __entry->transfer_len,
- __entry->intr, __entry->lba, (u32)__entry->opcode
+ "%s: %14s: tag: %-2u cmd: 0x%-2x lba: %-9llu size: %-7d DB: 0x%-8x IS: 0x%x",
+ __get_str(dev_name), __get_str(str), __entry->tag,
+ (u32)__entry->opcode, __entry->lba, __entry->transfer_len,
+ __entry->doorbell, __entry->intr
)
);
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 7846ec8..e6ff4cc 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -154,6 +154,7 @@ extern "C" {
/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
+#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d5438d3..6f33a4a 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -61,6 +61,44 @@ struct drm_msm_timespec {
__s64 tv_nsec; /* nanoseconds */
};
+/*
+ * HDR Metadata
+ * These are defined as per EDID spec and shall be used by the sink
+ * to set the HDR metadata for playback from userspace.
+ */
+
+#define HDR_PRIMARIES_COUNT 3
+
+#define DRM_MSM_EXT_HDR_METADATA
+struct drm_msm_ext_hdr_metadata {
+ __u32 hdr_state; /* HDR state */
+ __u32 eotf; /* electro optical transfer function */
+ __u32 hdr_supported; /* HDR supported */
+ __u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */
+ __u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */
+ __u32 white_point_x; /* white_point_x */
+ __u32 white_point_y; /* white_point_y */
+ __u32 max_luminance; /* Max luminance */
+ __u32 min_luminance; /* Min Luminance */
+ __u32 max_content_light_level; /* max content light level */
+ __u32 max_average_light_level; /* max average light level */
+};
+
+/**
+ * HDR sink properties
+ * These are defined as per EDID spec and shall be used by the userspace
+ * to determine the HDR properties to be set to the sink.
+ */
+#define DRM_MSM_EXT_HDR_PROPERTIES
+struct drm_msm_ext_hdr_properties {
+ __u8 hdr_metadata_type_one; /* static metadata type one */
+ __u32 hdr_supported; /* HDR supported */
+ __u32 hdr_eotf; /* electro optical transfer function */
+ __u32 hdr_max_luminance; /* Max luminance */
+ __u32 hdr_avg_luminance; /* Avg luminance */
+ __u32 hdr_min_luminance; /* Min Luminance */
+};
+
#define MSM_PARAM_GPU_ID 0x01
#define MSM_PARAM_GMEM_SIZE 0x02
#define MSM_PARAM_CHIP_ID 0x03
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 4201c95..45f3222 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -9,6 +9,7 @@
#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_GET_ERR_FATAL _IOR(ESOC_CODE, 5, unsigned int)
#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
@@ -17,6 +18,7 @@
#define HSIC "HSIC"
#define HSICPCIe "HSIC+PCIe"
#define PCIe "PCIe"
+#define ESOC_REQ_SEND_SHUTDOWN ESOC_REQ_SEND_SHUTDOWN
enum esoc_evt {
ESOC_RUN_STATE = 0x1,
@@ -57,6 +59,7 @@ enum esoc_req {
ESOC_REQ_IMG = 1,
ESOC_REQ_DEBUG,
ESOC_REQ_SHUTDOWN,
+ ESOC_REQ_SEND_SHUTDOWN,
};
#ifdef __KERNEL__
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 1ba819b..1917c0d 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -47,6 +47,12 @@
#define QMI_IPA_MAX_FILTERS_EX_V01 128
#define QMI_IPA_MAX_PIPES_V01 20
#define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently max we can use is only 1. But for scalability purpose
+ * we are having max value as 8.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
#define IPA_INT_MAX ((int)(~0U>>1))
#define IPA_INT_MIN (-IPA_INT_MAX - 1)
@@ -989,6 +995,16 @@ struct ipa_fltr_installed_notif_req_msg_v01 {
* failure, the Rule Ids in this list must be set to a reserved
* index (255).
*/
+
+ /* Optional */
+ /* List of destination pipe IDs. */
+ uint8_t dst_pipe_id_valid;
+ /* Must be set to true if dst_pipe_id is being passed. */
+ uint32_t dst_pipe_id_len;
+ /* Must be set to # of elements in dst_pipe_id. */
+ uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+ /* Provides the list of destination pipe IDs for a source pipe. */
+
}; /* Message */
/* Response Message; This is the message that is exchanged between the
@@ -1626,6 +1642,273 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
*/
}; /* Message */
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+ /* Mandatory */
+ /* Collect statistics per client; */
+ uint8_t enable_per_client_stats;
+ /*
+ * Indicates whether to start or stop collecting
+ * per client statistics.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+}; /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+ uint32_t client_id;
+ /*
+ * Id of the client on APPS processor side for which Modem processor
+ * needs to send uplink/downlink statistics.
+ */
+
+ uint32_t src_pipe_id;
+ /*
+ * IPA consumer pipe on which client on APPS side sent uplink
+ * data to modem.
+ */
+
+ uint64_t num_ul_ipv4_bytes;
+ /*
+ * Accumulated number of uplink IPv4 bytes for a client.
+ */
+
+ uint64_t num_ul_ipv6_bytes;
+ /*
+ * Accumulated number of uplink IPv6 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv4_bytes;
+ /*
+ * Accumulated number of downlink IPv4 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv6_bytes;
+ /*
+ * Accumulated number of downlink IPv6 byes for a client.
+ */
+
+
+ uint32_t num_ul_ipv4_pkts;
+ /*
+ * Accumulated number of uplink IPv4 packets for a client.
+ */
+
+ uint32_t num_ul_ipv6_pkts;
+ /*
+ * Accumulated number of uplink IPv6 packets for a client.
+ */
+
+ uint32_t num_dl_ipv4_pkts;
+ /*
+ * Accumulated number of downlink IPv4 packets for a client.
+ */
+
+ uint32_t num_dl_ipv6_pkts;
+ /*
+ * Accumulated number of downlink IPv6 packets for a client.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a givenclient.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+ /* Mandatory */
+ /* Client id */
+ uint32_t client_id;
+ /*
+ * Id of the client on APPS processor side for which Modem processor
+ * needs to send uplink/downlink statistics. if client id is specified
+ * as 0xffffffff, then Q6 will send the stats for all the clients of
+ * the specified source pipe.
+ */
+
+ /* Mandatory */
+ /* Source pipe id */
+ uint32_t src_pipe_id;
+ /*
+ * IPA consumer pipe on which client on APPS side sent uplink
+ * data to modem. In future, this implementation can be extended
+ * to provide 0xffffffff as the source pipe id, where Q6 will send
+ * the stats of all the clients across all different tethered-pipes.
+ */
+
+ /* Optional */
+ /* Reset client statistics. */
+ uint8_t reset_stats_valid;
+ /* Must be set to true if reset_stats is being passed. */
+ uint8_t reset_stats;
+ /*
+ * Option to reset the statistics currently collected by modem for this
+ * particular client.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+
+ /* Optional */
+ /* Per clients Statistics List */
+ uint8_t per_client_stats_list_valid;
+ /* Must be set to true if per_client_stats_list is being passed. */
+ uint32_t per_client_stats_list_len;
+ /* Must be set to # of elements in per_client_stats_list. */
+ struct ipa_per_client_stats_info_type_v01
+ per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+ /*
+ * List of all per client statistics that are retrieved.
+ */
+}; /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+ enum ipa_ip_type_enum_v01 ip_type;
+ /*
+ * IP type for which this rule is applicable.
+ * The driver must identify the filter table (v6 or v4), and this
+ * field is essential for that. Values:
+ * - QMI_IPA_IP_TYPE_INVALID (0) -- Invalid IP type identifier
+ * - QMI_IPA_IP_TYPE_V4 (1) -- IPv4 type
+ * - QMI_IPA_IP_TYPE_V6 (2) -- IPv6 type
+ */
+
+ struct ipa_filter_rule_type_v01 filter_rule;
+ /*
+ * Rules in the filter specification. These rules are the
+ * ones that are matched against fields in the packet.
+ * Currently we only send IPv6 whitelist rules to Q6.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requestes remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+ /* Optional */
+ /* Uplink Firewall Specification */
+ uint32_t firewall_rules_list_len;
+ /* Must be set to # of elements in firewall_rules_list. */
+ struct ipa_ul_firewall_rule_type_v01
+ firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+ /*
+ * List of uplink firewall specifications of filters that must be
+ * installed.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+
+ /* Optional */
+ uint8_t disable_valid;
+ /* Must be set to true if enable is being passed. */
+ uint8_t disable;
+ /*
+ * Indicates whether uplink firewall needs to be enabled or disabled.
+ */
+
+ /* Optional */
+ uint8_t are_blacklist_filters_valid;
+ /* Must be set to true if are_blacklist_filters is being passed. */
+ uint8_t are_blacklist_filters;
+ /*
+ * Indicates whether the filters received as part of this message are
+ * blacklist filters. i.e. drop uplink packets matching these rules.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requestes remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /*
+ * Standard response type.
+ * Standard response type. Contains the following data members:
+ * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+ * qmi_error_type -- Error code. Possible error code values are
+ * described in the error codes section of each message definition.
+ */
+}; /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+ IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ /* To force a 32 bit signed enum. Do not change or use*/
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+ /* Indicates that the uplink firewall rules
+ * are configured successfully.
+ */
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+ /* Indicates that the uplink firewall rules
+ * are not configured successfully.
+ */
+ IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+ /* To force a 32 bit signed enum. Do not change or use*/
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+ enum ipa_ul_firewall_status_enum_v01 is_success;
+ /*
+ * Indicates whether the uplink firewall rules are configured
+ * successfully.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+};
+
+/*
+ * Indication Message; Requestes remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+ struct ipa_ul_firewall_config_result_type_v01 result;
+}; /* Message */
+
+
/*Service Message Definition*/
#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -1659,6 +1942,13 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
/* add for max length*/
#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
@@ -1667,7 +1957,7 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -1700,6 +1990,15 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
/* Service Object Accessor */
#endif/* IPA_QMI_SERVICE_V01_H */
diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
index 5062fb5..ed57211 100644
--- a/include/uapi/linux/mroute6.h
+++ b/include/uapi/linux/mroute6.h
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sockios.h>
+#include <linux/in6.h> /* For struct sockaddr_in6. */
/*
* Based on the MROUTING 3.5 defines primarily to keep
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 51c0165..d3b9a33 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -16,65 +16,85 @@
#define IPA_IOC_MAGIC 0xCF
/**
+ * IPA device full path
+ */
+#define IPA_DEV_NAME "/dev/ipa"
+
+/**
+ * IPA NAT table character device name
+ */
+#define IPA_NAT_DEV_NAME "ipaNatTable"
+
+/**
+ * IPA IPv6CT table character device name
+ */
+#define IPA_IPV6CT_DEV_NAME "ipaIpv6CTTable"
+
+ /**
* name of the default routing tables for v4 and v6
*/
#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
/**
- * the commands supported by IPA driver
+ * commands supported by IPA driver
*/
-#define IPA_IOCTL_ADD_HDR 0
-#define IPA_IOCTL_DEL_HDR 1
-#define IPA_IOCTL_ADD_RT_RULE 2
-#define IPA_IOCTL_DEL_RT_RULE 3
-#define IPA_IOCTL_ADD_FLT_RULE 4
-#define IPA_IOCTL_DEL_FLT_RULE 5
-#define IPA_IOCTL_COMMIT_HDR 6
-#define IPA_IOCTL_RESET_HDR 7
-#define IPA_IOCTL_COMMIT_RT 8
-#define IPA_IOCTL_RESET_RT 9
-#define IPA_IOCTL_COMMIT_FLT 10
-#define IPA_IOCTL_RESET_FLT 11
-#define IPA_IOCTL_DUMP 12
-#define IPA_IOCTL_GET_RT_TBL 13
-#define IPA_IOCTL_PUT_RT_TBL 14
-#define IPA_IOCTL_COPY_HDR 15
-#define IPA_IOCTL_QUERY_INTF 16
-#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
-#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
-#define IPA_IOCTL_GET_HDR 19
-#define IPA_IOCTL_PUT_HDR 20
-#define IPA_IOCTL_SET_FLT 21
-#define IPA_IOCTL_ALLOC_NAT_MEM 22
-#define IPA_IOCTL_V4_INIT_NAT 23
-#define IPA_IOCTL_NAT_DMA 24
-#define IPA_IOCTL_V4_DEL_NAT 26
-#define IPA_IOCTL_PULL_MSG 27
-#define IPA_IOCTL_GET_NAT_OFFSET 28
-#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
-#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
-#define IPA_IOCTL_GENERATE_FLT_EQ 31
-#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
-#define IPA_IOCTL_QUERY_EP_MAPPING 33
-#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
-#define IPA_IOCTL_WRITE_QMAPID 35
-#define IPA_IOCTL_MDFY_FLT_RULE 36
-#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
-#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
-#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39
-#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
-#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
-#define IPA_IOCTL_MDFY_RT_RULE 42
-#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
-#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
-#define IPA_IOCTL_GET_HW_VERSION 45
-#define IPA_IOCTL_ADD_RT_RULE_EXT 46
-#define IPA_IOCTL_ADD_VLAN_IFACE 47
-#define IPA_IOCTL_DEL_VLAN_IFACE 48
-#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49
-#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50
-#define IPA_IOCTL_NAT_MODIFY_PDN 51
-#define IPA_IOCTL_MAX 52
+#define IPA_IOCTL_ADD_HDR 0
+#define IPA_IOCTL_DEL_HDR 1
+#define IPA_IOCTL_ADD_RT_RULE 2
+#define IPA_IOCTL_DEL_RT_RULE 3
+#define IPA_IOCTL_ADD_FLT_RULE 4
+#define IPA_IOCTL_DEL_FLT_RULE 5
+#define IPA_IOCTL_COMMIT_HDR 6
+#define IPA_IOCTL_RESET_HDR 7
+#define IPA_IOCTL_COMMIT_RT 8
+#define IPA_IOCTL_RESET_RT 9
+#define IPA_IOCTL_COMMIT_FLT 10
+#define IPA_IOCTL_RESET_FLT 11
+#define IPA_IOCTL_DUMP 12
+#define IPA_IOCTL_GET_RT_TBL 13
+#define IPA_IOCTL_PUT_RT_TBL 14
+#define IPA_IOCTL_COPY_HDR 15
+#define IPA_IOCTL_QUERY_INTF 16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR 19
+#define IPA_IOCTL_PUT_HDR 20
+#define IPA_IOCTL_SET_FLT 21
+#define IPA_IOCTL_ALLOC_NAT_MEM 22
+#define IPA_IOCTL_V4_INIT_NAT 23
+#define IPA_IOCTL_TABLE_DMA_CMD 24
+#define IPA_IOCTL_NAT_DMA IPA_IOCTL_TABLE_DMA_CMD
+#define IPA_IOCTL_INIT_IPV6CT_TABLE 25
+#define IPA_IOCTL_V4_DEL_NAT 26
+#define IPA_IOCTL_PULL_MSG 27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
+#define IPA_IOCTL_GENERATE_FLT_EQ 31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
+#define IPA_IOCTL_QUERY_EP_MAPPING 33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
+#define IPA_IOCTL_WRITE_QMAPID 35
+#define IPA_IOCTL_MDFY_FLT_RULE 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED 39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
+#define IPA_IOCTL_MDFY_RT_RULE 42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
+#define IPA_IOCTL_GET_HW_VERSION 45
+#define IPA_IOCTL_ADD_RT_RULE_EXT 46
+#define IPA_IOCTL_ADD_VLAN_IFACE 47
+#define IPA_IOCTL_DEL_VLAN_IFACE 48
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50
+#define IPA_IOCTL_NAT_MODIFY_PDN 51
+#define IPA_IOCTL_ALLOC_NAT_TABLE 52
+#define IPA_IOCTL_ALLOC_IPV6CT_TABLE 53
+#define IPA_IOCTL_DEL_NAT_TABLE 54
+#define IPA_IOCTL_DEL_IPV6CT_TABLE 55
/**
* max size of the header to be inserted
@@ -107,6 +127,17 @@
#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
/**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
* the attributes of the rule (routing or filtering)
*/
#define IPA_FLT_TOS (1ul << 0)
@@ -450,6 +481,7 @@ enum ipa_tethering_stats_event {
IPA_TETHERING_STATS_EVENT_MAX,
};
+
enum ipa_quota_event {
IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
IPA_QUOTA_EVENT_MAX,
@@ -469,7 +501,13 @@ enum ipa_vlan_l2tp_event {
IPA_VLAN_L2TP_EVENT_MAX,
};
-#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+enum ipa_per_client_stats_event {
+ IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+ IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+ IPA_PER_CLIENT_STATS_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
@@ -1155,6 +1193,48 @@ struct ipa_rt_rule_del {
};
/**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table, it is NOT possible to add rules at
+ * the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ * specifies rule_id as 0 the driver will assign a new rule_id
+ * 0 for success,
+ * -1 for failure
+ */
+struct ipa_rt_rule_add_ext {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+ uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ * no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add_ext rules[0];
+};
+
+
+/**
* struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
* multiple headers and commit)
* @commit: should rules be removed from IPA HW also?
@@ -1442,15 +1522,26 @@ struct ipa_ioc_nat_alloc_mem {
};
/**
- * struct ipa_ioc_v4_nat_init - nat table initialization
- * parameters
+ * struct ipa_ioc_nat_ipv6ct_table_alloc - NAT/IPv6CT table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc {
+ size_t size;
+ off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization parameters
* @tbl_index: input parameter, index of the table
* @ipv4_rules_offset: input parameter, ipv4 rules address offset
* @expn_rules_offset: input parameter, ipv4 expansion rules address offset
* @index_offset: input parameter, index rules offset
* @index_expn_offset: input parameter, index expansion rules offset
- * @table_entries: input parameter, ipv4 rules table size in entries
- * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @table_entries: input parameter, ipv4 rules table number of entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table number of
+ * entries
* @ip_addr: input parameter, public ip address
*/
struct ipa_ioc_v4_nat_init {
@@ -1467,6 +1558,23 @@ struct ipa_ioc_v4_nat_init {
};
/**
+ * struct ipa_ioc_ipv6ct_init - IPv6CT table initialization parameters
+ * @base_table_offset: input parameter, IPv6CT base table address offset
+ * @expn_table_offset: input parameter, IPv6CT expansion table address offset
+ * @table_entries: input parameter, IPv6CT table number of entries
+ * @expn_table_entries: input parameter, IPv6CT expansion table number of
+ * entries
+ * @tbl_index: input parameter, index of the table
+ */
+struct ipa_ioc_ipv6ct_init {
+ uint32_t base_table_offset;
+ uint32_t expn_table_offset;
+ uint16_t table_entries;
+ uint16_t expn_table_entries;
+ uint8_t tbl_index;
+};
+
+/**
* struct ipa_ioc_v4_nat_del - nat table delete parameter
* @table_index: input parameter, index of the table
* @public_ip_addr: input parameter, public ip address
@@ -1477,7 +1585,15 @@ struct ipa_ioc_v4_nat_del {
};
/**
- * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * struct ipa_ioc_nat_ipv6ct_table_del - NAT/IPv6CT table delete parameter
+ * @table_index: input parameter, index of the table
+ */
+struct ipa_ioc_nat_ipv6ct_table_del {
+ uint8_t table_index;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat/ipv6ct dma command parameter
* @table_index: input parameter, index of the table
* @base_addr: type of table, from which the base address of the table
* can be inferred
@@ -1494,7 +1610,7 @@ struct ipa_ioc_nat_dma_one {
};
/**
- * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat/ipv6ct dma commands
* @entries: number of dma commands in use
* @dma: data pointer to the dma commands
*/
@@ -1505,12 +1621,12 @@ struct ipa_ioc_nat_dma_cmd {
};
/**
-* struct ipa_ioc_nat_pdn_entry - PDN entry modification data
-* @pdn_index: index of the entry in the PDN config table to be changed
-* @public_ip: PDN's public ip
-* @src_metadata: PDN's source NAT metadata for metadata replacement
-* @dst_metadata: PDN's destination NAT metadata for metadata replacement
-*/
+ * struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+ * @pdn_index: index of the entry in the PDN config table to be changed
+ * @public_ip: PDN's public ip
+ * @src_metadata: PDN's source NAT metadata for metadata replacement
+ * @dst_metadata: PDN's destination NAT metadata for metadata replacement
+ */
struct ipa_ioc_nat_pdn_entry {
uint8_t pdn_index;
uint32_t public_ip;
@@ -1677,6 +1793,52 @@ enum ipacm_client_enum {
IPACM_CLIENT_WLAN,
IPACM_CLIENT_MAX
};
+
+enum ipacm_per_client_device_type {
+ IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+ IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+ IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * @lanIface - Name of the lan interface
+ * @mac: Mac address of the client.
+ */
+struct ipa_lan_client_msg {
+ char lanIface[IPA_RESOURCE_NAME_MAX];
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ int8_t client_idx;
+ uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+ int32_t ul_src_pipe;
+ uint8_t hdr_len;
+ uint32_t num_clients;
+ struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
/**
* actual IOCTLs supported by IPA driver
*/
@@ -1689,6 +1851,9 @@ enum ipacm_client_enum {
#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE, \
struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE_EXT, \
+ struct ipa_ioc_add_rt_rule_ext *)
#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE_AFTER, \
struct ipa_ioc_add_rt_rule_after *)
@@ -1752,15 +1917,33 @@ enum ipacm_client_enum {
#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ALLOC_NAT_MEM, \
struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_ALLOC_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_NAT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_alloc *)
#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_INIT_NAT, \
struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_INIT_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_INIT_IPV6CT_TABLE, \
+ struct ipa_ioc_ipv6ct_init *)
#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_NAT_DMA, \
struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_TABLE_DMA_CMD _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_TABLE_DMA_CMD, \
+ struct ipa_ioc_nat_dma_cmd *)
#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_V4_DEL_NAT, \
struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_DEL_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_NAT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_DEL_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_DEL_IPV6CT_TABLE, \
+ struct ipa_ioc_nat_ipv6ct_table_del *)
#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_NAT_OFFSET, \
uint32_t *)
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index f05155b..9ee2a8b 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -142,6 +142,7 @@
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
+#define KGSL_MEMFLAGS_IOCOHERENT 0x80000000ULL
/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK 0x0000FF00
diff --git a/include/uapi/linux/netfilter_ipv4/Kbuild b/include/uapi/linux/netfilter_ipv4/Kbuild
index ecb291d..7391cdc 100644
--- a/include/uapi/linux/netfilter_ipv4/Kbuild
+++ b/include/uapi/linux/netfilter_ipv4/Kbuild
@@ -8,3 +8,4 @@
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_ttl.h
+header-y += ipt_NATTYPE.h
diff --git a/include/uapi/linux/netfilter_ipv4/ipt_NATTYPE.h b/include/uapi/linux/netfilter_ipv4/ipt_NATTYPE.h
new file mode 100644
index 0000000..b612290
--- /dev/null
+++ b/include/uapi/linux/netfilter_ipv4/ipt_NATTYPE.h
@@ -0,0 +1,25 @@
+#ifndef _IPT_NATTYPE_H_target
+#define _IPT_NATTYPE_H_target
+
+#define NATTYPE_TIMEOUT 300
+
+enum nattype_mode {
+ MODE_DNAT,
+ MODE_FORWARD_IN,
+ MODE_FORWARD_OUT
+};
+
+enum nattype_type {
+ TYPE_PORT_ADDRESS_RESTRICTED,
+ TYPE_ENDPOINT_INDEPENDENT,
+ TYPE_ADDRESS_RESTRICTED
+};
+
+
+struct ipt_nattype_info {
+ u_int16_t mode;
+ u_int16_t type;
+};
+
+#endif /*_IPT_NATTYPE_H_target*/
+
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 0f9265c..7af20a1 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -35,6 +35,7 @@
#define _LINUX_RDS_H
#include <linux/types.h>
+#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
#define RDS_IB_ABI_VERSION 0x301
@@ -223,7 +224,7 @@ struct rds_get_mr_args {
};
struct rds_get_mr_for_dest_args {
- struct sockaddr_storage dest_addr;
+ struct __kernel_sockaddr_storage dest_addr;
struct rds_iovec vec;
uint64_t cookie_addr;
uint64_t flags;
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index 04aaaad..2992e2c 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -34,6 +34,12 @@
#define WAN_IOCTL_ADD_FLT_RULE_EX 9
#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10
#define WAN_IOCTL_NOTIFY_WAN_STATE 11
+#define WAN_IOCTL_ADD_UL_FLT_RULE 12
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS 13
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS 14
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO 15
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO 16
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG 17
/* User space may not have this defined. */
#ifndef IFNAMSIZ
@@ -130,6 +136,56 @@ struct wan_ioctl_query_dl_filter_stats {
struct wan_ioctl_notify_wan_state {
uint8_t up;
};
+struct wan_ioctl_send_lan_client_msg {
+ /* Lan client info. */
+ struct ipa_lan_client_msg lan_client;
+ /* Event to indicate whether client is
+ * connected or disconnected.
+ */
+ enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Init client. */
+ uint8_t client_init;
+ /* Client Index */
+ int8_t client_idx;
+ /* Header length of the client. */
+ uint8_t hdr_len;
+ /* Source pipe of the lan client. */
+ enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Ipv4 UL traffic bytes. */
+ uint64_t ipv4_tx_bytes;
+ /* Ipv4 DL traffic bytes. */
+ uint64_t ipv4_rx_bytes;
+ /* Ipv6 UL traffic bytes. */
+ uint64_t ipv6_tx_bytes;
+ /* Ipv6 DL traffic bytes. */
+ uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* Indicate whether to reset the stats (use 1) or not */
+ uint8_t reset_stats;
+ /* Indicates whether client is disconnected. */
+ uint8_t disconnect_clnt;
+ /* Number of clients. */
+ uint8_t num_clients;
+ /* Client information. */
+ struct wan_ioctl_per_client_info
+ client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_ADD_FLT_RULE, \
@@ -179,4 +235,27 @@ struct wan_ioctl_notify_wan_state {
WAN_IOCTL_NOTIFY_WAN_STATE, \
struct wan_ioctl_notify_wan_state *)
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_UL_FLT_RULE, \
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+ bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+ struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+ struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
#endif /* _RMNET_IPA_FD_IOCTL_H */
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e..856de39 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
#define SPIDEV_H
#include <linux/types.h>
+#include <linux/ioctl.h>
/* User space versions of kernel symbols for SPI clocking modes,
* matching <linux/spi/spi.h>
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 7fe799e..e75e1b6 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -56,7 +56,6 @@ struct itimerval {
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_SGI_CYCLE 10 /* Hardware specific */
#define CLOCK_TAI 11
-#define CLOCK_POWEROFF_ALARM 12
#define MAX_CLOCKS 16
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 0e5ce0d..0d69769 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -759,6 +759,7 @@ struct usb_interface_assoc_descriptor {
__u8 iFunction;
} __attribute__ ((packed));
+#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
/*-------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index d750568..1df8c41 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1134,6 +1134,37 @@ enum v4l2_mpeg_vidc_video_flip {
V4L2_CID_MPEG_VIDC_VIDEO_FLIP_BOTH = 3,
};
+/* HDR SEI INFO related control IDs and definitions*/
+#define V4L2_MPEG_VIDC_VENC_HDR_INFO_ENABLED 1
+#define V4L2_MPEG_VIDC_VENC_HDR_INFO_DISABLED 0
+
+#define V4L2_CID_MPEG_VIDC_VENC_HDR_INFO \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 116)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_00 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 117)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_01 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 118)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_10 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 119)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_11 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 120)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_20 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 121)
+#define V4L2_CID_MPEG_VIDC_VENC_RGB_PRIMARY_21 \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 122)
+#define V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_X \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 123)
+#define V4L2_CID_MPEG_VIDC_VENC_WHITEPOINT_Y \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 124)
+#define V4L2_CID_MPEG_VIDC_VENC_MAX_DISP_LUM \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 125)
+#define V4L2_CID_MPEG_VIDC_VENC_MIN_DISP_LUM \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 126)
+#define V4L2_CID_MPEG_VIDC_VENC_MAX_CLL \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 127)
+#define V4L2_CID_MPEG_VIDC_VENC_MAX_FLL \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 128)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 1fc62b2..7d75e56 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -303,6 +303,8 @@ enum xfrm_attr_type_t {
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OUTPUT_MARK, /* __u32 */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index e72a1f0..1e087a1 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -14,3 +14,4 @@
header-y += msm_sde_rotator.h
header-y += radio-iris.h
header-y += radio-iris-commands.h
+header-y += cam_lrme.h
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 05c9283..afd109f 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -74,8 +74,9 @@
#define CAM_ISP_PACKET_META_CLOCK 7
#define CAM_ISP_PACKET_META_CSID 8
#define CAM_ISP_PACKET_META_DUAL_CONFIG 9
-#define CAM_ISP_PACKET_META_GENERIC_BLOB 10
-#define CAM_ISP_PACKET_META_MAX 11
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT 10
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT 11
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON 12
/* DSP mode */
#define CAM_ISP_DSP_MODE_NONE 0
@@ -83,7 +84,9 @@
#define CAM_ISP_DSP_MODE_ROUND 2
/* ISP Generic Cmd Buffer Blob types */
-#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
+#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG 0
+#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG 1
+#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG 2
/* Query devices */
/**
@@ -247,20 +250,20 @@ struct cam_isp_port_hfr_config {
uint32_t framedrop_pattern;
uint32_t framedrop_period;
uint32_t reserved;
-};
+} __attribute__((packed));
/**
* struct cam_isp_resource_hfr_config - Resource HFR configuration
*
- * @num_io_configs: Number of ports
+ * @num_ports: Number of ports
* @reserved: Reserved for alignment
- * @io_hfr_config: HFR configuration for each IO port
+ * @port_hfr_config: HFR configuration for each IO port
*/
struct cam_isp_resource_hfr_config {
- uint32_t num_io_configs;
+ uint32_t num_ports;
uint32_t reserved;
- struct cam_isp_port_hfr_config io_hfr_config[1];
-};
+ struct cam_isp_port_hfr_config port_hfr_config[1];
+} __attribute__((packed));
/**
* struct cam_isp_dual_split_params - dual isp spilt parameters
@@ -316,6 +319,60 @@ struct cam_isp_dual_config {
uint32_t reserved;
struct cam_isp_dual_split_params split_params;
struct cam_isp_dual_stripe_config stripes[1];
-};
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_clock_config - Clock configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_hz: Pixel Clock for Left ISP
+ * @right_pix_hz: Pixel Clock for Right ISP, valid only if Dual
+ * @rdi_hz: RDI Clock. ISP clock will be max of RDI and
+ * PIX clocks. For a particular context which ISP
+ * HW the RDI is allocated to is not known to UMD.
+ * Hence pass the clock and let KMD decide.
+ */
+struct cam_isp_clock_config {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ uint64_t left_pix_hz;
+ uint64_t right_pix_hz;
+ uint64_t rdi_hz[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_vote - Bandwidth vote information
+ *
+ * @resource_id: Resource ID
+ * @reserved: Reserved field for alignment
+ * @cam_bw_bps: Bandwidth vote for CAMNOC
+ * @ext_bw_bps: Bandwidth vote for path-to-DDR after CAMNOC
+ */
+
+struct cam_isp_bw_vote {
+ uint32_t resource_id;
+ uint32_t reserved;
+ uint64_t cam_bw_bps;
+ uint64_t ext_bw_bps;
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_config - Bandwidth configuration
+ *
+ * @usage_type: Usage type (Single/Dual)
+ * @num_rdi: Number of RDI votes
+ * @left_pix_vote: Bandwidth vote for left ISP
+ * @right_pix_vote: Bandwidth vote for right ISP
+ * @rdi_vote: RDI bandwidth requirements
+ */
+
+struct cam_isp_bw_config {
+ uint32_t usage_type;
+ uint32_t num_rdi;
+ struct cam_isp_bw_vote left_pix_vote;
+ struct cam_isp_bw_vote right_pix_vote;
+ struct cam_isp_bw_vote rdi_vote[1];
+} __attribute__((packed));
#endif /* __UAPI_CAM_ISP_H__ */
diff --git a/include/uapi/media/cam_lrme.h b/include/uapi/media/cam_lrme.h
new file mode 100644
index 0000000..97d9578
--- /dev/null
+++ b/include/uapi/media/cam_lrme.h
@@ -0,0 +1,65 @@
+#ifndef __UAPI_CAM_LRME_H__
+#define __UAPI_CAM_LRME_H__
+
+#include "cam_defs.h"
+
+/* LRME Resource Types */
+
+enum CAM_LRME_IO_TYPE {
+ CAM_LRME_IO_TYPE_TAR,
+ CAM_LRME_IO_TYPE_REF,
+ CAM_LRME_IO_TYPE_RES,
+ CAM_LRME_IO_TYPE_DS2,
+};
+
+#define CAM_LRME_INPUT_PORT_TYPE_TAR (1 << 0)
+#define CAM_LRME_INPUT_PORT_TYPE_REF (1 << 1)
+
+#define CAM_LRME_OUTPUT_PORT_TYPE_DS2 (1 << 0)
+#define CAM_LRME_OUTPUT_PORT_TYPE_RES (1 << 1)
+
+#define CAM_LRME_DEV_MAX 1
+
+
+struct cam_lrme_hw_version {
+ uint32_t gen;
+ uint32_t rev;
+ uint32_t step;
+};
+
+struct cam_lrme_dev_cap {
+ struct cam_lrme_hw_version clc_hw_version;
+ struct cam_lrme_hw_version bus_rd_hw_version;
+ struct cam_lrme_hw_version bus_wr_hw_version;
+ struct cam_lrme_hw_version top_hw_version;
+ struct cam_lrme_hw_version top_titan_version;
+};
+
+/**
+ * struct cam_lrme_query_cap_cmd - LRME query device capability payload
+ *
+ * @dev_iommu_handle: LRME iommu handles for secure/non secure
+ * modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_devices: number of hardware devices
+ * @dev_caps: Returned device capability array
+ */
+struct cam_lrme_query_cap_cmd {
+ struct cam_iommu_handle device_iommu;
+ struct cam_iommu_handle cdm_iommu;
+ uint32_t num_devices;
+ struct cam_lrme_dev_cap dev_caps[CAM_LRME_DEV_MAX];
+};
+
+struct cam_lrme_soc_info {
+ uint64_t clock_rate;
+ uint64_t bandwidth;
+ uint64_t reserved[4];
+};
+
+struct cam_lrme_acquire_args {
+ struct cam_lrme_soc_info lrme_soc_info;
+};
+
+#endif /* __UAPI_CAM_LRME_H__ */
+
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 9b7d055..233d84e 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -355,14 +355,14 @@ struct cam_mem_cache_ops_cmd {
* @error_type: type of error
* @request_id: request id of frame
* @device_hdl: device handle
- * @reserved: reserved field
+ * @linke_hdl: link_hdl
* @resource_size: size of the resource
*/
struct cam_req_mgr_error_msg {
uint32_t error_type;
uint32_t request_id;
int32_t device_hdl;
- int32_t reserved;
+ int32_t link_hdl;
uint64_t resource_size;
};
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 7c35e27..683057f 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -58,4 +58,9 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
+
+extern int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/init/Kconfig b/init/Kconfig
index 9782dfc..7b3006a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1051,23 +1051,6 @@
config PAGE_COUNTER
bool
-config CGROUP_SCHEDTUNE
- bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
- depends on SCHED_TUNE
- help
- This option provides the "schedtune" controller which improves the
- flexibility of the task boosting mechanism by introducing the support
- to define "per task" boost values.
-
- This new controller:
- 1. allows only a two layers hierarchy, where the root defines the
- system-wide boost value and its direct childrens define each one a
- different "class of tasks" to be boosted with a different value
- 2. supports up to 16 different task classes, each one which could be
- configured with a different boost value
-
- Say N if unsure.
-
config MEMCG
bool "Memory controller"
select PAGE_COUNTER
@@ -1276,13 +1259,6 @@
endif # CGROUPS
-config SCHED_WALT
- bool "WALT"
- depends on SMP && FAIR_GROUP_SCHED
- help
- Use Window-Assisted Load Tracking (WALT) as an alternative or
- additional load tracking scheme in lieu of or along with PELT.
-
config SCHED_CORE_CTL
bool "QTI Core Control"
depends on SMP
diff --git a/init/initramfs.c b/init/initramfs.c
index f8ce812..d0b53f4 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -19,6 +19,7 @@
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/initramfs.h>
+#include <linux/file.h>
static ssize_t __init xwrite(int fd, const char *p, size_t count)
{
@@ -664,6 +665,7 @@ static int __init populate_rootfs(void)
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
free_initrd();
#endif
+ flush_delayed_fput();
/*
* Try loading default modules from initramfs. This gives
* us a chance to load before device_initcalls.
diff --git a/init/main.c b/init/main.c
index aca8f3e..674bc77 100644
--- a/init/main.c
+++ b/init/main.c
@@ -70,7 +70,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
-#include <linux/file.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
@@ -946,8 +945,6 @@ static int __ref kernel_init(void *unused)
system_state = SYSTEM_RUNNING;
numa_default_policy();
- flush_delayed_fput();
-
rcu_end_inkernel_boot();
if (ramdisk_execute_command) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 779c871..372454a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1720,7 +1720,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
} else {
if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+ BPF_CLASS(insn->code) == BPF_ALU64) {
verbose("BPF_END uses reserved fields\n");
return -EINVAL;
}
diff --git a/kernel/configs/android-fetch-configs.sh b/kernel/configs/android-fetch-configs.sh
new file mode 100755
index 0000000..a5b56d4
--- /dev/null
+++ b/kernel/configs/android-fetch-configs.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+curl https://android.googlesource.com/kernel/configs/+archive/master/android-4.9.tar.gz | tar xzv
+
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0d10ef5..915e750 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -25,6 +25,7 @@
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
@@ -1433,6 +1434,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
.startup.single = NULL,
.teardown.single = rcutree_dying_cpu,
},
+ [CPUHP_AP_KMAP_DYING] = {
+ .name = "KMAP:dying",
+ .startup.single = NULL,
+ .teardown.single = kmap_remove_unused_cpu,
+ },
/* Entry state on starting. Interrupts enabled from here on. Transient
* state for synchronsization */
[CPUHP_AP_ONLINE] = {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index cbf240d..af9159a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2312,6 +2312,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
mutex_unlock(&cpuset_mutex);
}
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+ force_rebuild = true;
+}
+
/**
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
*
@@ -2386,8 +2393,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
}
/* rebuild sched domains if cpus_allowed has changed */
- if (cpus_updated)
+ if (cpus_updated || force_rebuild) {
+ force_rebuild = false;
rebuild_sched_domains();
+ }
}
void cpuset_update_active_cpus(bool cpu_online)
@@ -2406,6 +2415,11 @@ void cpuset_update_active_cpus(bool cpu_online)
schedule_work(&cpuset_hotplug_work);
}
+void cpuset_wait_for_hotplug(void)
+{
+ flush_work(&cpuset_hotplug_work);
+}
+
/*
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
* Call this routine anytime after node_states[N_MEMORY] changes.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f6e81b5..b784662 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7963,6 +7963,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
}
}
event->tp_event->prog = prog;
+ event->tp_event->bpf_prog_owner = event;
return 0;
}
@@ -7977,7 +7978,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
return;
prog = event->tp_event->prog;
- if (prog) {
+ if (prog && event->tp_event->bpf_prog_owner == event) {
event->tp_event->prog = NULL;
bpf_prog_put(prog);
}
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2b59c82..b94d3d1 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -17,6 +17,7 @@
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <trace/events/sched.h>
+#include <linux/sched/sysctl.h>
/*
* The number of tasks checked:
@@ -24,6 +25,14 @@
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
/*
+ * Selective monitoring of hung tasks.
+ *
+ * if set to 1, khungtaskd skips monitoring any task that does not have
+ * task_struct->hang_detection_enabled set; otherwise it monitors all tasks.
+ */
+int sysctl_hung_task_selective_monitoring = 1;
+
+/*
* Limit number of tasks checked in a batch.
*
* This value controls the preemptibility of khungtaskd since preemption
@@ -179,7 +188,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
}
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
if (t->state == TASK_UNINTERRUPTIBLE)
- check_hung_task(t, timeout);
+ /* Check for selective monitoring */
+ if (!sysctl_hung_task_selective_monitoring ||
+ t->hang_detection_enabled)
+ check_hung_task(t, timeout);
}
unlock:
rcu_read_unlock();
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 00bb0ae..77977f55df 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -405,10 +405,8 @@ static void free_desc(unsigned int irq)
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init() as well.
*/
- mutex_lock(&sparse_irq_lock);
kobject_del(&desc->kobj);
delete_irq_desc(irq);
- mutex_unlock(&sparse_irq_lock);
/*
* We free the descriptor, masks and stat fields via RCU. That
@@ -446,20 +444,15 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
- mutex_lock(&sparse_irq_lock);
irq_insert_desc(start + i, desc);
irq_sysfs_add(start + i, desc);
- mutex_unlock(&sparse_irq_lock);
}
+ bitmap_set(allocated_irqs, start, cnt);
return start;
err:
for (i--; i >= 0; i--)
free_desc(start + i);
-
- mutex_lock(&sparse_irq_lock);
- bitmap_clear(allocated_irqs, start, cnt);
- mutex_unlock(&sparse_irq_lock);
return -ENOMEM;
}
@@ -558,6 +551,7 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
desc->owner = owner;
}
+ bitmap_set(allocated_irqs, start, cnt);
return start;
}
@@ -653,10 +647,10 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
if (from >= nr_irqs || (from + cnt) > nr_irqs)
return;
+ mutex_lock(&sparse_irq_lock);
for (i = 0; i < cnt; i++)
free_desc(from + i);
- mutex_lock(&sparse_irq_lock);
bitmap_clear(allocated_irqs, from, cnt);
mutex_unlock(&sparse_irq_lock);
}
@@ -703,19 +697,15 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
from, cnt, 0);
ret = -EEXIST;
if (irq >=0 && start != irq)
- goto err;
+ goto unlock;
if (start + cnt > nr_irqs) {
ret = irq_expand_nr_irqs(start + cnt);
if (ret)
- goto err;
+ goto unlock;
}
-
- bitmap_set(allocated_irqs, start, cnt);
- mutex_unlock(&sparse_irq_lock);
- return alloc_descs(start, cnt, node, affinity, owner);
-
-err:
+ ret = alloc_descs(start, cnt, node, affinity, owner);
+unlock:
mutex_unlock(&sparse_irq_lock);
return ret;
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4d7ffc0..6599c7f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3260,10 +3260,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references)
+ if (hlock->references) {
+ /*
+ * Check: unsigned int references:12, overflow.
+ */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+ return 0;
+
hlock->references++;
- else
+ } else {
hlock->references = 2;
+ }
return 1;
}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 68d27ae..9a12c83 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -19,8 +19,9 @@
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>
+#include <linux/cpuset.h>
-/*
+/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -210,6 +211,8 @@ void thaw_processes(void)
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
+ cpuset_wait_for_hotplug();
+
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 009f788..5183134 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -267,7 +267,8 @@ static const struct file_operations pm_qos_debug_fops = {
.release = single_release,
};
-static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+ struct cpumask *cpus)
{
struct pm_qos_request *req = NULL;
int cpu;
@@ -294,8 +295,11 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
}
}
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ if (c->target_per_cpu[cpu] != qos_val[cpu])
+ cpumask_set_cpu(cpu, cpus);
c->target_per_cpu[cpu] = qos_val[cpu];
+ }
}
/**
@@ -316,6 +320,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c,
unsigned long flags;
int prev_value, curr_value, new_value;
struct plist_node *node = &req->node;
+ struct cpumask cpus;
int ret;
spin_lock_irqsave(&pm_qos_lock, flags);
@@ -346,18 +351,24 @@ int pm_qos_update_target(struct pm_qos_constraints *c,
}
curr_value = pm_qos_get_value(c);
+ cpumask_clear(&cpus);
pm_qos_set_value(c, curr_value);
- pm_qos_set_value_for_cpus(c);
+ pm_qos_set_value_for_cpus(c, &cpus);
spin_unlock_irqrestore(&pm_qos_lock, flags);
trace_pm_qos_update_target(action, prev_value, curr_value);
- if (prev_value != curr_value) {
+
+ /*
+ * if cpu mask bits are set, call the notifier call chain
+ * to update the new qos restriction for the cores
+ */
+
+ if (!cpumask_empty(&cpus)) {
ret = 1;
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
- (unsigned long)curr_value,
- NULL);
+ (unsigned long)curr_value, &cpus);
} else {
ret = 0;
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4f8f9c1..084c41c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -792,8 +792,13 @@ void rcu_irq_exit(void)
long long oldval;
struct rcu_dynticks *rdtp;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /* Page faults can happen in NMI handlers, so check... */
+ if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ return;
+
+ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting--;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -930,8 +935,13 @@ void rcu_irq_enter(void)
struct rcu_dynticks *rdtp;
long long oldval;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /* Page faults can happen in NMI handlers, so check... */
+ if (READ_ONCE(rdtp->dynticks_nmi_nesting))
+ return;
+
+ RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
oldval = rdtp->dynticks_nesting;
rdtp->dynticks_nesting++;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index f6cce95..4b87c4e 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -29,4 +29,3 @@
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
-obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58c4341..bbe783e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1169,6 +1169,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
cpumask_t allowed_mask;
rq = task_rq_lock(p, &rf);
+ update_rq_clock(rq);
if (p->flags & PF_KTHREAD) {
/*
@@ -7959,16 +7960,15 @@ static void cpuset_cpu_active(void)
* operation in the resume sequence, just build a single sched
* domain, ignoring cpusets.
*/
- num_cpus_frozen--;
- if (likely(num_cpus_frozen)) {
- partition_sched_domains(1, NULL, NULL);
+ partition_sched_domains(1, NULL, NULL);
+ if (--num_cpus_frozen)
return;
- }
/*
* This is the last CPU online operation. So fall through and
* restore the original sched domains by considering the
* cpuset configurations.
*/
+ cpuset_force_rebuild();
}
cpuset_update_active_cpus(true);
}
@@ -8660,6 +8660,7 @@ void sched_move_task(struct task_struct *tsk)
struct rq *rq;
rq = task_rq_lock(tsk, &rf);
+ update_rq_clock(rq);
running = task_current(rq, tsk);
queued = task_on_rq_queued(tsk);
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index f95243b..cc5a97c 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -38,6 +38,7 @@ struct cluster_data {
unsigned int active_cpus;
unsigned int num_cpus;
unsigned int nr_isolated_cpus;
+ unsigned int nr_not_preferred_cpus;
#ifdef CONFIG_SCHED_CORE_ROTATE
unsigned long set_max;
unsigned long set_cur;
@@ -350,6 +351,7 @@ static ssize_t store_not_preferred(struct cluster_data *state,
unsigned int val[MAX_CPUS_PER_CLUSTER];
unsigned long flags;
int ret;
+ int not_preferred_count = 0;
ret = sscanf(buf, "%u %u %u %u %u %u\n",
&val[0], &val[1], &val[2], &val[3],
@@ -361,7 +363,9 @@ static ssize_t store_not_preferred(struct cluster_data *state,
for (i = 0; i < state->num_cpus; i++) {
c = &per_cpu(cpu_state, i + state->first_cpu);
c->not_preferred = val[i];
+ not_preferred_count += !!val[i];
}
+ state->nr_not_preferred_cpus = not_preferred_count;
spin_unlock_irqrestore(&state_lock, flags);
return count;
@@ -793,10 +797,18 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
continue;
if (cluster->active_cpus == need)
break;
- /* Don't offline busy CPUs. */
+ /* Don't isolate busy CPUs. */
if (c->is_busy)
continue;
+ /*
+ * We isolate only the not_preferred CPUs. If none
+ * of the CPUs are selected as not_preferred, then
+ * all CPUs are eligible for isolation.
+ */
+ if (cluster->nr_not_preferred_cpus && !c->not_preferred)
+ continue;
+
if (!should_we_isolate(c->cpu, cluster))
continue;
@@ -1109,6 +1121,7 @@ static int cluster_init(const struct cpumask *mask)
cluster->set_cur = cluster->set_max - 1;
#endif
cluster->enable = true;
+ cluster->nr_not_preferred_cpus = 0;
INIT_LIST_HEAD(&cluster->lru);
spin_lock_init(&cluster->pending_lock);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3192612..32b67eb 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -235,7 +235,9 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy,
/* Track cycles in current window */
delta_ns = upto - sg_policy->last_cyc_update_time;
- cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ);
+ delta_ns *= prev_freq;
+ do_div(delta_ns, (NSEC_PER_SEC / KHZ));
+ cycles = delta_ns;
sg_policy->curr_cycles += cycles;
sg_policy->last_cyc_update_time = upto;
}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index ba5e3e2..87bea1e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -279,3 +279,14 @@ void cpupri_cleanup(struct cpupri *cp)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
free_cpumask_var(cp->pri_to_cpu[i].mask);
}
+
+/*
+ * cpupri_check_rt - check if CPU has a RT task
+ * should be called from rcu-sched read section.
+ */
+bool cpupri_check_rt(void)
+{
+ int cpu = raw_smp_processor_id();
+
+ return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d92092..42be34a 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2769,6 +2769,7 @@ u32 sched_get_wake_up_idle(struct task_struct *p)
return !!enabled;
}
+EXPORT_SYMBOL(sched_get_wake_up_idle);
int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
{
@@ -2781,6 +2782,7 @@ int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
return 0;
}
+EXPORT_SYMBOL(sched_set_wake_up_idle);
/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
@@ -3223,6 +3225,36 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
return 1;
}
+/*
+ * Check if we need to update the load and the utilization of a blocked
+ * group_entity:
+ */
+static inline bool skip_blocked_update(struct sched_entity *se)
+{
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+
+ /*
+ * If sched_entity still have not zero load or utilization, we have to
+ * decay it:
+ */
+ if (se->avg.load_avg || se->avg.util_avg)
+ return false;
+
+ /*
+ * If there is a pending propagation, we have to update the load and
+ * the utilization of the sched_entity:
+ */
+ if (gcfs_rq->propagate_avg)
+ return false;
+
+ /*
+ * Otherwise, the load and the utilization of the sched_entity is
+ * already zero and there is no pending propagation, so it will be a
+ * waste of time to try to decay it:
+ */
+ return true;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
@@ -5563,13 +5595,6 @@ static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
}
-static inline bool bias_to_waker_cpu_enabled(struct task_struct *wakee,
- struct task_struct *waker)
-{
- return task_util(waker) > sched_big_waker_task_load &&
- task_util(wakee) < sched_small_wakee_task_load;
-}
-
static inline bool
bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
{
@@ -6903,7 +6928,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
int target_cpu, targeted_cpus = 0;
unsigned long task_util_boosted = 0, curr_util = 0;
long new_util, new_util_cum;
- int i = -1;
+ int i;
int ediff = -1;
int cpu = smp_processor_id();
int min_util_cpu = -1;
@@ -6924,14 +6949,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
struct related_thread_group *grp;
cpumask_t search_cpus;
int prev_cpu = task_cpu(p);
- struct task_struct *curr = cpu_rq(cpu)->curr;
-#ifdef CONFIG_SCHED_CORE_ROTATE
bool do_rotate = false;
bool avoid_prev_cpu = false;
-#else
-#define do_rotate false
-#define avoid_prev_cpu false
-#endif
sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
@@ -6945,14 +6964,14 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
curr_util = boosted_task_util(cpu_rq(cpu)->curr);
- need_idle = wake_to_idle(p);
-
+ need_idle = wake_to_idle(p) || schedtune_prefer_idle(p);
+ if (need_idle)
+ sync = 0;
grp = task_related_thread_group(p);
if (grp && grp->preferred_cluster)
rtg_target = &grp->preferred_cluster->cpus;
- if (sync && bias_to_waker_cpu_enabled(p, curr) &&
- bias_to_waker_cpu(p, cpu, rtg_target)) {
+ if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
trace_sched_task_util_bias_to_waker(p, prev_cpu,
task_util(p), cpu, cpu, 0, need_idle);
return cpu;
@@ -7020,13 +7039,11 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
cpumask_and(&search_cpus, &search_cpus,
sched_group_cpus(sg_target));
-#ifdef CONFIG_SCHED_CORE_ROTATE
i = find_first_cpu_bit(p, &search_cpus, sg_target,
&avoid_prev_cpu, &do_rotate,
&first_cpu_bit_env);
retry:
-#endif
/* Find cpu with sufficient capacity */
while ((i = cpumask_next(i, &search_cpus)) < nr_cpu_ids) {
cpumask_clear_cpu(i, &search_cpus);
@@ -7122,9 +7139,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
}
} else if (cpu_rq(i)->nr_running) {
target_cpu = i;
-#ifdef CONFIG_SCHED_CORE_ROTATE
do_rotate = false;
-#endif
break;
}
} else if (!need_idle) {
@@ -7164,7 +7179,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
}
}
-#ifdef CONFIG_SCHED_CORE_ROTATE
if (do_rotate) {
/*
* We started iteration somewhere in the middle of
@@ -7175,7 +7189,6 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
i = -1;
goto retry;
}
-#endif
if (target_cpu == -1 ||
(target_cpu != min_util_cpu && !safe_to_pack &&
@@ -8400,6 +8413,8 @@ static void update_blocked_averages(int cpu)
* list_add_leaf_cfs_rq() for details.
*/
for_each_leaf_cfs_rq(rq, cfs_rq) {
+ struct sched_entity *se;
+
/* throttled entities do not contribute to load */
if (throttled_hierarchy(cfs_rq))
continue;
@@ -8407,9 +8422,10 @@ static void update_blocked_averages(int cpu)
if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
update_tg_load_avg(cfs_rq, 0);
- /* Propagate pending load changes to the parent */
- if (cfs_rq->tg->se[cpu])
- update_load_avg(cfs_rq->tg->se[cpu], 0);
+ /* Propagate pending load changes to the parent, if any: */
+ se = cfs_rq->tg->se[cpu];
+ if (se && !skip_blocked_update(se))
+ update_load_avg(se, 0);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7feba85..c97b779 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -914,15 +914,30 @@ static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
char *pos = buf;
char *end = buf + sizeof(buf);
int idx;
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
pos += snprintf(pos, sizeof(buf),
- "sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+ "sched: RT throttling activated for rt_rq %pK (cpu %d)\n",
rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+ pos += snprintf(pos, end - pos,
+ "rt_period_timer: expires=%lld now=%llu period=%llu\n",
+ hrtimer_get_expires_ns(&rt_b->rt_period_timer),
+ ktime_get_ns(), sched_rt_period(rt_rq));
+
if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
goto out;
pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+#ifdef CONFIG_SCHED_INFO
+ if (sched_info_on())
+ pos += snprintf(pos, end - pos,
+ "current %s (%d) is running for %llu nsec\n",
+ current->comm, current->pid,
+ rq_clock(rq_of_rt_rq(rt_rq)) -
+ current->sched_info.last_arrival);
+#endif
+
idx = sched_find_first_bit(array->bitmap);
while (idx < MAX_RT_PRIO) {
list_for_each_entry(rt_se, array->queue + idx, run_list) {
@@ -1743,13 +1758,8 @@ static int find_lowest_rq(struct task_struct *task)
int best_cpu_idle_idx = INT_MAX;
int cpu_idle_idx = -1;
bool placement_boost;
-#ifdef CONFIG_SCHED_CORE_ROTATE
bool do_rotate = false;
bool avoid_prev_cpu = false;
-#else
-#define do_rotate false
-#define avoid_prev_cpu false
-#endif
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
@@ -1809,11 +1819,9 @@ static int find_lowest_rq(struct task_struct *task)
cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
&search_cpu);
-#ifdef CONFIG_SCHED_CORE_ROTATE
cpu = find_first_cpu_bit(task, &search_cpu, sg_target,
&avoid_prev_cpu, &do_rotate,
&first_cpu_bit_env);
-#endif
} else {
cpumask_copy(&search_cpu, lowest_mask);
cpumask_clear(&backup_search_cpu);
@@ -1879,7 +1887,6 @@ static int find_lowest_rq(struct task_struct *task)
best_cpu = cpu;
}
-#ifdef CONFIG_SCHED_CORE_ROTATE
if (do_rotate) {
/*
* We started iteration somewhere in the middle of
@@ -1890,13 +1897,13 @@ static int find_lowest_rq(struct task_struct *task)
cpu = -1;
goto retry;
}
-#endif
if (best_cpu != -1) {
return best_cpu;
} else if (!cpumask_empty(&backup_search_cpu)) {
cpumask_copy(&search_cpu, &backup_search_cpu);
cpumask_clear(&backup_search_cpu);
+ cpu = -1;
goto retry;
}
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b5a271b..494ab14 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1810,7 +1810,7 @@ static inline unsigned long
cpu_util_freq_pelt(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- unsigned long util = rq->cfs.avg.util_avg;
+ u64 util = rq->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
util *= (100 + per_cpu(sched_load_boost, cpu));
@@ -2853,4 +2853,6 @@ int
find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus,
struct sched_group *sg_target, bool *avoid_prev_cpu,
bool *do_rotate, struct find_first_cpu_bit_env *env);
+#else
+#define find_first_cpu_bit(...) -1
#endif
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 46480a7..10d7f1b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -206,7 +206,7 @@ unsigned int sched_get_cpu_util(int cpu)
raw_spin_unlock_irqrestore(&rq->lock, flags);
util = (util >= capacity) ? capacity : util;
- busy = (util * 100) / capacity;
+ busy = div64_ul((util * 100), capacity);
return busy;
}
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 4f64417..d1b4c72 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -28,6 +28,7 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu);
#define schedtune_cpu_boost(cpu) get_sysctl_sched_cfs_boost()
#define schedtune_task_boost(tsk) get_sysctl_sched_cfs_boost()
+#define schedtune_prefer_idle(tsk) 0
#define schedtune_exit_task(task) do { } while (0)
@@ -44,6 +45,7 @@ int schedtune_accept_deltas(int nrg_delta, int cap_delta,
#define schedtune_cpu_boost(cpu) 0
#define schedtune_task_boost(tsk) 0
+#define schedtune_prefer_idle(tsk) 0
#define schedtune_exit_task(task) do { } while (0)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 1f5639c..da7c0f0 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -162,13 +162,6 @@ static const unsigned int top_tasks_bitmap_size =
*/
__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
-
-#define SCHED_BIG_WAKER_TASK_LOAD_PCT 25UL
-#define SCHED_SMALL_WAKEE_TASK_LOAD_PCT 10UL
-
-__read_mostly unsigned int sched_big_waker_task_load;
-__read_mostly unsigned int sched_small_wakee_task_load;
-
static int __init set_sched_ravg_window(char *str)
{
unsigned int window_size;
@@ -1874,7 +1867,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
p->cpu_cycles = cur_cycles;
- trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time, p);
}
static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
@@ -3121,8 +3114,7 @@ void walt_sched_init(struct rq *rq)
walt_cpu_util_freq_divisor =
(sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
- sched_big_waker_task_load =
- (SCHED_BIG_WAKER_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
- sched_small_wakee_task_load =
- (SCHED_SMALL_WAKEE_TASK_LOAD_PCT << SCHED_CAPACITY_SHIFT) / 100;
+ sched_init_task_load_windows =
+ div64_u64((u64)sysctl_sched_init_task_load_pct *
+ (u64)sched_ravg_window, 100);
}
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 86d5bfd..10f3e84 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -219,7 +219,7 @@ static inline unsigned int max_task_load(void)
return sched_ravg_window;
}
-static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period)
{
return div64_u64(cycles, period);
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 0db7c8a..af182a6 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
return 0;
}
+void __get_seccomp_filter(struct seccomp_filter *filter)
+{
+ /* Reference count is bounded by the number of total processes. */
+ atomic_inc(&filter->usage);
+}
+
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
- /* Reference count is bounded by the number of total processes. */
- atomic_inc(&orig->usage);
+ __get_seccomp_filter(orig);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
}
}
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+static void __put_seccomp_filter(struct seccomp_filter *orig)
{
- struct seccomp_filter *orig = tsk->seccomp.filter;
/* Clean up single-reference branches iteratively. */
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
@@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
}
}
+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+void put_seccomp_filter(struct task_struct *tsk)
+{
+ __put_seccomp_filter(tsk->seccomp.filter);
+}
+
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
@@ -892,13 +901,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (!data)
goto out;
- get_seccomp_filter(task);
+ __get_seccomp_filter(filter);
spin_unlock_irq(&task->sighand->siglock);
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
- put_seccomp_filter(task);
+ __put_seccomp_filter(filter);
return ret;
out:
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bde8e33..6833ffa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -245,6 +245,8 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
+#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
+#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -308,6 +310,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
pending = local_softirq_pending();
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
+ !defer_for_rt() &&
--max_restart)
goto restart;
@@ -363,7 +366,7 @@ static inline void invoke_softirq(void)
if (ksoftirqd_running())
return;
- if (!force_irqthreads) {
+ if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
* We can safely execute softirq on the current stack if
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2c4cd17..29bb99c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1186,6 +1186,16 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &neg_one,
},
+ {
+ .procname = "hung_task_selective_monitoring",
+ .data = &sysctl_hung_task_selective_monitoring,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+
#endif
#ifdef CONFIG_RT_MUTEXES
{
@@ -1292,6 +1302,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = timer_migration_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
#ifdef CONFIG_BPF_SYSCALL
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index b9b881eb..7251e3c 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -11,5 +11,3 @@
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
-
-ccflags-y += -Idrivers/cpuidle
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 842928a..b2df539 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -26,11 +26,6 @@
#include <linux/workqueue.h>
#include <linux/freezer.h>
-#ifdef CONFIG_MSM_PM
-#include "lpm-levels.h"
-#endif
-#include <linux/workqueue.h>
-
/**
* struct alarm_base - Alarm timer bases
* @lock: Lock for syncrhonized access to the base
@@ -50,116 +45,12 @@ static ktime_t freezer_delta;
static DEFINE_SPINLOCK(freezer_delta_lock);
static struct wakeup_source *ws;
-static struct delayed_work work;
-static struct workqueue_struct *power_off_alarm_workqueue;
#ifdef CONFIG_RTC_CLASS
/* rtc timer and device for setting alarm wakeups at suspend */
static struct rtc_timer rtctimer;
static struct rtc_device *rtcdev;
static DEFINE_SPINLOCK(rtcdev_lock);
-static struct mutex power_on_alarm_lock;
-static struct alarm init_alarm;
-
-/**
- * power_on_alarm_init - Init power on alarm value
- *
- * Read rtc alarm value after device booting up and add this alarm
- * into alarm queue.
- */
-void power_on_alarm_init(void)
-{
- struct rtc_wkalrm rtc_alarm;
- struct rtc_time rt;
- unsigned long alarm_time;
- struct rtc_device *rtc;
- ktime_t alarm_ktime;
-
- rtc = alarmtimer_get_rtcdev();
-
- if (!rtc)
- return;
-
- rtc_read_alarm(rtc, &rtc_alarm);
- rt = rtc_alarm.time;
-
- rtc_tm_to_time(&rt, &alarm_time);
-
- if (alarm_time) {
- alarm_ktime = ktime_set(alarm_time, 0);
- alarm_init(&init_alarm, ALARM_POWEROFF_REALTIME, NULL);
- alarm_start(&init_alarm, alarm_ktime);
- }
-}
-
-/**
- * set_power_on_alarm - set power on alarm value into rtc register
- *
- * Get the soonest power off alarm timer and set the alarm value into rtc
- * register.
- */
-void set_power_on_alarm(void)
-{
- int rc;
- struct timespec wall_time, alarm_ts;
- long alarm_secs = 0l;
- long rtc_secs, alarm_time, alarm_delta;
- struct rtc_time rtc_time;
- struct rtc_wkalrm alarm;
- struct rtc_device *rtc;
- struct timerqueue_node *next;
- unsigned long flags;
- struct alarm_base *base = &alarm_bases[ALARM_POWEROFF_REALTIME];
-
- rc = mutex_lock_interruptible(&power_on_alarm_lock);
- if (rc != 0)
- return;
-
- spin_lock_irqsave(&base->lock, flags);
- next = timerqueue_getnext(&base->timerqueue);
- spin_unlock_irqrestore(&base->lock, flags);
-
- if (next) {
- alarm_ts = ktime_to_timespec(next->expires);
- alarm_secs = alarm_ts.tv_sec;
- }
-
- if (!alarm_secs)
- goto disable_alarm;
-
- getnstimeofday(&wall_time);
-
- /*
- * alarm_secs have to be bigger than "wall_time +1".
- * It is to make sure that alarm time will be always
- * bigger than wall time.
- */
- if (alarm_secs <= wall_time.tv_sec + 1)
- goto disable_alarm;
-
- rtc = alarmtimer_get_rtcdev();
- if (!rtc)
- goto exit;
-
- rtc_read_time(rtc, &rtc_time);
- rtc_tm_to_time(&rtc_time, &rtc_secs);
- alarm_delta = wall_time.tv_sec - rtc_secs;
- alarm_time = alarm_secs - alarm_delta;
-
- rtc_time_to_tm(alarm_time, &alarm.time);
- alarm.enabled = 1;
- rc = rtc_set_alarm(rtcdev, &alarm);
- if (rc)
- goto disable_alarm;
-
- mutex_unlock(&power_on_alarm_lock);
- return;
-
-disable_alarm:
- rtc_alarm_irq_enable(rtcdev, 0);
-exit:
- mutex_unlock(&power_on_alarm_lock);
-}
static void alarmtimer_triggered_func(void *p)
{
@@ -231,8 +122,6 @@ static void alarmtimer_rtc_remove_device(struct device *dev,
static inline void alarmtimer_rtc_timer_init(void)
{
- mutex_init(&power_on_alarm_lock);
-
rtc_timer_init(&rtctimer, NULL, NULL);
}
@@ -259,14 +148,8 @@ struct rtc_device *alarmtimer_get_rtcdev(void)
static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
static inline void alarmtimer_rtc_interface_remove(void) { }
static inline void alarmtimer_rtc_timer_init(void) { }
-void set_power_on_alarm(void) { }
#endif
-static void alarm_work_func(struct work_struct *unused)
-{
- set_power_on_alarm();
-}
-
/**
* alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
* @base: pointer to the base where the timer is being run
@@ -336,10 +219,6 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
}
spin_unlock_irqrestore(&base->lock, flags);
- /* set next power off alarm */
- if (alarm->type == ALARM_POWEROFF_REALTIME)
- queue_delayed_work(power_off_alarm_workqueue, &work, 0);
-
return ret;
}
@@ -362,70 +241,6 @@ EXPORT_SYMBOL_GPL(alarm_expires_remaining);
* set an rtc timer to fire that far into the future, which
* will wake us from suspend.
*/
-#if defined(CONFIG_RTC_DRV_QPNP) && defined(CONFIG_MSM_PM)
-static int alarmtimer_suspend(struct device *dev)
-{
- struct rtc_time tm;
- ktime_t min, now;
- unsigned long flags;
- struct rtc_device *rtc;
- int i;
- int ret = 0;
-
- spin_lock_irqsave(&freezer_delta_lock, flags);
- min = freezer_delta;
- freezer_delta = ktime_set(0, 0);
- spin_unlock_irqrestore(&freezer_delta_lock, flags);
-
- rtc = alarmtimer_get_rtcdev();
- /* If we have no rtcdev, just return */
- if (!rtc)
- return 0;
-
- /* Find the soonest timer to expire*/
- for (i = 0; i < ALARM_NUMTYPE; i++) {
- struct alarm_base *base = &alarm_bases[i];
- struct timerqueue_node *next;
- ktime_t delta;
-
- spin_lock_irqsave(&base->lock, flags);
- next = timerqueue_getnext(&base->timerqueue);
- spin_unlock_irqrestore(&base->lock, flags);
- if (!next)
- continue;
- delta = ktime_sub(next->expires, base->gettime());
- if (!min.tv64 || (delta.tv64 < min.tv64))
- min = delta;
- }
- if (min.tv64 == 0)
- return 0;
-
- if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
- __pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
- return -EBUSY;
- }
-
- /* Setup a timer to fire that far in the future */
- rtc_timer_cancel(rtc, &rtctimer);
- rtc_read_time(rtc, &tm);
- now = rtc_tm_to_ktime(tm);
- now = ktime_add(now, min);
- if (poweron_alarm) {
- struct rtc_time tm_val;
- unsigned long secs;
-
- tm_val = rtc_ktime_to_tm(min);
- rtc_tm_to_time(&tm_val, &secs);
- lpm_suspend_wake_time(secs);
- } else {
- /* Set alarm, if in the past reject suspend briefly to handle */
- ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
- if (ret < 0)
- __pm_wakeup_event(ws, MSEC_PER_SEC);
- }
- return ret;
-}
-#else
static int alarmtimer_suspend(struct device *dev)
{
struct rtc_time tm;
@@ -435,8 +250,6 @@ static int alarmtimer_suspend(struct device *dev)
int i;
int ret;
- cancel_delayed_work_sync(&work);
-
spin_lock_irqsave(&freezer_delta_lock, flags);
min = freezer_delta;
freezer_delta = ktime_set(0, 0);
@@ -482,7 +295,7 @@ static int alarmtimer_suspend(struct device *dev)
__pm_wakeup_event(ws, MSEC_PER_SEC);
return ret;
}
-#endif
+
static int alarmtimer_resume(struct device *dev)
{
struct rtc_device *rtc;
@@ -490,8 +303,6 @@ static int alarmtimer_resume(struct device *dev)
rtc = alarmtimer_get_rtcdev();
if (rtc)
rtc_timer_cancel(rtc, &rtctimer);
-
- queue_delayed_work(power_off_alarm_workqueue, &work, 0);
return 0;
}
@@ -672,14 +483,12 @@ EXPORT_SYMBOL_GPL(alarm_forward_now);
* clock2alarm - helper that converts from clockid to alarmtypes
* @clockid: clockid.
*/
-enum alarmtimer_type clock2alarm(clockid_t clockid)
+static enum alarmtimer_type clock2alarm(clockid_t clockid)
{
if (clockid == CLOCK_REALTIME_ALARM)
return ALARM_REALTIME;
if (clockid == CLOCK_BOOTTIME_ALARM)
return ALARM_BOOTTIME;
- if (clockid == CLOCK_POWEROFF_ALARM)
- return ALARM_POWEROFF_REALTIME;
return -1;
}
@@ -1073,13 +882,10 @@ static int __init alarmtimer_init(void)
posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
- posix_timers_register_clock(CLOCK_POWEROFF_ALARM, &alarm_clock);
/* Initialize alarm bases */
alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
- alarm_bases[ALARM_POWEROFF_REALTIME].base_clockid = CLOCK_REALTIME;
- alarm_bases[ALARM_POWEROFF_REALTIME].gettime = &ktime_get_real;
alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
for (i = 0; i < ALARM_NUMTYPE; i++) {
@@ -1101,24 +907,8 @@ static int __init alarmtimer_init(void)
goto out_drv;
}
ws = wakeup_source_register("alarmtimer");
- if (!ws) {
- error = -ENOMEM;
- goto out_ws;
- }
-
- INIT_DELAYED_WORK(&work, alarm_work_func);
- power_off_alarm_workqueue =
- create_singlethread_workqueue("power_off_alarm");
- if (!power_off_alarm_workqueue) {
- error = -ENOMEM;
- goto out_wq;
- }
-
return 0;
-out_wq:
- wakeup_source_unregister(ws);
-out_ws:
- platform_device_unregister(pdev);
+
out_drv:
platform_driver_unregister(&alarmtimer_driver);
out_if:
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 2aef653..a01a71f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -94,17 +94,15 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
};
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+ /* Make sure we catch unsupported clockids */
+ [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
+
[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
[CLOCK_TAI] = HRTIMER_BASE_TAI,
};
-static inline int hrtimer_clockid_to_base(clockid_t clock_id)
-{
- return hrtimer_clock_to_base_table[clock_id];
-}
-
/*
* Functions and macros which are different for UP/SMP systems are kept in a
* single place
@@ -1091,6 +1089,18 @@ u64 hrtimer_get_next_event(void)
}
#endif
+static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+{
+ if (likely(clock_id < MAX_CLOCKS)) {
+ int base = hrtimer_clock_to_base_table[clock_id];
+
+ if (likely(base != HRTIMER_MAX_CLOCK_BASES))
+ return base;
+ }
+ WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
+ return HRTIMER_BASE_MONOTONIC;
+}
+
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index b513446..05ae01e 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -33,6 +33,7 @@ int tick_program_event(ktime_t expires, int force)
* We don't need the clock event device any more, stop it.
*/
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+ dev->next_event.tv64 = KTIME_MAX;
return 0;
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 80aa30d..4c0b001 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -245,7 +245,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
int ret;
mutex_lock(&mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
timers_update_migration(false);
mutex_unlock(&mutex);
@@ -1385,8 +1385,8 @@ static int next_pending_bucket(struct timer_base *base, unsigned offset,
pos = find_next_bit(base->pending_map, start, offset);
pos_down = pos < start ? pos + LVL_SIZE - start : -1;
- if (((pos_up + base->clk) << LVL_SHIFT(lvl)) >
- ((pos_down + base->clk) << LVL_SHIFT(lvl)))
+ if (((pos_up + (u64)base->clk) << LVL_SHIFT(lvl)) >
+ ((pos_down + (u64)base->clk) << LVL_SHIFT(lvl)))
return pos_down;
return pos_up;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6e432ed..5b8d718 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2747,13 +2747,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (!command || !ftrace_enabled) {
/*
- * If these are per_cpu ops, they still need their
- * per_cpu field freed. Since, function tracing is
+ * If these are dynamic or per_cpu ops, they still
+ * need their data freed. Since, function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
- if (ops->flags & FTRACE_OPS_FL_PER_CPU)
- per_cpu_ops_free(ops);
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+ goto free_ops;
+
return 0;
}
@@ -2808,6 +2809,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
schedule_on_each_cpu(ftrace_sync);
+ free_ops:
arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_PER_CPU)
@@ -4379,9 +4381,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
static int __init set_graph_function(char *str)
{
strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -5979,17 +5978,6 @@ void unregister_ftrace_graph(void)
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-#ifdef CONFIG_DYNAMIC_FTRACE
- /*
- * Function graph does not allocate the trampoline, but
- * other global_ops do. We need to reset the ALLOC_TRAMP flag
- * if one was used.
- */
- global_ops.trampoline = save_global_trampoline;
- if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
- global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
out:
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index cddedb5..e3aae88 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2416,11 +2416,17 @@ static char *get_trace_buf(void)
if (!buffer || buffer->nesting >= 4)
return NULL;
- return &buffer->buffer[buffer->nesting++][0];
+ buffer->nesting++;
+
+ /* Interrupts must see nesting incremented before we use the buffer */
+ barrier();
+ return &buffer->buffer[buffer->nesting][0];
}
static void put_trace_buf(void)
{
+ /* Don't let the decrement of nesting leak before this */
+ barrier();
this_cpu_dec(trace_percpu_buffer->nesting);
}
@@ -3636,11 +3642,17 @@ static int tracing_open(struct inode *inode, struct file *file)
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
int cpu = tracing_get_cpu(inode);
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (tr->current_trace->print_max)
+ trace_buf = &tr->max_buffer;
+#endif
if (cpu == RING_BUFFER_ALL_CPUS)
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(trace_buf);
else
- tracing_reset(&tr->trace_buffer, cpu);
+ tracing_reset(trace_buf, cpu);
}
if (file->f_mode & FMODE_READ) {
@@ -5275,7 +5287,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracing_is_on() && iter->pos)
+ if (!tracer_tracing_is_on(iter->tr) && iter->pos)
break;
mutex_unlock(&iter->mutex);
@@ -5814,7 +5826,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ if (tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&tr->max_buffer);
#endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b0f86ea..ca70d11 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
goto out_free;
if (cnt > 1) {
if (trace_selftest_test_global_cnt == 0)
- goto out;
+ goto out_free;
}
if (trace_selftest_test_dyn_cnt == 0)
goto out_free;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0e5e54f..3630826 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -70,6 +70,7 @@ enum {
* attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
+ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */
@@ -167,7 +168,6 @@ struct worker_pool {
/* L: hash of busy workers */
/* see manage_workers() for details on the two manager mutexes */
- struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = mutex_is_locked(&pool->manager_arb);
+ bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
@@ -1985,24 +1986,17 @@ static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- /*
- * Anyone who successfully grabs manager_arb wins the arbitration
- * and becomes the manager. mutex_trylock() on pool->manager_arb
- * failure while holding pool->lock reliably indicates that someone
- * else is managing the pool and the worker which failed trylock
- * can proceed to executing work items. This means that anyone
- * grabbing manager_arb is responsible for actually performing
- * manager duties. If manager_arb is grabbed and released without
- * actual management, the pool may stall indefinitely.
- */
- if (!mutex_trylock(&pool->manager_arb))
+ if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
+
+ pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;
maybe_create_worker(pool);
pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+ wake_up(&wq_manager_wait);
return true;
}
@@ -3210,7 +3204,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool);
- mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers);
@@ -3280,13 +3273,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node);
/*
- * Become the manager and destroy all workers. Grabbing
- * manager_arb prevents @pool's workers from blocking on
- * attach_mutex.
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
*/
- mutex_lock(&pool->manager_arb);
-
spin_lock_irq(&pool->lock);
+ wait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3300,8 +3295,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);
- mutex_unlock(&pool->manager_arb);
-
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 8635417..29fa81f 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -59,7 +60,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 0bd8a61..1ef0cec 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -228,7 +228,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
hdr = 2;
/* Extract a tag from the data */
- if (unlikely(dp >= datalen - 1))
+ if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
- if (unlikely(dp >= datalen - n))
+ if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
@@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}
+ } else {
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
}
if (flags & FLAG_CONS) {
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0..5cd0935 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
- /* Otherwise we can just insert a new node ahead of the old
- * one.
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
*/
- goto present_leaves_cluster_but_not_new_leaf;
+ pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
- /* We need to split the current node; we know that the node doesn't
- * simply contain a full set of leaves that cluster together (it
- * contains meta pointers and/or non-clustering leaves).
+ /* We need to split the current node. The node must contain anything
+ * from a single leaf (in the one leaf case, this leaf will cluster
+ * with the new leaf) and the rest meta-pointers, to all leaves, some
+ * of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
- * leaves in the node and the new leaf.
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
@@ -717,33 +727,6 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
-present_leaves_cluster_but_not_new_leaf:
- /* All the old leaves cluster in the same slot, but the new leaf wants
- * to go into a different slot, so we create a new node to hold the new
- * leaf and a pointer to a new node holding all the old leaves.
- */
- pr_devel("present leaves cluster but not new leaf\n");
-
- new_n0->back_pointer = node->back_pointer;
- new_n0->parent_slot = node->parent_slot;
- new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
- new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
- new_n1->parent_slot = edit->segment_cache[0];
- new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
- edit->adjust_count_on = new_n0;
-
- for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
- new_n1->slots[i] = node->slots[i];
-
- new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
- edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
- edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
- edit->set[0].to = assoc_array_node_to_ptr(new_n0);
- edit->excised_meta[0] = assoc_array_node_to_ptr(node);
- pr_devel("<--%s() = ok [insert node before]\n", __func__);
- return true;
-
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/digsig.c b/lib/digsig.c
index 55b8b2f..a876156 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
down_read(&key->sem);
ukp = user_key_payload(key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ err = -EKEYREVOKED;
+ goto err1;
+ }
+
if (ukp->datalen < sizeof(*pkh))
goto err1;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 08f8043..d01f471 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
if (time_is_before_jiffies(rs->begin + rs->interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
- pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
+ printk_deferred(KERN_WARNING
+ "%s: %d callbacks suppressed\n",
+ func, rs->missed);
rs->missed = 0;
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3b38b73..fce6c48 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -462,6 +462,8 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
struct mem_cgroup_tree_per_node *mctz;
mctz = soft_limit_tree_from_page(page);
+ if (!mctz)
+ return;
/*
* Necessary to update all ancestors when hierarchy is used.
* because their event counter is not touched.
@@ -499,7 +501,8 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
for_each_node(nid) {
mz = mem_cgroup_nodeinfo(memcg, nid);
mctz = soft_limit_tree_node(nid);
- mem_cgroup_remove_exceeded(mz, mctz);
+ if (mctz)
+ mem_cgroup_remove_exceeded(mz, mctz);
}
}
@@ -2565,7 +2568,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
* is empty. Do it lockless to prevent lock bouncing. Races
* are acceptable as soft limit is best effort anyway.
*/
- if (RB_EMPTY_ROOT(&mctz->rb_root))
+ if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
return 0;
/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6eb61a4..0dab426 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -179,7 +179,7 @@ static void release_memory_resource(struct resource *res)
void get_page_bootmem(unsigned long info, struct page *page,
unsigned long type)
{
- page->lru.next = (struct list_head *) type;
+ page->freelist = (void *)type;
SetPagePrivate(page);
set_page_private(page, info);
page_ref_inc(page);
@@ -189,11 +189,12 @@ void put_page_bootmem(struct page *page)
{
unsigned long type;
- type = (unsigned long) page->lru.next;
+ type = (unsigned long) page->freelist;
BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
if (page_ref_dec_return(page) == 1) {
+ page->freelist = NULL;
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af783a6..1f13413 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/show_mem_notifier.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlb.h>
#include "internal.h"
@@ -495,6 +496,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
}
/*
+ * If the mm has notifiers then we would need to invalidate them around
+ * unmap_page_range and that is risky because notifiers can sleep and
+ * what they do is basically undeterministic. So let's have a short
+ * sleep to give the oom victim some more time.
+ * TODO: we really want to get rid of this ugly hack and make sure that
+ * notifiers cannot block for unbounded amount of time and add
+ * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+ */
+ if (mm_has_notifiers(mm)) {
+ up_read(&mm->mmap_sem);
+ schedule_timeout_idle(HZ);
+ goto unlock_oom;
+ }
+
+ /*
* increase mm_users only after we know we will reap something so
* that the mmput_async is called only when we have reaped something
* and delayed __mmput doesn't matter that much
diff --git a/mm/page_owner.c b/mm/page_owner.c
index fe850b9..c4381d93 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -554,11 +554,17 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
/*
- * We are safe to check buddy flag and order, because
- * this is init stage and only single thread runs.
+ * To avoid having to grab zone->lock, be a little
+ * careful when reading buddy page order. The only
+ * danger is that we skip too much and potentially miss
+ * some early allocated pages, which is better than
+ * heavy lock contention.
*/
if (PageBuddy(page)) {
- pfn += (1UL << page_order(page)) - 1;
+ unsigned long order = page_order_unsafe(page);
+
+ if (order > 0 && order < MAX_ORDER)
+ pfn += (1UL << order) - 1;
continue;
}
@@ -577,6 +583,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
set_page_owner(page, 0, 0);
count++;
}
+ cond_resched();
}
pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
@@ -587,15 +594,12 @@ static void init_zones_in_node(pg_data_t *pgdat)
{
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
- unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
- spin_lock_irqsave(&zone->lock, flags);
init_pages_in_zone(pgdat, zone);
- spin_unlock_irqrestore(&zone->lock, flags);
}
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5d2f24f..622f6b6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -255,7 +255,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
{
struct kmem_cache *s;
- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
+ if (slab_nomerge)
return NULL;
if (ctor)
@@ -266,6 +266,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
size = ALIGN(size, align);
flags = kmem_cache_flags(size, flags, name, NULL);
+ if (flags & SLAB_NEVER_MERGE)
+ return NULL;
+
list_for_each_entry_reverse(s, &slab_caches, list) {
if (slab_unmergeable(s))
continue;
diff --git a/mm/sparse.c b/mm/sparse.c
index 1e168bf..8c4c82e 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -662,7 +662,7 @@ static void free_map_bootmem(struct page *memmap)
>> PAGE_SHIFT;
for (i = 0; i < nr_pages; i++, page++) {
- magic = (unsigned long) page->lru.next;
+ magic = (unsigned long) page->freelist;
BUG_ON(magic == NODE_INFO);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 7625ec8..5d4006e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1098,11 +1098,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
spin_unlock_bh(&br->lock);
}
- err = br_changelink(dev, tb, data);
+ err = register_netdevice(dev);
if (err)
return err;
- return register_netdevice(dev);
+ err = br_changelink(dev, tb, data);
+ if (err)
+ unregister_netdevice(dev);
+ return err;
}
static size_t br_get_size(const struct net_device *brdev)
diff --git a/net/compat.c b/net/compat.c
index 1cd2ec0..a96fd2f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -22,6 +22,7 @@
#include <linux/filter.h>
#include <linux/compat.h>
#include <linux/security.h>
+#include <linux/audit.h>
#include <linux/export.h>
#include <net/scm.h>
@@ -781,14 +782,24 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
{
- int ret;
- u32 a[6];
+ u32 a[AUDITSC_ARGS];
+ unsigned int len;
u32 a0, a1;
+ int ret;
if (call < SYS_SOCKET || call > SYS_SENDMMSG)
return -EINVAL;
- if (copy_from_user(a, args, nas[call]))
+ len = nas[call];
+ if (len > sizeof(a))
+ return -EINVAL;
+
+ if (copy_from_user(a, args, len))
return -EFAULT;
+
+ ret = audit_socketcall_compat(len / sizeof(a[0]), a);
+ if (ret)
+ return ret;
+
a0 = a[0];
a1 = a[1];
diff --git a/net/core/dev.c b/net/core/dev.c
index bc129b0..18de74e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2357,6 +2357,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
unsigned long flags;
+ if (unlikely(!skb))
+ return;
+
if (likely(atomic_read(&skb->users) == 1)) {
smp_rmb();
atomic_set(&skb->users, 0);
@@ -2949,6 +2952,37 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
return rc;
}
+static int xmit_list(struct sk_buff *list, struct net_device *dev,
+ struct netdev_queue *txq)
+{
+ unsigned int len;
+ int rc;
+ struct sk_buff *skb = list, *head = list;
+
+ /* Call the taps for individual skb's in the list. */
+ if (!list_empty(&ptype_all)) {
+ while (skb) {
+ struct sk_buff *next = skb->next;
+
+ skb->next = NULL;
+
+ dev_queue_xmit_nit(skb, dev);
+
+ skb = next;
+ /* Keep the original list intact. */
+ head->next = skb;
+ head = head->next;
+ }
+ }
+
+ len = list->len;
+ trace_net_dev_start_xmit(list, dev);
+ rc = netdev_start_xmit(list, dev, txq, false);
+ trace_net_dev_xmit(list, rc, dev, len);
+
+ return rc;
+}
+
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
struct netdev_queue *txq, int *ret)
{
@@ -2977,6 +3011,25 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
return skb;
}
+struct sk_buff *dev_hard_start_xmit_list(struct sk_buff *first,
+ struct net_device *dev,
+ struct netdev_queue *txq, int *ret)
+{
+ struct sk_buff *skb = first;
+ int rc = NETDEV_TX_OK;
+
+ if (skb) {
+ rc = xmit_list(skb, dev, txq);
+ if (unlikely(!dev_xmit_complete(rc)))
+ goto out;
+ skb = NULL;
+ }
+
+out:
+ *ret = rc;
+ return skb;
+}
+
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{
@@ -3327,6 +3380,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
* __dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
* @accel_priv: private data used for L2 forwarding offload
+ * @skb_list: Boolean used for skb list processing.
*
* Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling
@@ -3349,7 +3403,8 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv,
+ bool skb_list)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -3424,7 +3479,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
- skb = dev_hard_start_xmit(skb, dev, txq, &rc);
+ if (likely(!skb_list))
+ skb = dev_hard_start_xmit(skb, dev,
+ txq, &rc);
+ else
+ skb = dev_hard_start_xmit_list(skb,
+ dev,
+ txq,
+ &rc);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
@@ -3457,16 +3519,22 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
int dev_queue_xmit(struct sk_buff *skb)
{
- return __dev_queue_xmit(skb, NULL);
+ return __dev_queue_xmit(skb, NULL, false);
}
EXPORT_SYMBOL(dev_queue_xmit);
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
- return __dev_queue_xmit(skb, accel_priv);
+ return __dev_queue_xmit(skb, accel_priv, false);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
+int dev_queue_xmit_list(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL, true);
+}
+EXPORT_SYMBOL(dev_queue_xmit_list);
+
/*=======================================================================
Receiver routines
@@ -4103,6 +4171,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
return 0;
}
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(athrs_fast_nat_recv);
+
+int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(embms_tm_multicast_recv);
+
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
struct packet_type *ptype, *pt_prev;
@@ -4111,6 +4185,8 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
+ int (*fast_recv)(struct sk_buff *skb);
+ int (*embms_recv)(struct sk_buff *skb);
net_timestamp_check(!netdev_tstamp_prequeue, skb);
@@ -4170,6 +4246,18 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
goto out;
}
#endif
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+
+ embms_recv = rcu_dereference(embms_tm_multicast_recv);
+ if (embms_recv)
+ embms_recv(skb);
+
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = 0;
ncls:
@@ -4843,6 +4931,24 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
+static void net_rps_send_ipi(struct softnet_data *remsd)
+{
+#ifdef CONFIG_RPS
+ while (remsd) {
+ struct softnet_data *next = remsd->rps_ipi_next;
+
+ if (cpu_online(remsd->cpu)) {
+ smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ } else {
+ rps_lock(remsd);
+ remsd->backlog.state = 0;
+ rps_unlock(remsd);
+ }
+ remsd = next;
+ }
+#endif
+}
+
/*
* net_rps_action_and_irq_enable sends any pending IPI's for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
@@ -4858,20 +4964,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
local_irq_enable();
/* Send pending IPI's to kick RPS processing on remote cpus. */
- while (remsd) {
- struct softnet_data *next = remsd->rps_ipi_next;
-
- if (cpu_online(remsd->cpu)) {
- smp_call_function_single_async(remsd->cpu,
- &remsd->csd);
- } else {
- pr_err("%s() cpu offline\n", __func__);
- rps_lock(remsd);
- remsd->backlog.state = 0;
- rps_unlock(remsd);
- }
- remsd = next;
- }
+ net_rps_send_ipi(remsd);
} else
#endif
local_irq_enable();
@@ -8028,7 +8121,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
struct sk_buff **list_skb;
struct sk_buff *skb;
unsigned int cpu, oldcpu = (unsigned long)ocpu;
- struct softnet_data *sd, *oldsd;
+ struct softnet_data *sd, *oldsd, *remsd;
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
return NOTIFY_OK;
@@ -8072,6 +8165,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+#ifdef CONFIG_RPS
+ remsd = oldsd->rps_ipi_list;
+ oldsd->rps_ipi_list = NULL;
+#endif
+ /* send out pending IPI's on offline CPU */
+ net_rps_send_ipi(remsd);
+
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
netif_rx_ni(skb);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index b6791d9..31c4041 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -425,6 +425,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
if (tb[FRA_TUN_ID])
rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+ err = -EINVAL;
if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
@@ -446,7 +447,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
else
rule->suppress_ifgroup = -1;
- err = -EINVAL;
if (tb[FRA_GOTO]) {
if (rule->action != FR_ACT_GOTO)
goto errout_free;
@@ -576,8 +576,10 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
if (tb[FRA_UID_RANGE]) {
range = nla_get_kuid_range(tb);
- if (!uid_range_set(&range))
+ if (!uid_range_set(&range)) {
+ err = -EINVAL;
goto errout;
+ }
} else {
range = fib_kuid_range_unset;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 62893eb..b91cecc 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -941,7 +941,11 @@ static void neigh_timer_handler(unsigned long arg)
if (!mod_timer(&neigh->timer, next))
neigh_hold(neigh);
}
- if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
+
+ if (neigh_probe_enable) {
+ if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE | NUD_STALE))
+ neigh_probe(neigh);
+ } else if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
neigh_probe(neigh);
} else {
out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4d26297..c2339b8 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3758,6 +3758,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
return -EMSGSIZE;
ifsm = nlmsg_data(nlh);
+ ifsm->family = PF_UNSPEC;
+ ifsm->pad1 = 0;
+ ifsm->pad2 = 0;
ifsm->ifindex = dev->ifindex;
ifsm->filter_mask = filter_mask;
diff --git a/net/core/sock.c b/net/core/sock.c
index f07eaea..c6f42ee 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1501,6 +1501,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sock_copy(newsk, sk);
+ newsk->sk_prot_creator = sk->sk_prot;
+
/* SANITY */
if (likely(newsk->sk_net_refcnt))
get_net(sock_net(newsk));
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412..e1d4d89 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key)) {
+ if (key_is_positive(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 96e47c5..39bb5b3 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,12 +1,13 @@
config HAVE_NET_DSA
def_bool y
- depends on NETDEVICES && !S390
+ depends on INET && NETDEVICES && !S390
# Drivers must select NET_DSA and the appropriate tagging format
config NET_DSA
tristate "Distributed Switch Architecture"
- depends on HAVE_NET_DSA && NET_SWITCHDEV
+ depends on HAVE_NET_DSA
+ select NET_SWITCHDEV
select PHYLIB
---help---
Say Y if you want to enable support for the hardware switches supported
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 079d76b..5000e6f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1269,26 +1269,32 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
p->old_duplex = -1;
ds->ports[port].netdev = slave_dev;
- ret = register_netdev(slave_dev);
- if (ret) {
- netdev_err(master, "error %d registering interface %s\n",
- ret, slave_dev->name);
- ds->ports[port].netdev = NULL;
- free_netdev(slave_dev);
- return ret;
- }
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(p, slave_dev);
if (ret) {
netdev_err(master, "error %d setting up slave phy\n", ret);
- unregister_netdev(slave_dev);
- free_netdev(slave_dev);
- return ret;
+ goto out_free;
+ }
+
+ ret = register_netdev(slave_dev);
+ if (ret) {
+ netdev_err(master, "error %d registering interface %s\n",
+ ret, slave_dev->name);
+ goto out_phy;
}
return 0;
+
+out_phy:
+ phy_disconnect(p->phy);
+ if (of_phy_is_fixed_link(ds->ports[port].dn))
+ of_phy_deregister_fixed_link(ds->ports[port].dn);
+out_free:
+ free_netdev(slave_dev);
+ ds->ports[port].netdev = NULL;
+ return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
diff --git a/net/embms_kernel/Makefile b/net/embms_kernel/Makefile
new file mode 100644
index 0000000..c21480e
--- /dev/null
+++ b/net/embms_kernel/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for Embms Kernel module.
+#
+
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+obj-m += embms_kernel.o
+
+ccflags-y += -D__CHECK_ENDIAN__
+
+CDEFINES += -D__CHECK_ENDIAN__
+
+KBUILD_CPPFLAGS += $(CDEFINES)
+
+all:
+ $(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules
+modules_install:
+ $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
+
+clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean
+
diff --git a/net/embms_kernel/embms_kernel.c b/net/embms_kernel/embms_kernel.c
new file mode 100644
index 0000000..3bbe51b
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.c
@@ -0,0 +1,1031 @@
+/*************************************************************************
+ * -----------------------------------------------------------------------
+ * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * -----------------------------------------------------------------------
+
+ * DESCRIPTION
+ * Main file for eMBMS Tunneling Module in kernel.
+ *************************************************************************
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <net/ip.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <net/arp.h>
+#include <net/neighbour.h>
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/in.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/miscdevice.h>
+#include "embms_kernel.h"
+
+struct embms_info_internal embms_conf;
+
+/* Global structures used for tunneling. These include
+ * iphdr and udphdr which are appended to skbs for
+ * tunneling, net_device and tunnleing related
+ * structs and params
+ */
+
+unsigned char hdr_buff[sizeof(struct iphdr) + sizeof(struct udphdr)];
+struct iphdr *iph_global;
+struct udphdr *udph_global;
+struct net_device *dev_global;
+
+static struct tmgi_to_clnt_info tmgi_to_clnt_map_tbl;
+
+/* handle_multicast_stream - packet forwarding
+ * function for multicast stream
+ * Main use case is for EMBMS Over Softap feature
+ */
+
+static int handle_multicast_stream(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ struct udphdr *udph;
+ unsigned char *tmp_ptr = NULL;
+ struct sk_buff *skb_new = NULL;
+ struct sk_buff *skb_cpy = NULL;
+ struct clnt_info *temp_client = NULL;
+ struct tmgi_to_clnt_info *temp_tmgi = NULL;
+ struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+ struct list_head *clnt_ptr, *prev_clnt_ptr;
+ int hdr_size = sizeof(*udph) + sizeof(*iph) + ETH_HLEN;
+
+ /* only IP packets */
+ if (htons(ETH_P_IP) != skb->protocol) {
+ embms_error("Not an IP packet\n");
+ return 0;
+ }
+
+ if (embms_conf.embms_tunneling_status == TUNNELING_OFF) {
+ embms_debug("Tunneling Disabled. Can't process packets\n");
+ return 0;
+ }
+
+ if (unlikely(memcmp(skb->dev->name, embms_conf.embms_iface,
+ strlen(embms_conf.embms_iface)) != 0)) {
+ embms_error("Packet received on %s iface. NOT an EMBMS Iface\n",
+ skb->dev->name);
+ return 0;
+ }
+
+ /* Check if dst ip of packet is same as multicast ip of any tmgi*/
+
+ iph = (struct iphdr *)skb->data;
+ udph = (struct udphdr *)(skb->data + sizeof(struct iphdr));
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+ &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+ temp_tmgi = list_entry(tmgi_entry_ptr,
+ struct tmgi_to_clnt_info,
+ tmgi_list_ptr);
+
+ if ((temp_tmgi->tmgi_multicast_addr == iph->daddr) &&
+ (temp_tmgi->tmgi_port == udph->dest))
+ break;
+ }
+
+ if (tmgi_entry_ptr == &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+ embms_error("handle_multicast_stream:");
+ embms_error("could not find matchin tmgi entry\n");
+ spin_unlock_bh(&embms_conf.lock);
+ return 0;
+ }
+
+ /* Found a matching tmgi entry. Realloc headroom to
+ * accommodate new Ethernet, IP and UDP header
+ */
+
+ skb_new = skb_realloc_headroom(skb, hdr_size);
+ if (unlikely(!skb_new)) {
+ embms_error("Can't allocate headroom\n");
+ spin_unlock_bh(&embms_conf.lock);
+ return 0;
+ }
+
+ /* push skb->data and copy IP and UDP headers*/
+
+ tmp_ptr = skb_push(skb_new,
+ sizeof(struct udphdr) + sizeof(struct iphdr));
+
+ iph = (struct iphdr *)tmp_ptr;
+ udph = (struct udphdr *)(tmp_ptr + sizeof(struct iphdr));
+
+ memcpy(tmp_ptr, hdr_buff, hdr_size - ETH_HLEN);
+ udph->len = htons(skb_new->len - sizeof(struct iphdr));
+ iph->tot_len = htons(skb_new->len);
+
+ list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+ &temp_tmgi->client_list_head) {
+ temp_client = list_entry(clnt_ptr,
+ struct clnt_info,
+ client_list_ptr);
+
+ /* Make a copy of skb_new with new IP and UDP header.
+ * We can't use skb_new or its clone here since we need to
+ * constantly change dst ip and dst port which is not possible
+ * for shared memory as is the case with skb_new.
+ */
+
+ skb_cpy = skb_copy(skb_new, GFP_ATOMIC);
+ if (unlikely(!skb_cpy)) {
+ embms_error("Can't copy skb\n");
+ kfree_skb(skb_new);
+ return 0;
+ }
+
+ iph = (struct iphdr *)skb_cpy->data;
+ udph = (struct udphdr *)(skb_cpy->data + sizeof(struct iphdr));
+
+ iph->id = htons(atomic_inc_return(&embms_conf.ip_ident));
+
+ /* Calculate checksum for new IP and UDP header*/
+
+ udph->dest = temp_client->port;
+ skb_cpy->csum = csum_partial((char *)udph,
+ ntohs(udph->len),
+ skb_cpy->csum);
+
+ iph->daddr = temp_client->addr;
+ ip_send_check(iph);
+
+ udph->check = 0;
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ ntohs(udph->len),
+ IPPROTO_UDP,
+ skb_cpy->csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+
+ if (unlikely(!dev_global)) {
+ embms_error("Global device NULL\n");
+ kfree_skb(skb_cpy);
+ kfree_skb(skb_new);
+ return 0;
+ }
+
+ /* update device info and add MAC header*/
+
+ skb_cpy->dev = dev_global;
+
+ skb_cpy->dev->header_ops->create(skb_cpy, skb_cpy->dev,
+ ETH_P_IP, temp_client->dmac,
+ NULL, skb_cpy->len);
+ dev_queue_xmit(skb_cpy);
+ }
+
+ spin_unlock_bh(&embms_conf.lock);
+ kfree_skb(skb_new);
+ return 1;
+}
+
+static int check_embms_device(atomic_t *use_count)
+{
+ int ret;
+
+ if (atomic_inc_return(use_count) == 1) {
+ ret = 0;
+ } else {
+ atomic_dec(use_count);
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int embms_device_open(struct inode *inode, struct file *file)
+{
+ /*Check if the device is busy*/
+ if (check_embms_device(&embms_conf.device_under_use)) {
+ embms_error("embms_tm_open : EMBMS device busy\n");
+ return -EBUSY;
+ }
+
+ try_module_get(THIS_MODULE);
+ return SUCCESS;
+}
+
+static int embms_device_release(struct inode *inode, struct file *file)
+{
+ /* Reduce device use count before leaving*/
+ embms_debug("Releasing EMBMS device..\n");
+ atomic_dec(&embms_conf.device_under_use);
+ embms_conf.embms_tunneling_status = TUNNELING_OFF;
+ module_put(THIS_MODULE);
+ return SUCCESS;
+}
+
+static struct tmgi_to_clnt_info *check_for_tmgi_entry(u32 addr,
+ u16 port)
+{
+ struct list_head *tmgi_ptr, *prev_tmgi_ptr;
+ struct tmgi_to_clnt_info *temp_tmgi = NULL;
+
+ embms_debug("check_for_tmgi_entry: mcast addr :%pI4, port %u\n",
+ &addr, ntohs(port));
+
+ list_for_each_safe(tmgi_ptr,
+ prev_tmgi_ptr,
+ &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+ temp_tmgi = list_entry(tmgi_ptr,
+ struct tmgi_to_clnt_info,
+ tmgi_list_ptr);
+
+ if ((temp_tmgi->tmgi_multicast_addr == addr) &&
+ (temp_tmgi->tmgi_port == port)) {
+ embms_debug("check_for_tmgi_entry:TMGI entry found\n");
+ return temp_tmgi;
+ }
+ }
+ return NULL;
+}
+
+static struct clnt_info *chk_clnt_entry(struct tmgi_to_clnt_info *tmgi,
+ struct tmgi_to_clnt_info_update *clnt)
+{
+ struct list_head *clnt_ptr, *prev_clnt_ptr;
+ struct clnt_info *temp_client = NULL;
+
+ embms_debug("check_for_client_entry: clnt addr :%pI4, port %u\n",
+ &clnt->client_addr, ntohs(clnt->client_port));
+
+ list_for_each_safe(clnt_ptr,
+ prev_clnt_ptr,
+ &tmgi->client_list_head) {
+ temp_client = list_entry(clnt_ptr,
+ struct clnt_info,
+ client_list_ptr);
+ if ((temp_client->addr == clnt->client_addr) &&
+ (temp_client->port == clnt->client_port)) {
+ embms_debug("Clnt entry present\n");
+ return temp_client;
+ }
+ }
+ return NULL;
+}
+
+static int add_new_tmgi_entry(struct tmgi_to_clnt_info_update *info_update,
+ struct clnt_info *clnt)
+{
+ struct tmgi_to_clnt_info *new_tmgi = NULL;
+
+ embms_debug("add_new_tmgi_entry:Enter\n");
+
+ new_tmgi = kzalloc(sizeof(*new_tmgi),
+ GFP_ATOMIC);
+ if (!new_tmgi) {
+ embms_error("add_new_tmgi_entry: mem alloc failed\n");
+ return -ENOMEM;
+ }
+
+ memset(new_tmgi, 0, sizeof(struct tmgi_to_clnt_info));
+
+ new_tmgi->tmgi_multicast_addr = info_update->multicast_addr;
+ new_tmgi->tmgi_port = info_update->multicast_port;
+
+ embms_debug("add_new_tmgi_entry:");
+ embms_debug("New tmgi multicast addr :%pI4 , port %u\n",
+ &info_update->multicast_addr,
+ ntohs(info_update->multicast_port));
+
+ embms_debug("add_new_tmgi_entry:Adding client entry\n");
+
+ spin_lock_bh(&embms_conf.lock);
+
+ INIT_LIST_HEAD(&new_tmgi->client_list_head);
+ list_add(&clnt->client_list_ptr,
+ &new_tmgi->client_list_head);
+ new_tmgi->no_of_clients++;
+
+ /* Once above steps are done successfully,
+ * we add tmgi entry to our local table
+ */
+
+ list_add(&new_tmgi->tmgi_list_ptr,
+ &tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+ embms_conf.no_of_tmgi_sessions++;
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ return SUCCESS;
+}
+
+static void print_tmgi_to_client_table(void)
+{
+ int i, j;
+ struct clnt_info *temp_client = NULL;
+ struct tmgi_to_clnt_info *temp_tmgi = NULL;
+ struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+ struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+ embms_debug("====================================================\n");
+ embms_debug("Printing TMGI to Client Table :\n");
+ embms_debug("No of Active TMGIs : %d\n",
+ embms_conf.no_of_tmgi_sessions);
+ embms_debug("====================================================\n\n");
+
+ if (embms_conf.no_of_tmgi_sessions > 0) {
+ i = 1;
+ list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+ &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+ temp_tmgi = list_entry(tmgi_entry_ptr,
+ struct tmgi_to_clnt_info,
+ tmgi_list_ptr);
+
+ embms_debug("TMGI entry %d :\n", i);
+ embms_debug("TMGI multicast addr : %pI4 , port %u\n\n",
+ &temp_tmgi->tmgi_multicast_addr,
+ ntohs(temp_tmgi->tmgi_port));
+ embms_debug("No of clients : %d\n",
+ temp_tmgi->no_of_clients);
+ j = 1;
+
+ list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+ &temp_tmgi->client_list_head) {
+ temp_client = list_entry(clnt_ptr,
+ struct clnt_info,
+ client_list_ptr);
+ embms_debug("Client entry %d :\n", j);
+ embms_debug("client addr : %pI4 , port %u\n\n",
+ &temp_client->addr,
+ ntohs(temp_client->port));
+ j++;
+ }
+ i++;
+ embms_debug("===========================================\n\n");
+ }
+ } else {
+ embms_debug("No TMGI entries to Display\n");
+ }
+ embms_debug("==================================================================\n\n");
+}
+
+/**
+ * delete_tmgi_entry_from_table() - deletes tmgi from global tmgi-client table
+ * @buffer: Buffer containing TMGI info for deletion.
+ *
+ * This function completely removes the TMGI from
+ * global TMGI-client table, along with the client list
+ * so that no packets for this TMGI are processed
+ *
+ * Return: Success on deleting TMGI entry, error otherwise.
+ */
+
+int delete_tmgi_entry_from_table(char *buffer)
+{
+ struct tmgi_to_clnt_info_update *info_update;
+ struct clnt_info *temp_client = NULL;
+ struct tmgi_to_clnt_info *temp_tmgi = NULL;
+ struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+ embms_debug("delete_tmgi_entry_from_table: Enter\n");
+
+ info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+ if (!info_update) {
+ embms_error("delete_tmgi_entry_from_table:");
+ embms_error("NULL arguments passed\n");
+ return -EBADPARAM;
+ }
+
+ /* This function is used to delete a specific TMGI entry
+ * when that particular TMGI goes down
+ * Search for the TMGI entry in our local table
+ */
+ if (embms_conf.no_of_tmgi_sessions == 0) {
+ embms_error("TMGI count 0. Nothing to delete\n");
+ return SUCCESS;
+ }
+
+ temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+ info_update->multicast_port);
+
+ if (!temp_tmgi) {
+ /* TMGI entry was not found in our local table*/
+ embms_error("delete_client_entry_from_table :");
+ embms_error("Desired TMGI entry not found\n");
+ return -EBADPARAM;
+ }
+
+ spin_lock_bh(&embms_conf.lock);
+
+ /* We need to free memory allocated to client entries
+ * for a particular TMGI entry
+ */
+
+ list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+ &temp_tmgi->client_list_head) {
+ temp_client = list_entry(clnt_ptr,
+ struct clnt_info,
+ client_list_ptr);
+ embms_debug("delete_tmgi_entry_from_table :");
+ embms_debug("Client addr to delete :%pI4 , port %u\n",
+ &temp_client->addr, ntohs(temp_client->port));
+ list_del(&temp_client->client_list_ptr);
+ temp_tmgi->no_of_clients--;
+ kfree(temp_client);
+ }
+
+ /* Free memory allocated to tmgi entry*/
+
+ list_del(&temp_tmgi->tmgi_list_ptr);
+ kfree(temp_tmgi);
+ embms_conf.no_of_tmgi_sessions--;
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ embms_debug("delete_tmgi_entry_from_table : TMGI Entry deleted.\n");
+
+ return SUCCESS;
+}
+
+/**
+ * delete_client_entry_from_all_tmgi() - deletes client from all tmgi lists
+ * @buffer: Buffer containing client info for deletion.
+ *
+ * This function completely removes a client from
+ * all TMGIs in global TMGI-client table. Also deletes TMGI
+ * entries if no more clients are there
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_all_tmgi(char *buffer)
+{
+ struct tmgi_to_clnt_info_update *info_update;
+ struct clnt_info *temp_client = NULL;
+ struct tmgi_to_clnt_info *tmgi = NULL;
+ struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+
+ /* We use this function when we want to delete any
+ * client entry from all TMGI entries. This scenario
+ * happens when any client disconnects and hence
+ * we need to clean all related client entries
+ * in our mapping table
+ */
+
+ embms_debug("del_clnt_from_all_tmgi: Enter\n");
+
+ info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+ if (!info_update) {
+ embms_error("del_clnt_from_all_tmgi:");
+ embms_error("NULL arguments passed\n");
+ return -EBADPARAM;
+ }
+
+ /* We start checking from first TMGI entry and if client
+ * entry is found in client entries of any TMGI, we clean
+ * up that client entry from that TMGI entry
+ */
+ if (embms_conf.no_of_tmgi_sessions == 0)
+ return SUCCESS;
+
+ list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+ &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+ tmgi = list_entry(tmgi_entry_ptr,
+ struct tmgi_to_clnt_info,
+ tmgi_list_ptr);
+
+ temp_client = chk_clnt_entry(tmgi, info_update);
+ if (!temp_client)
+ continue;
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_del(&temp_client->client_list_ptr);
+ tmgi->no_of_clients--;
+ kfree(temp_client);
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ temp_client = NULL;
+
+ if (tmgi->no_of_clients == 0) {
+ /* Deleted clnt was the only clnt for
+ * that TMGI we need to delete TMGI
+ * entry from table
+ */
+ embms_debug("del_clnt_from_all_tmgi:");
+ embms_debug("Deleted client was ");
+ embms_debug("last client for tmgi\n");
+ embms_debug("del_clnt_from_all_tmgi:");
+ embms_debug("Delting tmgi as it has ");
+ embms_debug("zero clients.TMGI IP ");
+ embms_debug(":%pI4 , port %u\n",
+ &tmgi->tmgi_multicast_addr,
+ ntohs(tmgi->tmgi_port));
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_del(&tmgi->tmgi_list_ptr);
+ embms_conf.no_of_tmgi_sessions--;
+ kfree(tmgi);
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ embms_debug("del_clnt_from_all_tmgi:");
+ embms_debug("TMGI entry deleted\n");
+ }
+ }
+
+ embms_debug("del_clnt_from_all_tmgi Successful\n");
+ return SUCCESS;
+}
+
+/**
+ * add_client_entry_to_table() - add client entry to specified TMGI
+ * @buffer: Buffer containing client info for addition.
+ *
+ * This function adds a client to the specified TMGI in
+ * the global TMGI-client table. If TMGI entry is not
+ * present, it adds a new TMGI entry and adds client
+ * entry to it.
+ *
+ * Return: Success on adding client entry, error otherwise.
+ */
+int add_client_entry_to_table(char *buffer)
+{
+ int ret;
+ struct tmgi_to_clnt_info_update *info_update;
+ struct clnt_info *new_client = NULL;
+ struct tmgi_to_clnt_info *tmgi = NULL;
+ struct neighbour *neigh_entry;
+
+ embms_debug("add_client_entry_to_table: Enter\n");
+
+ info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+ if (!info_update) {
+ embms_error("add_client_entry_to_table:");
+ embms_error("NULL arguments passed\n");
+ return -EBADPARAM;
+ }
+
+ new_client = kzalloc(sizeof(*new_client), GFP_ATOMIC);
+ if (!new_client) {
+ embms_error("add_client_entry_to_table:");
+ embms_error("Cannot allocate memory\n");
+ return -ENOMEM;
+ }
+
+ new_client->addr = info_update->client_addr;
+ new_client->port = info_update->client_port;
+
+ neigh_entry = __ipv4_neigh_lookup(dev_global,
+ (u32)(new_client->addr));
+ if (!neigh_entry) {
+ embms_error("add_client_entry_to_table :");
+ embms_error("Can't find neighbour entry\n");
+ kfree(new_client);
+ return -EBADPARAM;
+ }
+
+ ether_addr_copy(new_client->dmac, neigh_entry->ha);
+
+ embms_debug("DMAC of client : %pM\n", new_client->dmac);
+
+ embms_debug("add_client_entry_to_table:");
+ embms_debug("New client addr :%pI4 , port %u\n",
+ &info_update->client_addr,
+ ntohs(info_update->client_port));
+
+ if (embms_conf.no_of_tmgi_sessions == 0) {
+ /* TMGI Client mapping table is empty.
+ * First client entry is being added
+ */
+
+ embms_debug("tmgi_to_clnt_map_tbl is empty\n");
+
+ ret = add_new_tmgi_entry(info_update, new_client);
+ if (ret != SUCCESS) {
+ kfree(new_client);
+ new_client = NULL;
+ }
+
+ goto exit_add;
+ }
+
+ /* In this case, table already has some entries
+ * and we need to search for the specific tmgi entry
+ * for which client entry is to be added
+ */
+
+ tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+ info_update->multicast_port);
+ if (tmgi) {
+ if (chk_clnt_entry(tmgi, info_update)) {
+ kfree(new_client);
+ return -ENOEFFECT;
+ }
+
+ /* Adding client to the client list
+ * for the specified TMGI
+ */
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_add(&new_client->client_list_ptr,
+ &tmgi->client_list_head);
+ tmgi->no_of_clients++;
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ ret = SUCCESS;
+ } else {
+ /* TMGI specified in the message was not found in
+ * mapping table.Hence, we need to add a new entry
+ * for this TMGI and add the specified client to the client
+ * list
+ */
+
+ embms_debug("TMGI entry not present. Adding tmgi entry\n");
+
+ ret = add_new_tmgi_entry(info_update, new_client);
+ if (ret != SUCCESS) {
+ kfree(new_client);
+ new_client = NULL;
+ }
+ }
+
+exit_add:
+ return ret;
+}
+
+/**
+ * delete_client_entry_from_table() - delete client entry from specified TMGI
+ * @buffer: Buffer containing client info for deletion.
+ *
+ * This function deletes a client from the specified TMGI in
+ * the global TMGI-client table. If this was the last client
+ * entry, it also deletes the TMGI entry.
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_table(char *buffer)
+{
+ struct tmgi_to_clnt_info_update *info_update;
+ struct clnt_info *temp_client = NULL;
+ struct tmgi_to_clnt_info *temp_tmgi = NULL;
+
+ embms_debug("delete_client_entry_from_table: Enter\n");
+
+ info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+ if (!info_update) {
+ embms_error("delete_client_entry_from_table:");
+ embms_error("NULL arguments passed\n");
+ return -EBADPARAM;
+ }
+
+ /* Search for the TMGI entry*/
+ if (embms_conf.no_of_tmgi_sessions == 0)
+ return SUCCESS;
+
+ temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+ info_update->multicast_port);
+
+ if (!temp_tmgi) {
+ embms_error("delete_client_entry_from_table:TMGI not found\n");
+ return -EBADPARAM;
+ }
+ /* Delete client entry for a specific tmgi*/
+
+ embms_debug("delete_client_entry_from_table:clnt addr :%pI4,port %u\n",
+ &info_update->client_addr,
+ ntohs(info_update->client_port));
+
+ temp_client = chk_clnt_entry(temp_tmgi, info_update);
+
+ if (!temp_client) {
+ /* Specified client entry was not found in client list
+ * of specified TMGI
+ */
+ embms_error("delete_client_entry_from_table:Clnt not found\n");
+ return -EBADPARAM;
+ }
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_del(&temp_client->client_list_ptr);
+ temp_tmgi->no_of_clients--;
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ kfree(temp_client);
+ temp_client = NULL;
+
+ embms_debug("delete_client_entry_from_table:Client entry deleted\n");
+
+ if (temp_tmgi->no_of_clients == 0) {
+ /* If deleted client was the only client for that TMGI
+ * we need to delete TMGI entry from table
+ */
+ embms_debug("delete_client_entry_from_table:");
+ embms_debug("Deleted client was the last client for tmgi\n");
+ embms_debug("delete_client_entry_from_table:");
+ embms_debug("Deleting tmgi since it has zero clients\n");
+
+ spin_lock_bh(&embms_conf.lock);
+
+ list_del(&temp_tmgi->tmgi_list_ptr);
+ embms_conf.no_of_tmgi_sessions--;
+ kfree(temp_tmgi);
+
+ spin_unlock_bh(&embms_conf.lock);
+
+ embms_debug("delete_client_entry_from_table: TMGI deleted\n");
+ }
+
+ if (embms_conf.no_of_tmgi_sessions == 0)
+ embms_conf.embms_tunneling_status = TUNNELING_OFF;
+
+ return SUCCESS;
+}
+
+/**
+ * embms_device_ioctl() - handle IOCTL calls to device
+ * @file: File descriptor of file opened from userspace process
+ * @ioctl_num: IOCTL to use
+ * @ioctl_param: IOCTL parameters/arguments
+ *
+ * This function is called whenever a process tries to do
+ * an ioctl on our device file. As per the IOCTL number,
+ * it calls various functions to manipulate global
+ * TMGI-client table
+ *
+ * Return: Success if functoin call returns SUCCESS, error otherwise.
+ */
+
+long embms_device_ioctl(struct file *file, unsigned int ioctl_num,
+			unsigned long ioctl_param)
+{
+	/* Fail closed: 'ret' was previously uninitialized and there was no
+	 * default case, so an unrecognized ioctl number returned stack
+	 * garbage to userspace.
+	 */
+	int ret = -EINVAL;
+	char buffer[BUF_LEN];
+	struct in_device *iface_dev;
+	struct in_ifaddr *iface_info;
+	struct tmgi_to_clnt_info_update *info_update;
+	char __user *argp = (char __user *)ioctl_param;
+
+	memset(buffer, 0, BUF_LEN);
+
+	/* Switch according to the ioctl called*/
+	switch (ioctl_num) {
+	case ADD_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = add_client_entry_to_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case DEL_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case TMGI_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_tmgi_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case CLIENT_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_all_tmgi(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case GET_EMBMS_TUNNELING_STATUS:
+		/* This ioctl is both input (ioctl_param) and
+		 * output (the return value of this function)
+		 */
+		embms_debug("Sending tunneling status : %d\n",
+			    embms_conf.embms_tunneling_status);
+		ret = embms_conf.embms_tunneling_status;
+		break;
+
+	case START_EMBMS_TUNNEL:
+
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		info_update = (struct tmgi_to_clnt_info_update *)buffer;
+		embms_conf.embms_data_port = info_update->data_port;
+		udph_global->source = embms_conf.embms_data_port;
+
+		memset(embms_conf.embms_iface, 0, EMBMS_MAX_IFACE_NAME);
+		memcpy(embms_conf.embms_iface, info_update->iface_name,
+		       EMBMS_MAX_IFACE_NAME);
+
+		embms_conf.embms_tunneling_status = TUNNELING_ON;
+		embms_debug("Starting Tunneling. Embms_data_port = %d\n",
+			    ntohs(embms_conf.embms_data_port));
+		embms_debug("Embms Data Iface = %s\n", embms_conf.embms_iface);
+		ret = SUCCESS;
+
+		/*Initialise dev_global to bridge device*/
+		dev_global = __dev_get_by_name(&init_net, BRIDGE_IFACE);
+		if (!dev_global) {
+			embms_error("Error in getting device info\n");
+			ret = FAILURE;
+		} else {
+			iface_dev = (struct in_device *)dev_global->ip_ptr;
+			iface_info = iface_dev->ifa_list;
+			/* Walk the bridge device's address list for the
+			 * entry labelled BRIDGE_IFACE.
+			 */
+			while (iface_info) {
+				if (memcmp(iface_info->ifa_label,
+					   BRIDGE_IFACE,
+					   strlen(BRIDGE_IFACE)) == 0)
+					break;
+
+				iface_info = iface_info->ifa_next;
+			}
+			if (iface_info) {
+				embms_debug("IP address of %s iface is %pI4\n",
+					    BRIDGE_IFACE,
+					    &iface_info->ifa_address);
+				/*Populate source addr for header*/
+				iph_global->saddr = iface_info->ifa_address;
+				ret = SUCCESS;
+			} else {
+				embms_debug("Could not find iface address\n");
+				ret = FAILURE;
+			}
+		}
+
+		break;
+
+	case STOP_EMBMS_TUNNEL:
+
+		embms_conf.embms_tunneling_status = TUNNELING_OFF;
+		embms_debug("Stopped Tunneling..\n");
+		ret = SUCCESS;
+		break;
+
+	default:
+		/* Unknown ioctl: report and reject instead of returning
+		 * an uninitialized value.
+		 */
+		embms_error("embms_device_ioctl: unknown ioctl %u\n",
+			    ioctl_num);
+		break;
+	}
+
+	return ret;
+}
+
+/* Module Declarations
+ * This structure will hold the functions to be called
+ * when a process does something to the device we
+ * created. Since a pointer to this structure is kept in
+ * the devices table, it can't be local to
+ * init_module. NULL is for unimplemented functions.
+ */
+/* ioctl-only character device: read/write are intentionally left NULL. */
+static const struct file_operations embms_device_fops = {
+	.owner = THIS_MODULE,
+	.open = embms_device_open,
+	.release = embms_device_release,
+	.read = NULL,
+	.write = NULL,
+	.unlocked_ioctl = embms_device_ioctl,
+};
+
+/* Register the embms character device: chrdev region -> class -> device
+ * node -> cdev. Unwinds in reverse order on failure.
+ *
+ * Returns 0 on success or the first failing call's error code
+ * (previously every failure collapsed to -ENODEV, hiding the cause).
+ */
+static int embms_ioctl_init(void)
+{
+	int ret;
+	struct device *dev;
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, EMBMS_DEVICE_NAME);
+	if (ret) {
+		embms_error("device_alloc err\n");
+		goto dev_alloc_err;
+	}
+
+	embms_class = class_create(THIS_MODULE, EMBMS_DEVICE_NAME);
+	if (IS_ERR(embms_class)) {
+		embms_error("class_create err\n");
+		ret = PTR_ERR(embms_class);
+		goto class_err;
+	}
+
+	dev = device_create(embms_class, NULL, device,
+			    &embms_conf, EMBMS_DEVICE_NAME);
+	if (IS_ERR(dev)) {
+		embms_error("device_create err\n");
+		ret = PTR_ERR(dev);
+		goto device_err;
+	}
+
+	cdev_init(&embms_device, &embms_device_fops);
+	ret = cdev_add(&embms_device, device, dev_num);
+	if (ret) {
+		embms_error("cdev_add err\n");
+		goto cdev_add_err;
+	}
+
+	embms_debug("ioctl init OK!!\n");
+	return 0;
+
+cdev_add_err:
+	device_destroy(embms_class, device);
+device_err:
+	class_destroy(embms_class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return ret;
+}
+
+/* Tear down everything embms_ioctl_init() set up, in reverse order.
+ * Only safe to call after a fully successful init (cdev added, device
+ * and class created, region registered).
+ */
+static void embms_ioctl_deinit(void)
+{
+	cdev_del(&embms_device);
+	device_destroy(embms_class, device);
+	class_destroy(embms_class);
+	unregister_chrdev_region(device, dev_num);
+}
+
+/*Initialize the module - Register the misc device*/
+/* Module init: set up global state, register the char device, and
+ * pre-build the IP/UDP header template used for tunnelled packets.
+ */
+static int __init start_embms(void)
+{
+	int ret = 0;
+
+	/* hdr_buff holds a prebuilt IP header followed by a UDP header */
+	iph_global = (struct iphdr *)hdr_buff;
+	udph_global = (struct udphdr *)(hdr_buff + sizeof(struct iphdr));
+
+	embms_conf.embms_tunneling_status = TUNNELING_OFF;
+	embms_conf.no_of_tmgi_sessions = 0;
+	embms_conf.embms_data_port = 0;
+	atomic_set(&embms_conf.device_under_use, 0);
+	atomic_set(&embms_conf.ip_ident, 0);
+	spin_lock_init(&embms_conf.lock);
+
+	embms_debug("Registering embms device\n");
+
+	ret = embms_ioctl_init();
+	if (ret) {
+		/* Nothing was registered: calling embms_ioctl_deinit()
+		 * here (as the old fail_init path did) would cdev_del()
+		 * an un-added cdev and destroy an unregistered class.
+		 */
+		embms_error("embms device failed to register");
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+
+	/* Fill in the invariant parts of the header template. */
+	memset(hdr_buff, 0, sizeof(struct udphdr) + sizeof(struct iphdr));
+	udph_global->check = UDP_CHECKSUM;
+	iph_global->version = IP_VERSION;
+	iph_global->ihl = IP_IHL;
+	iph_global->tos = IP_TOS;
+	iph_global->frag_off = IP_FRAG_OFFSET;
+	iph_global->ttl = IP_TTL;
+	iph_global->protocol = IPPROTO_UDP;
+
+	dev_global = NULL;
+
+	/* Publish the multicast receive hook if nobody else owns it. */
+	if (!embms_tm_multicast_recv)
+		RCU_INIT_POINTER(embms_tm_multicast_recv,
+				 handle_multicast_stream);
+
+	return ret;
+}
+
+/*Cleanup - unregister the appropriate file from proc*/
+
+static void __exit stop_embms(void)
+{
+	embms_ioctl_deinit();
+
+	/* NOTE(review): rcu_dereference() is used outside an RCU
+	 * read-side critical section, and there is no synchronize_rcu()
+	 * after clearing the hook, so an in-flight reader could still
+	 * call into this module during unload. Compare the
+	 * nf_nat_snmp_basic_fini() pattern (NULL + synchronize_rcu());
+	 * confirm.
+	 */
+	if (rcu_dereference(embms_tm_multicast_recv))
+		RCU_INIT_POINTER(embms_tm_multicast_recv, NULL);
+
+	embms_debug("unregister_chrdev done\n");
+}
+
+module_init(start_embms);
+module_exit(stop_embms);
+MODULE_LICENSE("GPL v2");
diff --git a/net/embms_kernel/embms_kernel.h b/net/embms_kernel/embms_kernel.h
new file mode 100644
index 0000000..c8248ce
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.h
@@ -0,0 +1,233 @@
+/******************************************************************
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *---------------------------------------------------------------
+
+ * DESCRIPTION
+ * Header file for eMBMs Tunneling Module in kernel.
+ *******************************************************************
+ */
+
+#ifndef EMBMS_H
+#define EMBMS_H
+
+#include <linux/ioctl.h>
+#include <stdbool.h>
+#include <linux/if_addr.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+
+#define EMBMS_MAX_IFACE_NAME 20
+
+/* Defining IP and UDP header related macros*/
+
+#define UDP_CHECKSUM 0
+#define IP_VERSION 4
+#define IP_IHL 5
+#define IP_TOS 0
+#define IP_ID 1
+#define IP_FRAG_OFFSET htons(0x4000)
+#define IP_TTL 64
+#define BRIDGE_IFACE "bridge0"
+
+#define BUF_LEN 1024
+#define TUNNELING_ON 1
+#define TUNNELING_OFF 0
+
+// definitions required for IOCTL
+/* NOTE(review): defining objects in a header is fragile -- the static
+ * variables get a private copy in every translation unit including this
+ * file, and the non-static 'embms_device' risks a multiple-definition
+ * link error if a second includer ever appears. Works only while the
+ * single .c file is the sole includer; confirm.
+ */
+static unsigned int dev_num = 1;
+/* Embms device used for communication*/
+struct cdev embms_device;
+static struct class *embms_class;
+static dev_t device;
+#define EMBMS_IOC_MAGIC 0x64
+
+/* NOTE(review): embms_error aliases pr_debug, so error messages are
+ * invisible unless dynamic debug is enabled; pr_err would be usual.
+ */
+#define embms_debug pr_debug
+#define embms_error pr_debug
+
+/* The name of the device file*/
+#define EMBMS_DEVICE_NAME "embms_tm_device"
+
+extern int (*embms_tm_multicast_recv)(struct sk_buff *skb);
+
+/**
+ * enum embms_action_type - Describes action to perform
+ * @ADD_CLIENT_ENTRY: add client entry to TMGI
+ * @DELETE_CLIENT_ENTRY: deelte client entry from TMGI
+ * @TMGI_DEACTIVATE: Delete TMGI entry
+ * @CLIENT_ACTIVATE_ALL_TMGI: Add client to all TMGI
+ * @CLIENT_DEACTIVATE_ALL_TMGI: Delete client from all TMGI
+ * @SESSION_DEACTIVATE: Stop session
+ * @SOCK_INFO: Socket information like V4 addr, port etc
+ *
+ * This enum defines the types of action which are
+ * supported by this module.
+ */
+
+/* Action identifiers (documented in the kernel-doc above).
+ * NOTE(review): this declares a file-scope VARIABLE 'embms_action_type'
+ * of an anonymous enum type in a header (each includer gets a copy); a
+ * plain tagged 'enum embms_action_type { ... };' was probably intended.
+ * Also, TMGI_DEACTIVATE is later redefined as an ioctl macro in this
+ * same header, shadowing the enum constant -- confirm.
+ */
+enum {
+	ADD_CLIENT_ENTRY = 0,
+	DELETE_CLIENT_ENTRY,
+	TMGI_DEACTIVATE,
+	CLIENT_ACTIVATE_ALL_TMGI,
+	CLIENT_DEACTIVATE_ALL_TMGI,
+	SESSION_DEACTIVATE,
+	SOCK_INFO
+} embms_action_type;
+
+/**
+ * struct tmgi_to_clnt_info_update - information for addition/deletion
+ * @multicast_addr: TMGI multicast IP to receive data
+ * @multicast_port: TMGI multicast port to receive date
+ * @client_addr: Client IPV4 address for sending data
+ * @client_port: Client port for sending data
+ * @data_port: port used to send data to client
+ * @action_type: Action to be performed
+ * @iface_name: iface to listen to for data
+ *
+ * This structure contains information as to what action
+ * needs to be performed on TMGI-client table. It is
+ * sent as a parameter during an IOCTL call
+ */
+
+/* NOTE(review): this layout is shared with userspace via ioctl; the
+ * u16-followed-by-u32 members imply compiler padding -- confirm 32/64-bit
+ * userspace agreement (or consider fixed ordering / __packed).
+ * Addresses/ports appear to be network byte order (callers print them
+ * with ntohs()) -- confirm.
+ */
+struct tmgi_to_clnt_info_update {
+	u32 multicast_addr;
+	u16 multicast_port;
+	u32 client_addr;
+	u16 client_port;
+	u16 data_port;
+	u32 action_type;	/* one of the action identifiers above */
+	char iface_name[EMBMS_MAX_IFACE_NAME];
+};
+
+/**
+ * struct clnt_info - contains client information
+ * @addr: Client IPV4 address for sending packets
+ * @port: Client port for sending packets
+ * @dmac: Client DMAC address
+ * @client_list_ptr : list ptr used to maintain client list
+ *
+ * This structure maintains complete client information
+ * to be used when sending packets to client
+ */
+
+struct clnt_info {
+	u32 addr;		/* client IPv4 address */
+	u16 port;		/* client port */
+	u8 dmac[ETH_ALEN];	/* client destination MAC */
+	struct list_head client_list_ptr;	/* node in a TMGI's client list */
+};
+
+/**
+ * struct tmgi_to_clnt_info - contains TMGI information
+ * @tmgi_multicast_addr: TMGI IPV4 address to listen for packets
+ * @tmgi_port: Client port to listen for packets
+ * @no_of_clients: No of clients for a TMGI
+ * @client_list_head : list head for client list
+ * @tmgi_list_ptr : list ptr to maintain tmgi list
+ *
+ * This structure maintains complete client information
+ * to be used when sending data to client
+ */
+
+struct tmgi_to_clnt_info {
+	u32 tmgi_multicast_addr;
+	u16 tmgi_port;
+	u16 no_of_clients;	/* length of client_list_head */
+	struct list_head client_list_head;	/* list of struct clnt_info */
+	struct list_head tmgi_list_ptr;		/* node in the global TMGI list */
+};
+
+/**
+ * struct embms_info_internal - stores module specific params
+ * @device_under_use: Used to prevent concurent access to the same device
+ * @embms_data_port: Source Data port used for tunnelled packets
+ * @embms_iface: Iface to receive embms traffic
+ * @embms_tunneling_status : Current EMBMS Status
+ * @no_of_tmgi_sessions : Number of current active TMGI sessions
+ * @lock : Lock for concurrency scenarios
+ * @ip_ident : IP identification number to be used for sent packets
+ *
+ * This tructure holds module specific information which is
+ * used throughout the module to maintain consistency
+ */
+
+struct embms_info_internal {
+	/* presumably a single-open guard -- confirm in embms_device_open() */
+	atomic_t device_under_use;
+	int embms_data_port;	/* source port for tunnelled packets */
+	char embms_iface[EMBMS_MAX_IFACE_NAME];
+	int embms_tunneling_status;	/* TUNNELING_ON / TUNNELING_OFF */
+	int no_of_tmgi_sessions;
+	/*lock to prevent concurrent access*/
+	spinlock_t lock;
+	atomic_t ip_ident;	/* IP identification counter for sent packets */
+};
+
+/* This ioctl is used to add a new client entry to tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define ADD_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 0, \
+ struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a client entry for a particular
+ * TMGI from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define DEL_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 1, \
+ struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a TMGI entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define TMGI_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 2, \
+ struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete client entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define CLIENT_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 3, \
+ struct tmgi_to_clnt_info_update)
+
+/* Gets the ON/OFF status of Tunneling module*/
+
+#define GET_EMBMS_TUNNELING_STATUS _IO(EMBMS_IOC_MAGIC, 4)
+
+/* Used to start tunneling. Argument is the port
+ * number to be used to send
+ * data to clients
+ */
+
+#define START_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 5, \
+ struct tmgi_to_clnt_info_update)
+
+/* Used to stop tunnleing*/
+
+#define STOP_EMBMS_TUNNEL _IO(EMBMS_IOC_MAGIC, 6)
+
+/* Return values indicating error status*/
+#define SUCCESS 0 /* Successful operation*/
+#define FAILURE -1 /* Unsuccessful operation*/
+
+/* Error Condition Values*/
+/* NOTE(review): ENOMEM collides with the standard kernel errno macro of
+ * the same name (redefinition warning, and any kernel code in this TU
+ * returning -ENOMEM changes meaning). Also callers write
+ * 'return -EBADPARAM', which with EBADPARAM == -3 yields +3 -- a
+ * positive value, not a valid -errno for an ioctl handler. Confirm and
+ * consider renaming with an EMBMS_ prefix.
+ */
+#define ENOMEM -2 /* Out of memory*/
+#define EBADPARAM -3 /* Incorrect parameters passed*/
+#define ENOEFFECT -4 /* No Effect*/
+#endif
+
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index f2a7102..22377c8 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c8409ca0..2ec005c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -319,7 +319,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
int ret, no_addr;
struct fib_result res;
struct flowi4 fl4;
- struct net *net;
+ struct net *net = dev_net(dev);
bool dev_match;
fl4.flowi4_oif = 0;
@@ -332,6 +332,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
+ fl4.flowi4_uid = sock_net_uid(net, NULL);
no_addr = idev->ifa_list == NULL;
@@ -339,13 +340,12 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
trace_fib_validate_source(dev, &fl4);
- net = dev_net(dev);
if (fib_lookup(net, &fl4, &res, 0))
goto last_resort;
if (res.type != RTN_UNICAST &&
(res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
goto e_inval;
- if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
+ if (!rpf && !fib_num_tclassid_users(net) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
goto last_resort;
fib_combine_itag(itag, &res);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5d7944f..b120b9b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_parm *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
+ int pkt_len = skb->len;
int err;
int mtu;
@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
err = dst_output(tunnel->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
- err = skb->len;
+ err = pkt_len;
iptunnel_xmit_stats(dev, err);
return NETDEV_TX_OK;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d613309..af9fa59 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -282,6 +282,17 @@
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_NATTYPE_MODULE
+ tristate "NATTYPE target support"
+ depends on NF_NAT
+ default m if NETFILTER_ADVANCED=n
+ help
+ NATTYPE is a special case of NAT: used to support FULL Cone NAT
+ and ADDRESS Restricted Cone NAT. All incoming connections are
+ allowed if there is an outgoing connection using that port.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_NF_TARGET_NETMAP
tristate "NETMAP target support"
depends on NETFILTER_ADVANCED
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 853328f..1429845 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -57,6 +57,7 @@
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
+obj-$(CONFIG_IP_NF_TARGET_NATTYPE_MODULE) += ipt_NATTYPE.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
diff --git a/net/ipv4/netfilter/ipt_NATTYPE.c b/net/ipv4/netfilter/ipt_NATTYPE.c
new file mode 100644
index 0000000..b8d93e9
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_NATTYPE.c
@@ -0,0 +1,612 @@
+/* netfilter NATTYPE
+ * net/ipv4/netfilter/ipt_NATTYPE.c
+ * Endpoint Independent, Address Restricted and Port-Address Restricted
+ * NAT types' kernel side implementation.
+ *
+ * (C) Copyright 2011, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from
+ * Cameo's implementation(with many thanks):
+ */
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/module.h>
+#include <net/protocol.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_nat_rule.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ipt_NATTYPE.h>
+#include <linux/atomic.h>
+
+#if !defined(NATTYPE_DEBUG)
+/* Compiled out: arguments are discarded unevaluated. Use the same
+ * variadic shape as the debug variant so single-argument calls
+ * (DEBUGP("msg")) are accepted identically in both configurations;
+ * the no-debug branch previously declared (type, args...) while the
+ * debug branch declared (args...).
+ */
+#define DEBUGP(args...)
+#else
+static const char * const types[] = {"TYPE_PORT_ADDRESS_RESTRICTED",
+				     "TYPE_ENDPOINT_INDEPENDENT",
+				     "TYPE_ADDRESS_RESTRICTED"};
+static const char * const modes[] = {"MODE_DNAT", "MODE_FORWARD_IN",
+				     "MODE_FORWARD_OUT"};
+#define DEBUGP(args...) pr_debug(args)
+#endif
+
+/* netfilter NATTYPE TODO:
+ * Add magic value checks to data structure.
+ */
+struct ipt_nattype {
+	struct list_head list;		/* node in the global nattype_list */
+	struct timer_list timeout;	/* self-destruct after NATTYPE_TIMEOUT s */
+	unsigned char is_valid;		/* set under nattype_lock once listed */
+	unsigned short proto; /* Protocol: TCP or UDP */
+	struct nf_nat_ipv4_range range; /* LAN side src info*/
+	unsigned short nat_port; /* Routed NAT port */
+	unsigned int dest_addr; /* Original egress packets dst addr */
+	unsigned short dest_port;/* Original egress packets destination port */
+};
+
+/* TODO: It might be better to use a hash table for performance in
+ * heavy traffic.
+ */
+static LIST_HEAD(nattype_list);
+static DEFINE_SPINLOCK(nattype_lock);
+
+/* netfilter NATTYPE
+ * nattype_nte_debug_print()
+ */
+/* Dump one nattype entry (no-op unless NATTYPE_DEBUG is defined). */
+static void nattype_nte_debug_print(const struct ipt_nattype *nte,
+				    const char *s)
+{
+	DEBUGP("%p: %s - proto[%d], src[%pI4:%d], nat[<x>:%d], dest[%pI4:%d]\n",
+	       nte, s, nte->proto,
+	       &nte->range.min_ip, ntohs(nte->range.min.all),
+	       ntohs(nte->nat_port),
+	       &nte->dest_addr, ntohs(nte->dest_port));
+}
+
+/* netfilter NATTYPE nattype_free()
+ * Free the object.
+ */
+static void nattype_free(struct ipt_nattype *nte)
+{
+ nattype_nte_debug_print(nte, "free");
+ kfree(nte);
+}
+
+/* netfilter NATTYPE nattype_refresh_timer()
+ * Refresh the timer for this object.
+ */
+/* Re-arm an entry's self-destruct timer.
+ *
+ * Returns true only if the entry is still valid AND its timer was
+ * pending (del_timer() nonzero); false means the entry is dying (or
+ * already dead) and must not be relied upon by the caller.
+ *
+ * NOTE(review): 'nat_type' is a raw pointer cast stored in the
+ * conntrack; nte->is_valid is read under nattype_lock, but the memory
+ * itself could already have been kfree()d by nattype_timer_timeout()
+ * before the lock is taken -- confirm lifetime guarantees at callers.
+ */
+bool nattype_refresh_timer_impl(unsigned long nat_type)
+{
+	struct ipt_nattype *nte = (struct ipt_nattype *)nat_type;
+
+	if (!nte)
+		return false;
+	spin_lock_bh(&nattype_lock);
+	if (!nte->is_valid) {
+		spin_unlock_bh(&nattype_lock);
+		return false;
+	}
+	if (del_timer(&nte->timeout)) {
+		/* Timer was pending: push the expiry out again. */
+		nte->timeout.expires = jiffies + NATTYPE_TIMEOUT * HZ;
+		add_timer(&nte->timeout);
+		spin_unlock_bh(&nattype_lock);
+		return true;
+	}
+	spin_unlock_bh(&nattype_lock);
+	return false;
+}
+
+/* netfilter NATTYPE nattype_timer_timeout()
+ * The timer has gone off, self-destruct
+ */
+/* Timer expiry handler: unlink and destroy the entry. */
+static void nattype_timer_timeout(unsigned long in_nattype)
+{
+	struct ipt_nattype *nte = (void *)in_nattype;
+
+	/* netfilter NATTYPE
+	 * The race with list deletion is solved by ensuring
+	 * that either this code or the list deletion code
+	 * but not both will remove the oject.
+	 */
+	nattype_nte_debug_print(nte, "timeout");
+	spin_lock_bh(&nattype_lock);
+	list_del(&nte->list);
+	/* Zeroing under the lock clears is_valid so a concurrent
+	 * nattype_refresh_timer_impl() bails out before the kfree below.
+	 */
+	memset(nte, 0, sizeof(struct ipt_nattype));
+	spin_unlock_bh(&nattype_lock);
+	nattype_free(nte);
+}
+
+/* netfilter NATTYPE nattype_packet_in_match()
+ * Ingress packet, try to match with this nattype entry.
+ */
+/* Ingress packet: does it match this nattype entry?
+ * Matches on protocol, (for ADDRESS_RESTRICTED) the egress destination
+ * vs. ingress source, and the NAT port vs. ingress destination port.
+ */
+static bool nattype_packet_in_match(const struct ipt_nattype *nte,
+				    struct sk_buff *skb,
+				    const struct ipt_nattype_info *info)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	u16 dst_port = 0;
+
+	/* If the protocols are not the same, no sense in looking
+	 * further.
+	 */
+	if (nte->proto != iph->protocol) {
+		/* Fixed: the original nested one DEBUGP() inside the
+		 * argument list of another and dropped a closing
+		 * parenthesis, leaving an unterminated macro invocation
+		 * that fails preprocessing.
+		 */
+		DEBUGP("nattype_packet_in_match: protocol failed: nte proto:");
+		DEBUGP(" %d, packet proto: %d\n",
+		       nte->proto, iph->protocol);
+		return false;
+	}
+
+	/* In ADDRESS_RESTRICT, the egress destination must match the source
+	 * of this ingress packet.
+	 */
+	if (info->type == TYPE_ADDRESS_RESTRICTED) {
+		if (nte->dest_addr != iph->saddr) {
+			DEBUGP("nattype_packet_in_match: dest/src check");
+			DEBUGP(" failed: dest_addr: %pI4, src dest: %pI4\n",
+			       &nte->dest_addr, &iph->saddr);
+			return false;
+		}
+	}
+
+	/* Obtain the destination port value for TCP or UDP. The nattype
+	 * entries are stored in native (not host).
+	 */
+	if (iph->protocol == IPPROTO_TCP) {
+		struct tcphdr _tcph;
+		struct tcphdr *tcph;
+
+		tcph = skb_header_pointer(skb, ip_hdrlen(skb),
+					  sizeof(_tcph), &_tcph);
+		if (!tcph)
+			return false;
+		dst_port = tcph->dest;
+	} else if (iph->protocol == IPPROTO_UDP) {
+		struct udphdr _udph;
+		struct udphdr *udph;
+
+		udph = skb_header_pointer(skb, ip_hdrlen(skb),
+					  sizeof(_udph), &_udph);
+		if (!udph)
+			return false;
+		dst_port = udph->dest;
+	}
+
+	/* Our NAT port must match the ingress pacekt's
+	 * destination packet.
+	 */
+	if (nte->nat_port != dst_port) {
+		DEBUGP("nattype_packet_in_match fail: ");
+		DEBUGP(" nat port: %d,dest_port: %d\n",
+		       ntohs(nte->nat_port), ntohs(dst_port));
+		return false;
+	}
+
+	/* In either EI or AR mode, the ingress packet's src port
+	 * can be anything.
+	 */
+	nattype_nte_debug_print(nte, "INGRESS MATCH");
+	return true;
+}
+
+/* netfilter NATTYPE nattype_compare
+ * Compare two entries, return true if relevant fields are the same.
+ */
+/* Field-by-field equality of two entries (proto, LAN source ip/port,
+ * NAT port, destination ip/port); used to deduplicate the list.
+ */
+static bool nattype_compare(struct ipt_nattype *n1, struct ipt_nattype *n2)
+{
+	/* netfilter NATTYPE Protocol
+	 * compare.
+	 */
+	if (n1->proto != n2->proto) {
+		DEBUGP("nattype_compare: protocol mismatch: %d:%d\n",
+		       n1->proto, n2->proto);
+		return false;
+	}
+
+	/* netfilter NATTYPE LAN Source compare.
+	 * Since we always keep min/max values the same,
+	 * just compare the min values.
+	 */
+	if (n1->range.min_ip != n2->range.min_ip) {
+		DEBUGP("nattype_compare: r.min_ip mismatch: %pI4:%pI4\n",
+		       &n1->range.min_ip, &n2->range.min_ip);
+		return false;
+	}
+
+	if (n1->range.min.all != n2->range.min.all) {
+		DEBUGP("nattype_compare: r.min mismatch: %d:%d\n",
+		       ntohs(n1->range.min.all),
+		       ntohs(n2->range.min.all));
+		return false;
+	}
+
+	/* netfilter NATTYPE
+	 * NAT port
+	 */
+	if (n1->nat_port != n2->nat_port) {
+		DEBUGP("nattype_compare: nat_port mistmatch: %d:%d\n",
+		       ntohs(n1->nat_port), ntohs(n2->nat_port));
+		return false;
+	}
+
+	/* netfilter NATTYPE
+	 * Destination compare
+	 */
+	if (n1->dest_addr != n2->dest_addr) {
+		DEBUGP("nattype_compare: dest_addr mismatch: %pI4:%pI4\n",
+		       &n1->dest_addr, &n2->dest_addr);
+		return false;
+	}
+
+	if (n1->dest_port != n2->dest_port) {
+		DEBUGP("nattype_compare: dest_port mismatch: %d:%d\n",
+		       ntohs(n1->dest_port), ntohs(n2->dest_port));
+		return false;
+	}
+	return true;
+}
+
+ /**
+ * netfilter NATTYPE nattype_nat()
+ * Ingress packet on PRE_ROUTING hook, find match, update conntrack
+ * to allow
+ **/
+/* PRE_ROUTING: find the first entry matching this ingress packet and
+ * expand its conntrack with a DNAT mapping back to the LAN source.
+ * Only the first matching entry is considered; on match the list lock
+ * is dropped after copying 'range', and the function returns without
+ * resuming the scan.
+ */
+static unsigned int nattype_nat(struct sk_buff *skb,
+				const struct xt_action_param *par)
+{
+	struct ipt_nattype *nte;
+
+	if (par->hooknum != NF_INET_PRE_ROUTING)
+		return XT_CONTINUE;
+	spin_lock_bh(&nattype_lock);
+	list_for_each_entry(nte, &nattype_list, list) {
+		struct nf_conn *ct;
+		enum ip_conntrack_info ctinfo;
+		struct nf_nat_ipv4_range newrange;
+		unsigned int ret;
+
+		if (!nattype_packet_in_match(nte, skb, par->targinfo))
+			continue;
+
+		/* Copy the LAN source data into the ingress' pacekts
+		 * conntrack in the reply direction.
+		 */
+		newrange = nte->range;
+		spin_unlock_bh(&nattype_lock);
+
+		/* netfilter NATTYPE Find the
+		 * ingress packet's conntrack.
+		 */
+		ct = nf_ct_get(skb, &ctinfo);
+		if (!ct) {
+			DEBUGP("ingress packet conntrack not found\n");
+			return XT_CONTINUE;
+		}
+
+		/* Expand the ingress conntrack
+		 * to include the reply as source
+		 */
+		DEBUGP("Expand ingress conntrack=%p, type=%d, src[%pI4:%d]\n",
+		       ct, ctinfo, &newrange.min_ip, ntohs(newrange.min.all));
+		/* NOTE(review): nte may be freed by its timer once the
+		 * lock is dropped; storing the raw pointer here is only
+		 * safe if refresh/teardown handles staleness -- confirm.
+		 */
+		ct->nattype_entry = (unsigned long)nte;
+		ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+		DEBUGP("Expand returned: %d\n", ret);
+		return ret;
+	}
+	spin_unlock_bh(&nattype_lock);
+	return XT_CONTINUE;
+}
+
+/* netfilter NATTYPE nattype_forward()
+ * Ingress and Egress packet forwarding hook
+ */
+/* FORWARD hook: ingress (MODE_FORWARD_IN) refreshes a matching entry's
+ * timer and accepts; egress creates (or refreshes) an entry recording
+ * the LAN source so later ingress packets can be let through.
+ */
+static unsigned int nattype_forward(struct sk_buff *skb,
+				    const struct xt_action_param *par)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	void *protoh = (void *)iph + iph->ihl * 4;
+	struct ipt_nattype *nte;
+	struct ipt_nattype *nte2;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	const struct ipt_nattype_info *info = par->targinfo;
+	u16 nat_port;
+
+	if (par->hooknum != NF_INET_FORWARD)
+		return XT_CONTINUE;
+
+	/* Ingress packet,
+	 * refresh the timer if we find an entry.
+	 */
+	if (info->mode == MODE_FORWARD_IN) {
+		spin_lock_bh(&nattype_lock);
+		list_for_each_entry(nte, &nattype_list, list) {
+			/* netfilter NATTYPE
+			 * Compare the ingress packet with the existing
+			 * entries looking for a match.
+			 */
+			if (!nattype_packet_in_match(nte, skb, info))
+				continue;
+
+			spin_unlock_bh(&nattype_lock);
+			/* netfilter NATTYPE
+			 * Refresh the timer, if we fail, break
+			 * out and forward fail as though we never
+			 * found the entry.
+			 * NOTE(review): the lock was just dropped above;
+			 * this break falls through to a second
+			 * spin_unlock_bh() below -- double unlock.
+			 * Confirm and restructure.
+			 */
+			if (!nattype_refresh_timer((unsigned long)nte))
+				break;
+
+			/* netfilter NATTYPE
+			 * The entry is found and refreshed, the
+			 * entry values should not change so print
+			 * them outside the lock.
+			 */
+			nattype_nte_debug_print(nte, "refresh");
+			DEBUGP("FORWARD_IN_ACCEPT\n");
+			return NF_ACCEPT;
+		}
+		spin_unlock_bh(&nattype_lock);
+		DEBUGP("FORWARD_IN_FAIL\n");
+		return XT_CONTINUE;
+	}
+
+	/* netfilter NATTYPE
+	 * Egress packet, create a new rule in our list. If conntrack does
+	 * not have an entry, skip this packet.
+	 * NOTE(review): (ctinfo == IP_CT_NEW && ctinfo == IP_CT_RELATED)
+	 * can never be true (one variable, two different values), so only
+	 * the !ct test ever filters here; confirm the intended operator
+	 * (likely ||) and polarity.
+	 */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || (ctinfo == IP_CT_NEW && ctinfo == IP_CT_RELATED))
+		return XT_CONTINUE;
+
+	nat_port = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all;
+
+	/* netfilter NATTYPE
+	 * Allocate a new entry
+	 */
+	nte = kzalloc(sizeof(*nte), GFP_ATOMIC | __GFP_NOWARN);
+	if (!nte) {
+		DEBUGP("kernel malloc fail\n");
+		return XT_CONTINUE;
+	}
+
+	INIT_LIST_HEAD(&nte->list);
+
+	nte->proto = iph->protocol;
+	nte->nat_port = nat_port;
+	nte->dest_addr = iph->daddr;
+	nte->range.min_ip = iph->saddr;
+	nte->range.max_ip = nte->range.min_ip;
+
+	/* netfilter NATTYPE
+	 * TOOD: Would it be better to get this information from the
+	 * conntrack instead of the headers.
+	 */
+	if (iph->protocol == IPPROTO_TCP) {
+		nte->range.min.tcp.port = ((struct tcphdr *)protoh)->source;
+		nte->range.max.tcp.port = nte->range.min.tcp.port;
+		nte->dest_port = ((struct tcphdr *)protoh)->dest;
+	} else if (iph->protocol == IPPROTO_UDP) {
+		nte->range.min.udp.port = ((struct udphdr *)protoh)->source;
+		nte->range.max.udp.port = nte->range.min.udp.port;
+		nte->dest_port = ((struct udphdr *)protoh)->dest;
+	}
+	nte->range.flags = (NF_NAT_RANGE_MAP_IPS |
+			    NF_NAT_RANGE_PROTO_SPECIFIED);
+
+	/* netfilter NATTYPE
+	 * Initialize the self-destruct timer.
+	 */
+	init_timer(&nte->timeout);
+	nte->timeout.data = (unsigned long)nte;
+	nte->timeout.function = nattype_timer_timeout;
+
+	/* netfilter NATTYPE
+	 * We have created the new nte; however, it might not be unique.
+	 * Search the list for a matching entry. If found, throw away
+	 * the new entry and refresh the old. If not found, atomically
+	 * insert the new entry on the list.
+	 */
+	spin_lock_bh(&nattype_lock);
+	list_for_each_entry(nte2, &nattype_list, list) {
+		if (!nattype_compare(nte, nte2))
+			continue;
+		spin_unlock_bh(&nattype_lock);
+		/* netfilter NATTYPE
+		 * If we can not refresh this entry, insert our new
+		 * entry as this one is timed out and will be removed
+		 * from the list shortly.
+		 * NOTE(review): on this break the lock is already
+		 * dropped, yet the tail below does list_add() and a
+		 * second spin_unlock_bh() as if it were held -- unlocked
+		 * list mutation plus double unlock. Confirm and
+		 * restructure.
+		 */
+		if (!nattype_refresh_timer((unsigned long)nte2))
+			break;
+
+		/* netfilter NATTYPE
+		 * Found and refreshed an existing entry. Its values
+		 * do not change so print the values outside of the lock.
+		 *
+		 * Free up the new entry.
+		 */
+		nattype_nte_debug_print(nte2, "refresh");
+		nattype_free(nte);
+		return XT_CONTINUE;
+	}
+
+	/* netfilter NATTYPE
+	 * Add the new entry to the list.
+	 */
+	nte->timeout.expires = jiffies + (NATTYPE_TIMEOUT * HZ);
+	add_timer(&nte->timeout);
+	list_add(&nte->list, &nattype_list);
+	ct->nattype_entry = (unsigned long)nte;
+	nte->is_valid = 1;
+	spin_unlock_bh(&nattype_lock);
+	nattype_nte_debug_print(nte, "ADD");
+	return XT_CONTINUE;
+}
+
+/* netfilter NATTYPE
+ * nattype_target()
+ * One of the iptables hooks has a packet for us to analyze, do so.
+ */
+/* Entry point from x_tables: validate the packet, then dispatch to the
+ * DNAT (PRE_ROUTING) or forward (FORWARD) handler based on rule mode.
+ */
+static unsigned int nattype_target(struct sk_buff *skb,
+				   const struct xt_action_param *par)
+{
+	const struct ipt_nattype_info *info = par->targinfo;
+	const struct iphdr *iph = ip_hdr(skb);
+
+	/* netfilter NATTYPE
+	 * The default behavior for Linux is PORT and ADDRESS restricted. So
+	 * we do not need to create rules/entries if we are in that mode.
+	 */
+	if (info->type == TYPE_PORT_ADDRESS_RESTRICTED)
+		return XT_CONTINUE;
+
+	/* netfilter NATTYPE
+	 * Check if we have enough data in the skb.
+	 */
+	if (skb->len < ip_hdrlen(skb))
+		return XT_CONTINUE;
+
+	/* netfilter NATTYPE
+	 * We can not perform endpoint filtering on anything but UDP and TCP.
+	 */
+	if ((iph->protocol != IPPROTO_TCP) && (iph->protocol != IPPROTO_UDP))
+		return XT_CONTINUE;
+
+	/* netfilter NATTYPE
+	 * Check for LAND attack and ignore.
+	 */
+	if (iph->daddr == iph->saddr)
+		return XT_CONTINUE;
+
+	/* netfilter NATTYPE
+	 * Check that we have valid source and destination addresses.
+	 */
+	if ((iph->daddr == (__be32)0) || (iph->saddr == (__be32)0))
+		return XT_CONTINUE;
+
+	/* types[]/modes[] only exist under NATTYPE_DEBUG; the no-op
+	 * DEBUGP discards its arguments before compilation, so this is
+	 * safe in non-debug builds.
+	 */
+	DEBUGP("nattype_target: type = %s, mode = %s\n",
+	       types[info->type], modes[info->mode]);
+
+	/* netfilter NATTYPE
+	 * TODO: why have mode at all since par->hooknum provides
+	 * this information?
+	 */
+	switch (info->mode) {
+	case MODE_DNAT:
+		return nattype_nat(skb, par);
+	case MODE_FORWARD_OUT:
+	case MODE_FORWARD_IN:
+		return nattype_forward(skb, par);
+	}
+	return XT_CONTINUE;
+}
+
+/* netfilter NATTYPE
+ * nattype_check()
+ * check info (mode/type) set by iptables.
+ */
+/* checkentry callback: validate the rule's type/mode/hooks, then drain
+ * the global entry list.
+ * NOTE(review): draining nattype_list every time ANY rule is inserted
+ * discards live state belonging to other rules -- confirm this is the
+ * intended semantic.
+ */
+static int nattype_check(const struct xt_tgchk_param *par)
+{
+	const struct ipt_nattype_info *info = par->targinfo;
+	struct list_head *cur, *tmp;
+
+	if ((info->type != TYPE_PORT_ADDRESS_RESTRICTED) &&
+	    (info->type != TYPE_ENDPOINT_INDEPENDENT) &&
+	    (info->type != TYPE_ADDRESS_RESTRICTED)) {
+		DEBUGP("nattype_check: unknown type: %d\n", info->type);
+		return -EINVAL;
+	}
+
+	if (info->mode != MODE_DNAT && info->mode != MODE_FORWARD_IN &&
+	    info->mode != MODE_FORWARD_OUT) {
+		DEBUGP("nattype_check: unknown mode - %d.\n", info->mode);
+		return -EINVAL;
+	}
+
+	DEBUGP("nattype_check: type = %s, mode = %s\n",
+	       types[info->type], modes[info->mode]);
+
+	if (par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+	    (1 << NF_INET_FORWARD))) {
+		DEBUGP("nattype_check: bad hooks %x.\n", par->hook_mask);
+		return -EINVAL;
+	}
+
+	/* netfilter NATTYPE
+	 * Remove all entries from the nattype list.
+	 * The loop restarts from scratch (goto drain) after every
+	 * removal or lock release, so iteration never continues over a
+	 * list it mutated without the lock.
+	 */
+drain:
+	spin_lock_bh(&nattype_lock);
+	list_for_each_safe(cur, tmp, &nattype_list) {
+		/* Relies on 'list' being the first member of
+		 * struct ipt_nattype (cast from list_head).
+		 */
+		struct ipt_nattype *nte = (void *)cur;
+
+		/* netfilter NATTYPE
+		 * If the timeout is in process, it will tear
+		 * us down. Since it is waiting on the spinlock
+		 * we have to give up the spinlock to give the
+		 * timeout on another CPU a chance to run.
+		 */
+		if (!del_timer(&nte->timeout)) {
+			spin_unlock_bh(&nattype_lock);
+			goto drain;
+		}
+
+		DEBUGP("%p: removing from list\n", nte);
+		list_del(&nte->list);
+		spin_unlock_bh(&nattype_lock);
+		nattype_free(nte);
+		goto drain;
+	}
+	spin_unlock_bh(&nattype_lock);
+	return 0;
+}
+
+/* x_tables registration: one IPv4 target named "NATTYPE", valid on the
+ * PRE_ROUTING and FORWARD hooks only (enforced again in nattype_check).
+ */
+static struct xt_target nattype = {
+	.name		= "NATTYPE",
+	.family		= NFPROTO_IPV4,
+	.target		= nattype_target,
+	.checkentry	= nattype_check,
+	.targetsize	= sizeof(struct ipt_nattype_info),
+	.hooks		= ((1 << NF_INET_PRE_ROUTING) |
+				(1 << NF_INET_FORWARD)),
+	.me		= THIS_MODULE,
+};
+
+/* Module init: publish the timer-refresh hook, then register the
+ * x_tables target. On registration failure the hook is cleared again so
+ * no pointer into this module's text outlives a failed load.
+ */
+static int __init init(void)
+{
+	int ret;
+
+	WARN_ON(nattype_refresh_timer);
+	RCU_INIT_POINTER(nattype_refresh_timer, nattype_refresh_timer_impl);
+	ret = xt_register_target(&nattype);
+	if (ret)
+		RCU_INIT_POINTER(nattype_refresh_timer, NULL);
+	return ret;
+}
+
+/* Module exit: clear the timer-refresh hook published by init() and
+ * wait for in-flight RCU readers BEFORE the module text goes away --
+ * the original left the pointer set, leaving callers with a dangling
+ * function pointer after unload. Same pattern as
+ * nf_nat_snmp_basic_fini().
+ */
+static void __exit fini(void)
+{
+	RCU_INIT_POINTER(nattype_refresh_timer, NULL);
+	synchronize_rcu();
+	xt_unregister_target(&nattype);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c3..5a8f7c3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1304,6 +1304,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+ synchronize_rcu();
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a4faf30..cd632e6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1860,6 +1860,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.flowi4_flags = 0;
fl4.daddr = daddr;
fl4.saddr = saddr;
+ fl4.flowi4_uid = sock_net_uid(net, NULL);
err = fib_lookup(net, &fl4, &res, 0);
if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a835716..dd08d16 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -914,6 +914,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_skb_cb *tcb;
struct tcp_out_options opts;
unsigned int tcp_options_size, tcp_header_size;
+ struct sk_buff *oskb = NULL;
struct tcp_md5sig_key *md5;
struct tcphdr *th;
int err;
@@ -922,11 +923,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tp = tcp_sk(sk);
if (clone_it) {
- skb_mstamp_get(&skb->skb_mstamp);
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
- tcp_rate_skb_sent(sk, skb);
-
+ oskb = skb;
if (unlikely(skb_cloned(skb)))
skb = pskb_copy(skb, gfp_mask);
else
@@ -934,6 +933,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (unlikely(!skb))
return -ENOBUFS;
}
+ skb_mstamp_get(&skb->skb_mstamp);
inet = inet_sk(sk);
tcb = TCP_SKB_CB(skb);
@@ -1035,12 +1035,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
- if (likely(err <= 0))
- return err;
-
- tcp_enter_cwr(sk);
-
- return net_xmit_eval(err);
+ if (unlikely(err > 0)) {
+ tcp_enter_cwr(sk);
+ err = net_xmit_eval(err);
+ }
+ if (!err && oskb) {
+ skb_mstamp_get(&oskb->skb_mstamp);
+ tcp_rate_skb_sent(sk, oskb);
+ }
+ return err;
}
/* This routine just queues the buffer for sending.
@@ -2709,10 +2712,11 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
skb_headroom(skb) >= 0xFFFF)) {
struct sk_buff *nskb;
- skb_mstamp_get(&skb->skb_mstamp);
nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
+ if (!err)
+ skb_mstamp_get(&skb->skb_mstamp);
} else {
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
@@ -3325,6 +3329,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto done;
}
+ /* data was not sent, this is our new send_head */
+ sk->sk_send_head = syn_data;
+ tp->packets_out -= tcp_skb_pcount(syn_data);
+
fallback:
/* Send a regular SYN with Fast Open cookie request option */
if (fo->cookie.len > 0)
@@ -3374,6 +3382,11 @@ int tcp_connect(struct sock *sk)
*/
tp->snd_nxt = tp->write_seq;
tp->pushed_seq = tp->write_seq;
+ buff = tcp_send_head(sk);
+ if (unlikely(buff)) {
+ tp->snd_nxt = TCP_SKB_CB(buff)->seq;
+ tp->pushed_seq = TCP_SKB_CB(buff)->seq;
+ }
TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
/* Timer for repeating the SYN until an answer. */
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6de016f..0932c85 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
u16 mac_len = skb->mac_len;
int udp_offset, outer_hlen;
__wsum partial;
+ bool need_ipsec;
if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
goto out;
@@ -62,8 +63,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
+ need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
/* Try to offload checksum if possible */
offload_csum = !!(need_csum &&
+ !need_ipsec &&
(skb->dev->features &
(is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
(NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6a7ff69..7f9a8df 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -22,7 +22,8 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct rtable *rt;
@@ -30,6 +31,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
+ fl4->flowi4_mark = mark;
if (saddr)
fl4->saddr = saddr->a4;
@@ -44,20 +46,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
+ return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
}
static int xfrm4_get_saddr(struct net *net, int oif,
- xfrm_address_t *saddr, xfrm_address_t *daddr)
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u32 mark)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
+ dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5da8649..7ba9a6e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -914,6 +914,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
fn->fn_flags |= RTN_RTINFO;
}
nsiblings = iter->rt6i_nsiblings;
+ iter->rt6i_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
@@ -928,6 +929,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
break;
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
+ iter->rt6i_node = NULL;
fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ae87b9a..48e6e75 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -940,24 +940,25 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
- __be16 *p = (__be16 *)(ipv6h+1);
+ struct ipv6hdr *ipv6h;
+ __be16 *p;
- ip6_flow_hdr(ipv6h, 0,
- ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, true,
- &t->fl.u.ip6));
+ ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen + sizeof(*ipv6h));
+ ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
+ t->fl.u.ip6.flowlabel,
+ true, &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;
- p[0] = t->parms.o_flags;
- p[1] = htons(type);
+ p = (__be16 *)(ipv6h + 1);
+ p[0] = t->parms.o_flags;
+ p[1] = htons(type);
/*
* Set the source hardware address.
@@ -1301,6 +1302,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b44e9f5..64aefc2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1042,6 +1042,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
int mtu;
+ unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
bool use_cache = false;
@@ -1120,7 +1121,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
t->parms.name);
goto tx_err_dst_release;
}
- mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+ mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@@ -1129,7 +1130,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
mtu = IPV6_MIN_MTU;
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+ if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;
@@ -2235,6 +2236,9 @@ static int __init ip6_tunnel_init(void)
{
int err;
+ if (!ipv6_mod_enabled())
+ return -EOPNOTSUPP;
+
err = register_pernet_device(&ip6_tnl_net_ops);
if (err < 0)
goto out_pernet;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index bbeedff..da64b20 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev;
struct xfrm_state *x;
+ int pkt_len = skb->len;
int err = -1;
int mtu;
@@ -498,7 +499,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += skb->len;
+ tstats->tx_bytes += pkt_len;
tstats->tx_packets++;
u64_stats_update_end(&tstats->syncp);
} else {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index e0f71c0..4003b28 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -29,7 +29,8 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
const xfrm_address_t *saddr,
- const xfrm_address_t *daddr)
+ const xfrm_address_t *daddr,
+ u32 mark)
{
struct flowi6 fl6;
struct dst_entry *dst;
@@ -38,6 +39,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+ fl6.flowi6_mark = mark;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -54,12 +56,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
}
static int xfrm6_get_saddr(struct net *net, int oif,
- xfrm_address_t *saddr, xfrm_address_t *daddr)
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u32 mark)
{
struct dst_entry *dst;
struct net_device *dev;
- dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
+ dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 3bce651..b06acd0 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1415,6 +1415,9 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
struct sock *sk = NULL;
tunnel = container_of(work, struct l2tp_tunnel, del_work);
+
+ l2tp_tunnel_closeall(tunnel);
+
sk = l2tp_tunnel_sock_lookup(tunnel);
if (!sk)
goto out;
@@ -1734,15 +1737,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
/* This function is used by the netlink TUNNEL_DELETE command.
*/
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- l2tp_tunnel_inc_refcount(tunnel);
- l2tp_tunnel_closeall(tunnel);
- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
- l2tp_tunnel_dec_refcount(tunnel);
- return 1;
+ if (!test_and_set_bit(0, &tunnel->dead)) {
+ l2tp_tunnel_inc_refcount(tunnel);
+ queue_work(l2tp_wq, &tunnel->del_work);
}
- return 0;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 0095012..42419f1 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -169,6 +169,9 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+
+ unsigned long dead;
+
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
@@ -257,7 +260,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_create(int priv_size,
struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 37bec0f..a7aa54f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -791,6 +791,7 @@ static int ieee80211_open(struct net_device *dev)
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
bool going_down)
{
+ struct ieee80211_sub_if_data *txq_sdata = sdata;
struct ieee80211_local *local = sdata->local;
struct fq *fq = &local->fq;
unsigned long flags;
@@ -931,6 +932,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP_VLAN:
+ txq_sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+
mutex_lock(&local->mtx);
list_del(&sdata->u.vlan.list);
mutex_unlock(&local->mtx);
@@ -1001,8 +1005,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- if (sdata->vif.txq) {
- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ if (txq_sdata->vif.txq) {
+ struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
+
+ /*
+ * FIXME FIXME
+ *
+ * We really shouldn't purge the *entire* txqi since that
+ * contains frames for the other AP_VLANs (and possibly
+ * the AP itself) as well, but there's no API in FQ now
+ * to be able to filter.
+ */
spin_lock_bh(&fq->lock);
ieee80211_txq_purge(local, txqi);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index eede5c6..30bba53 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
if (!cookie)
return -ENOENT;
+ flush_work(&local->hw_roc_start);
+
mutex_lock(&local->mtx);
list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
if (!mgmt_tx && roc->cookie != cookie)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b2c823ff..348700b4 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
return;
if (sta->dead)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index dd190ff..274c564 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1277,11 +1277,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
}
-static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
-{
- IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
-}
-
static u32 codel_skb_len_func(const struct sk_buff *skb)
{
return skb->len;
@@ -3388,6 +3383,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
+ struct ieee80211_vif *vif;
spin_lock_bh(&fq->lock);
@@ -3404,8 +3400,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
if (!skb)
goto out;
- ieee80211_set_skb_vif(skb, txqi);
-
hdr = (struct ieee80211_hdr *)skb->data;
info = IEEE80211_SKB_CB(skb);
@@ -3462,6 +3456,34 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
}
}
+ switch (tx.sdata->vif.type) {
+ case NL80211_IFTYPE_MONITOR:
+ if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ vif = &tx.sdata->vif;
+ break;
+ }
+ tx.sdata = rcu_dereference(local->monitor_sdata);
+ if (tx.sdata) {
+ vif = &tx.sdata->vif;
+ info->hw_queue =
+ vif->hw_queue[skb_get_queue_mapping(skb)];
+ } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ } else {
+ vif = NULL;
+ }
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ tx.sdata = container_of(tx.sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ /* fall through */
+ default:
+ vif = &tx.sdata->vif;
+ break;
+ }
+
+ IEEE80211_SKB_CB(skb)->control.vif = vif;
out:
spin_unlock_bh(&fq->lock);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d5b49fc..c348c40 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -72,6 +72,9 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+bool (*nattype_refresh_timer)(unsigned long nattype) __rcu __read_mostly;
+EXPORT_SYMBOL(nattype_refresh_timer);
+
struct conntrack_gc_work {
struct delayed_work dwork;
u32 last_bucket;
@@ -95,19 +98,26 @@ static struct conntrack_gc_work conntrack_gc_work;
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
+ /* 1) Acquire the lock */
spin_lock(lock);
- while (unlikely(nf_conntrack_locks_all)) {
- spin_unlock(lock);
- /*
- * Order the 'nf_conntrack_locks_all' load vs. the
- * spin_unlock_wait() loads below, to ensure
- * that 'nf_conntrack_locks_all_lock' is indeed held:
- */
- smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
- spin_unlock_wait(&nf_conntrack_locks_all_lock);
- spin_lock(lock);
- }
+ /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+ * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+ */
+ if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+ return;
+
+ /* fast path failed, unlock */
+ spin_unlock(lock);
+
+ /* Slow path 1) get global lock */
+ spin_lock(&nf_conntrack_locks_all_lock);
+
+ /* Slow path 2) get the lock we want */
+ spin_lock(lock);
+
+ /* Slow path 3) release the global lock */
+ spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
@@ -148,28 +158,27 @@ static void nf_conntrack_all_lock(void)
int i;
spin_lock(&nf_conntrack_locks_all_lock);
+
nf_conntrack_locks_all = true;
- /*
- * Order the above store of 'nf_conntrack_locks_all' against
- * the spin_unlock_wait() loads below, such that if
- * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
- * we must observe nf_conntrack_locks[] held:
- */
- smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
-
for (i = 0; i < CONNTRACK_LOCKS; i++) {
- spin_unlock_wait(&nf_conntrack_locks[i]);
+ spin_lock(&nf_conntrack_locks[i]);
+
+ /* This spin_unlock provides the "release" to ensure that
+ * nf_conntrack_locks_all==true is visible to everyone that
+ * acquired spin_lock(&nf_conntrack_locks[]).
+ */
+ spin_unlock(&nf_conntrack_locks[i]);
}
}
static void nf_conntrack_all_unlock(void)
{
- /*
- * All prior stores must be complete before we clear
+ /* All prior stores must be complete before we clear
* 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
* might observe the false value but not the entire
- * critical section:
+ * critical section.
+ * It pairs with the smp_load_acquire() in nf_conntrack_lock()
*/
smp_store_release(&nf_conntrack_locks_all, false);
spin_unlock(&nf_conntrack_locks_all_lock);
@@ -381,11 +390,15 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl)
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+void (*delete_sfe_entry)(struct nf_conn *ct) __rcu __read_mostly;
+EXPORT_SYMBOL(delete_sfe_entry);
+
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
struct nf_conntrack_l4proto *l4proto;
+ void (*delete_entry)(struct nf_conn *ct);
pr_debug("destroy_conntrack(%pK)\n", ct);
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
@@ -394,6 +407,17 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_ct_tmpl_free(ct);
return;
}
+
+ if (ct->sfe_entry) {
+ delete_entry = rcu_dereference(delete_sfe_entry);
+ if (delete_entry)
+ delete_entry(ct);
+ }
+
+ /* To make sure we don't get any weird locking issues here:
+ * destroy_conntrack() MUST NOT be called with a write lock
+ * to nf_conntrack_lock!!! -HW
+ */
rcu_read_lock();
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->destroy)
@@ -1199,6 +1223,10 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
#ifdef CONFIG_NF_CONNTRACK_SECMARK
ct->secmark = exp->master->secmark;
#endif
+/* Initialize the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ ct->nattype_entry = 0;
+#endif
NF_CT_STAT_INC(net, expect_new);
}
spin_unlock(&nf_conntrack_expect_lock);
@@ -1439,6 +1467,9 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
{
struct nf_conn_acct *acct;
u64 pkts;
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ bool (*nattype_ref_timer)(unsigned long nattype);
+#endif
NF_CT_ASSERT(skb);
@@ -1451,6 +1482,13 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
extra_jiffies += nfct_time_stamp;
ct->timeout = extra_jiffies;
+/* Refresh the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+ nattype_ref_timer = rcu_dereference(nattype_refresh_timer);
+ if (nattype_ref_timer)
+ nattype_ref_timer(ct->nattype_entry);
+#endif
+
acct:
if (do_acct) {
acct = nf_conn_acct_find(ct);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d..22fc321 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
BUG_ON(notify != new);
RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
+ /* synchronize_rcu() is called from ctnetlink_exit. */
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index f8dbacf..0d6c72d 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -411,7 +411,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
struct net *net = nf_ct_exp_net(expect);
struct hlist_node *next;
unsigned int h;
- int ret = 1;
+ int ret = 0;
if (!master_help) {
ret = -ESHUTDOWN;
@@ -461,7 +461,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
spin_lock_bh(&nf_conntrack_expect_lock);
ret = __nf_ct_expect_check(expect);
- if (ret <= 0)
+ if (ret < 0)
goto out;
ret = nf_ct_expect_insert(expect);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1972a14..b97caa1 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -19,6 +19,7 @@
#include <linux/tcp.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
+#include <linux/list.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
@@ -32,6 +33,18 @@ static unsigned int max_dcc_channels = 8;
static unsigned int dcc_timeout __read_mostly = 300;
/* This is slow, but it's simple. --RR */
static char *irc_buffer;
+struct irc_client_info {
+ char *nickname;
+ bool conn_to_server;
+ int nickname_len;
+ __be32 server_ip;
+ __be32 client_ip;
+ struct list_head ptr;
+ };
+
+static struct irc_client_info client_list;
+
+static unsigned int no_of_clients;
static DEFINE_SPINLOCK(irc_buffer_lock);
unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
@@ -61,7 +74,7 @@ static const char *const dccprotos[] = {
};
#define MINMATCHLEN 5
-
+#define MINLENNICK 1
/* tries to get the ip_addr and port out of a dcc command
* return value: -1 on failure, 0 on success
* data pointer to first byte of DCC command data
@@ -71,6 +84,23 @@ static const char *const dccprotos[] = {
* ad_beg_p returns pointer to first byte of addr data
* ad_end_p returns pointer to last byte of addr data
*/
+static struct irc_client_info *search_client_by_ip
+(
+ struct nf_conntrack_tuple *tuple
+)
+{
+ struct irc_client_info *temp, *ret = NULL;
+ struct list_head *obj_ptr, *prev_obj_ptr;
+
+ list_for_each_safe(obj_ptr, prev_obj_ptr, &client_list.ptr) {
+ temp = list_entry(obj_ptr, struct irc_client_info, ptr);
+ if ((temp->client_ip == tuple->src.u3.ip) &&
+ (temp->server_ip == tuple->dst.u3.ip))
+ ret = temp;
+ }
+ return ret;
+}
+
static int parse_dcc(char *data, const char *data_end, __be32 *ip,
u_int16_t *port, char **ad_beg_p, char **ad_end_p)
{
@@ -105,6 +135,106 @@ static int parse_dcc(char *data, const char *data_end, __be32 *ip,
return 0;
}
+static bool mangle_ip(struct nf_conn *ct,
+ int dir, char *nick_start)
+{
+ char *nick_end;
+ struct nf_conntrack_tuple *tuple;
+ struct irc_client_info *temp;
+ struct list_head *obj_ptr, *prev_obj_ptr;
+
+ tuple = &ct->tuplehash[dir].tuple;
+ nick_end = nick_start;
+ while (*nick_end != ' ')
+ nick_end++;
+ list_for_each_safe(obj_ptr, prev_obj_ptr,
+ &client_list.ptr) {
+ temp = list_entry(obj_ptr,
+ struct irc_client_info, ptr);
+ /*If it is an internal client,
+ *do not mangle the DCC Server IP
+ */
+ if ((temp->server_ip == tuple->dst.u3.ip) &&
+ (temp->nickname_len == (nick_end - nick_start))) {
+ if (memcmp(nick_start, temp->nickname,
+ temp->nickname_len) == 0)
+ return false;
+ }
+ }
+ return true;
+}
+
+static int handle_nickname(struct nf_conn *ct,
+ int dir, char *nick_start)
+{
+ char *nick_end;
+ struct nf_conntrack_tuple *tuple;
+ struct irc_client_info *temp;
+ int i, j;
+ bool add_entry = true;
+
+ nick_end = nick_start;
+ i = 0;
+ while (*nick_end != '\n') {
+ nick_end++;
+ i++;
+ }
+ tuple = &ct->tuplehash[dir].tuple;
+ /*Check if the entry is already
+ * present for that client
+ */
+ temp = search_client_by_ip(tuple);
+ if (temp) {
+ add_entry = false;
+ /*Update nickname if the client is not already
+ * connected to the server.If the client is
+ * connected, wait for server to confirm
+ * if nickname is valid
+ */
+ if (!temp->conn_to_server) {
+ kfree(temp->nickname);
+ temp->nickname =
+ kmalloc(i, GFP_ATOMIC);
+ if (temp->nickname) {
+ temp->nickname_len = i;
+ memcpy(temp->nickname,
+ nick_start, temp->nickname_len);
+ } else {
+ list_del(&temp->ptr);
+ no_of_clients--;
+ kfree(temp);
+ }
+ }
+ }
+ /*Add client entry if not already present*/
+ if (add_entry) {
+ j = sizeof(struct irc_client_info);
+ temp = kmalloc(j, GFP_ATOMIC);
+ if (temp) {
+ no_of_clients++;
+ tuple = &ct->tuplehash[dir].tuple;
+ temp->nickname_len = i;
+ temp->nickname =
+ kmalloc(temp->nickname_len, GFP_ATOMIC);
+ if (!temp->nickname) {
+ kfree(temp);
+ return NF_DROP;
+ }
+ memcpy(temp->nickname, nick_start,
+ temp->nickname_len);
+ memcpy(&temp->client_ip,
+ &tuple->src.u3.ip, sizeof(__be32));
+ memcpy(&temp->server_ip,
+ &tuple->dst.u3.ip, sizeof(__be32));
+ temp->conn_to_server = false;
+ list_add(&temp->ptr,
+ &client_list.ptr);
+ } else {
+ return NF_DROP;
+ }
+ }
+ return NF_ACCEPT;
+}
static int help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
@@ -113,7 +243,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
const struct tcphdr *th;
struct tcphdr _tcph;
const char *data_limit;
- char *data, *ib_ptr;
+ char *data, *ib_ptr, *for_print, *nick_end;
int dir = CTINFO2DIR(ctinfo);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
@@ -123,10 +253,8 @@ static int help(struct sk_buff *skb, unsigned int protoff,
int i, ret = NF_ACCEPT;
char *addr_beg_p, *addr_end_p;
typeof(nf_nat_irc_hook) nf_nat_irc;
-
- /* If packet is coming from IRC server */
- if (dir == IP_CT_DIR_REPLY)
- return NF_ACCEPT;
+ struct irc_client_info *temp;
+ bool mangle = true;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -150,80 +278,223 @@ static int help(struct sk_buff *skb, unsigned int protoff,
data = ib_ptr;
data_limit = ib_ptr + skb->len - dataoff;
- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
- while (data < data_limit - (19 + MINMATCHLEN)) {
- if (memcmp(data, "\1DCC ", 5)) {
- data++;
- continue;
+ /* If packet is coming from IRC server
+ * parse the packet for different type of
+ * messages (MOTD,NICK etc) and process
+ * accordingly
+ */
+ if (dir == IP_CT_DIR_REPLY) {
+ /* strlen("NICK xxxxxx")
+ * 5+strlen("xxxxxx")=1 (minimum length of nickname)
+ */
+
+ while (data < data_limit - 6) {
+ if (memcmp(data, " MOTD ", 6)) {
+ data++;
+ continue;
+ }
+ /* MOTD message signifies successful
+ * registration with server
+ */
+ tuple = &ct->tuplehash[!dir].tuple;
+ temp = search_client_by_ip(tuple);
+ if (temp && !temp->conn_to_server)
+ temp->conn_to_server = true;
+ ret = NF_ACCEPT;
+ goto out;
}
- data += 5;
- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
- iph = ip_hdr(skb);
- pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
- &iph->saddr, ntohs(th->source),
- &iph->daddr, ntohs(th->dest));
-
- for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
- if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
- /* no match */
+ /* strlen("NICK :xxxxxx")
+ * 6+strlen("xxxxxx")=1 (minimum length of nickname)
+ * Parsing the server reply to get nickname
+ * of the client
+ */
+ data = ib_ptr;
+ data_limit = ib_ptr + skb->len - dataoff;
+ while (data < data_limit - (6 + MINLENNICK)) {
+ if (memcmp(data, "NICK :", 6)) {
+ data++;
continue;
}
- data += strlen(dccprotos[i]);
- pr_debug("DCC %s detected\n", dccprotos[i]);
-
- /* we have at least
- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
- * data left (== 14/13 bytes) */
- if (parse_dcc(data, data_limit, &dcc_ip,
- &dcc_port, &addr_beg_p, &addr_end_p)) {
- pr_debug("unable to parse dcc command\n");
- continue;
- }
-
- pr_debug("DCC bound ip/port: %pI4:%u\n",
- &dcc_ip, dcc_port);
-
- /* dcc_ip can be the internal OR external (NAT'ed) IP */
- tuple = &ct->tuplehash[dir].tuple;
- if (tuple->src.u3.ip != dcc_ip &&
- tuple->dst.u3.ip != dcc_ip) {
- net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
- &tuple->src.u3.ip,
- &dcc_ip, dcc_port);
- continue;
- }
-
- exp = nf_ct_expect_alloc(ct);
- if (exp == NULL) {
- nf_ct_helper_log(skb, ct,
- "cannot alloc expectation");
- ret = NF_DROP;
- goto out;
+ data += 6;
+ nick_end = data;
+ i = 0;
+ while ((*nick_end != 0x0d) &&
+ (*(nick_end + 1) != '\n')) {
+ nick_end++;
+ i++;
}
tuple = &ct->tuplehash[!dir].tuple;
- port = htons(dcc_port);
- nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
- tuple->src.l3num,
- NULL, &tuple->dst.u3,
- IPPROTO_TCP, NULL, &port);
-
- nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
- if (nf_nat_irc && ct->status & IPS_NAT_MASK)
- ret = nf_nat_irc(skb, ctinfo, protoff,
- addr_beg_p - ib_ptr,
- addr_end_p - addr_beg_p,
- exp);
- else if (nf_ct_expect_related(exp) != 0) {
- nf_ct_helper_log(skb, ct,
- "cannot add expectation");
- ret = NF_DROP;
+ temp = search_client_by_ip(tuple);
+ if (temp && temp->nickname) {
+ kfree(temp->nickname);
+ temp->nickname = kmalloc(i, GFP_ATOMIC);
+ if (temp->nickname) {
+ temp->nickname_len = i;
+ memcpy(temp->nickname, data,
+ temp->nickname_len);
+ temp->conn_to_server = true;
+ } else {
+ list_del(&temp->ptr);
+ no_of_clients--;
+ kfree(temp);
+ ret = NF_ACCEPT;
+ }
}
- nf_ct_expect_put(exp);
+ /*NICK during registration*/
+ ret = NF_ACCEPT;
goto out;
}
}
+
+ else{
+ /*Parsing NICK command from client to create an entry
+ * strlen("NICK xxxxxx")
+ * 5+strlen("xxxxxx")=1 (minimum length of nickname)
+ */
+ data = ib_ptr;
+ data_limit = ib_ptr + skb->len - dataoff;
+ while (data < data_limit - (5 + MINLENNICK)) {
+ if (memcmp(data, "NICK ", 5)) {
+ data++;
+ continue;
+ }
+ data += 5;
+ ret = handle_nickname(ct, dir, data);
+ goto out;
+ }
+
+ data = ib_ptr;
+ while (data < data_limit - 6) {
+ if (memcmp(data, "QUIT :", 6)) {
+ data++;
+ continue;
+ }
+ /* Parsing QUIT to free the list entry
+ */
+ tuple = &ct->tuplehash[dir].tuple;
+ temp = search_client_by_ip(tuple);
+ if (temp) {
+ list_del(&temp->ptr);
+ no_of_clients--;
+ kfree(temp->nickname);
+ kfree(temp);
+ }
+ ret = NF_ACCEPT;
+ goto out;
+ }
+ /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
+ * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14
+ */
+ data = ib_ptr;
+ while (data < data_limit - (19 + MINMATCHLEN)) {
+ if (memcmp(data, "\1DCC ", 5)) {
+ data++;
+ continue;
+ }
+ data += 5;
+ /* we have at least (19+MINMATCHLEN)-5
+ *bytes valid data left
+ */
+ iph = ip_hdr(skb);
+ pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
+ &iph->saddr, ntohs(th->source),
+ &iph->daddr, ntohs(th->dest));
+
+ for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
+ if (memcmp(data, dccprotos[i],
+ strlen(dccprotos[i]))) {
+ /* no match */
+ continue;
+ }
+ data += strlen(dccprotos[i]);
+ pr_debug("DCC %s detected\n", dccprotos[i]);
+
+ /* we have at least
+ * (19+MINMATCHLEN)-5-dccprotos[i].matchlen
+ *bytes valid data left (== 14/13 bytes)
+ */
+ if (parse_dcc(data, data_limit, &dcc_ip,
+ &dcc_port, &addr_beg_p,
+ &addr_end_p)) {
+ pr_debug("unable to parse dcc command\n");
+ continue;
+ }
+
+ pr_debug("DCC bound ip/port: %pI4:%u\n",
+ &dcc_ip, dcc_port);
+
+ /* dcc_ip can be the internal OR
+ *external (NAT'ed) IP
+ */
+ tuple = &ct->tuplehash[dir].tuple;
+ if (tuple->src.u3.ip != dcc_ip &&
+ tuple->dst.u3.ip != dcc_ip) {
+ net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+ &tuple->src.u3.ip,
+ &dcc_ip, dcc_port);
+ continue;
+ }
+
+ exp = nf_ct_expect_alloc(ct);
+ if (!exp) {
+ nf_ct_helper_log(skb, ct,
+ "cannot alloc expectation");
+ ret = NF_DROP;
+ goto out;
+ }
+ tuple = &ct->tuplehash[!dir].tuple;
+ port = htons(dcc_port);
+ nf_ct_expect_init(exp,
+ NF_CT_EXPECT_CLASS_DEFAULT,
+ tuple->src.l3num,
+ NULL, &tuple->dst.u3,
+ IPPROTO_TCP, NULL, &port);
+
+ nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
+
+ tuple = &ct->tuplehash[dir].tuple;
+ for_print = ib_ptr;
+ /* strlen("PRIVMSG xxxx :\1DCC
+ *SENT t AAAAAAAA P\1\n")=26
+ * 8+strlen(xxxx) = 1(min length)+7+
+ *MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14
+ *Parsing DCC command to get client name and
+ *check whether it is an internal client
+ */
+ while (for_print <
+ data_limit - (25 + MINMATCHLEN)) {
+ if (memcmp(for_print, "PRIVMSG ", 8)) {
+ for_print++;
+ continue;
+ }
+ for_print += 8;
+ mangle = mangle_ip(ct,
+ dir, for_print);
+ break;
+ }
+ if (mangle &&
+ nf_nat_irc &&
+ ct->status & IPS_NAT_MASK)
+ ret = nf_nat_irc(skb, ctinfo,
+ protoff,
+ addr_beg_p - ib_ptr,
+ addr_end_p
+ - addr_beg_p,
+ exp);
+
+ else if (mangle &&
+ nf_ct_expect_related(exp)
+ != 0) {
+ nf_ct_helper_log(skb, ct,
+ "cannot add expectation");
+ ret = NF_DROP;
+ }
+ nf_ct_expect_put(exp);
+ goto out;
+ }
+ }
+ }
out:
spin_unlock_bh(&irc_buffer_lock);
return ret;
@@ -266,7 +537,8 @@ static int __init nf_conntrack_irc_init(void)
kfree(irc_buffer);
return ret;
}
-
+ no_of_clients = 0;
+ INIT_LIST_HEAD(&client_list.ptr);
return 0;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 08b24a9..6bd58eea 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3417,6 +3417,7 @@ static void __exit ctnetlink_exit(void)
#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
RCU_INIT_POINTER(nfnl_ct_hook, NULL);
#endif
+ synchronize_rcu();
}
module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 3a8dc39..f132ef9 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -49,13 +49,28 @@ module_param(sip_direct_signalling, int, 0600);
MODULE_PARM_DESC(sip_direct_signalling, "expect incoming calls from registrar "
"only (default 1)");
-static int sip_direct_media __read_mostly = 1;
-module_param(sip_direct_media, int, 0600);
-MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
- "endpoints only (default 1)");
-
const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
+static struct ctl_table_header *sip_sysctl_header;
+static unsigned int nf_ct_disable_sip_alg;
+static int sip_direct_media = 1;
+static struct ctl_table sip_sysctl_tbl[] = {
+ {
+ .procname = "nf_conntrack_disable_sip_alg",
+ .data = &nf_ct_disable_sip_alg,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "nf_conntrack_sip_direct_media",
+ .data = &sip_direct_media,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
static int string_len(const struct nf_conn *ct, const char *dptr,
const char *limit, int *shift)
@@ -1467,6 +1482,8 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
const struct nf_nat_sip_hooks *hooks;
int ret;
+ if (nf_ct_disable_sip_alg)
+ return NF_ACCEPT;
if (strncasecmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
ret = process_sip_request(skb, protoff, dataoff, dptr, datalen);
else
@@ -1626,6 +1643,16 @@ static int __init nf_conntrack_sip_init(void)
{
int i, ret;
+ sip_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
+ sip_sysctl_tbl);
+ if (!sip_sysctl_header)
+ pr_debug("nf_ct_sip:Unable to register SIP systbl\n");
+
+ if (nf_ct_disable_sip_alg)
+ pr_debug("nf_ct_sip: SIP ALG disabled\n");
+ else
+ pr_debug("nf_ct_sip: SIP ALG enabled\n");
+
if (ports_c == 0)
ports[ports_c++] = SIP_PORT;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index dde64c4..2916f48 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -892,6 +892,8 @@ static void __exit nf_nat_cleanup(void)
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
+ synchronize_rcu();
+
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 3b79f34..b1fcfa0 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -161,6 +161,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
int i, ret;
struct nf_conntrack_expect_policy *expect_policy;
struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+ unsigned int class_max;
ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
nfnl_cthelper_expect_policy_set);
@@ -170,19 +171,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
if (!tb[NFCTH_POLICY_SET_NUM])
return -EINVAL;
- helper->expect_class_max =
- ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
- if (helper->expect_class_max != 0 &&
- helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+ if (class_max == 0)
+ return -EINVAL;
+ if (class_max > NF_CT_MAX_EXPECT_CLASSES)
return -EOVERFLOW;
expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
- helper->expect_class_max, GFP_KERNEL);
+ class_max, GFP_KERNEL);
if (expect_policy == NULL)
return -ENOMEM;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < class_max; i++) {
if (!tb[NFCTH_POLICY_SET+i])
goto err;
@@ -191,6 +191,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
if (ret < 0)
goto err;
}
+
+ helper->expect_class_max = class_max - 1;
helper->expect_policy = expect_policy;
return 0;
err:
@@ -377,10 +379,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
- htonl(helper->expect_class_max)))
+ htonl(helper->expect_class_max + 1)))
goto nla_put_failure;
- for (i=0; i<helper->expect_class_max; i++) {
+ for (i = 0; i < helper->expect_class_max + 1; i++) {
nest_parms2 = nla_nest_start(skb,
(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
if (nest_parms2 == NULL)
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 139e086..47d6656 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+ synchronize_rcu();
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- rcu_barrier();
}
module_init(cttimeout_init);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 6c1e024..7c33955 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -159,8 +159,34 @@ void nft_meta_get_eval(const struct nft_expr *expr,
else
*dest = PACKET_BROADCAST;
break;
+ case NFPROTO_NETDEV:
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph, _iph;
+
+ iph = skb_header_pointer(skb, noff,
+ sizeof(_iph), &_iph);
+ if (!iph)
+ goto err;
+
+ if (ipv4_is_multicast(iph->daddr))
+ *dest = PACKET_MULTICAST;
+ else
+ *dest = PACKET_BROADCAST;
+
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ *dest = PACKET_MULTICAST;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
goto err;
}
break;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 1cf2874..822be06 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1174,6 +1174,38 @@ static void iface_stat_update(struct net_device *net_dev, bool stash_only)
spin_unlock_bh(&iface_stat_list_lock);
}
+/* Guaranteed to return a net_device that has a name */
+static void get_dev_and_dir(const struct sk_buff *skb,
+ struct xt_action_param *par,
+ enum ifs_tx_rx *direction,
+ const struct net_device **el_dev)
+{
+ BUG_ON(!direction || !el_dev);
+
+ if (par->in) {
+ *el_dev = par->in;
+ *direction = IFS_RX;
+ } else if (par->out) {
+ *el_dev = par->out;
+ *direction = IFS_TX;
+ } else {
+ pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ }
+ if (unlikely(!(*el_dev)->name)) {
+ pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ }
+ if (skb->dev && *el_dev != skb->dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%pK %s vs par->%s=%pK %s\n",
+ par->hooknum, skb->dev, skb->dev->name,
+ *direction == IFS_RX ? "in" : "out", *el_dev,
+ (*el_dev)->name);
+ }
+}
+
/*
* Update stats for the specified interface from the skb.
* Do nothing if the entry
@@ -1185,46 +1217,27 @@ static void iface_stat_update_from_skb(const struct sk_buff *skb,
{
struct iface_stat *entry;
const struct net_device *el_dev;
- enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+ enum ifs_tx_rx direction;
int bytes = skb->len;
int proto;
- if (!skb->dev) {
- MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
- el_dev = par->in ? : par->out;
- } else {
- const struct net_device *other_dev;
- el_dev = skb->dev;
- other_dev = par->in ? : par->out;
- if (el_dev != other_dev) {
- MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
- "par->(in/out)=%p %s\n",
- par->hooknum, el_dev, el_dev->name, other_dev,
- other_dev->name);
- }
- }
-
- if (unlikely(!el_dev)) {
- pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
- par->hooknum, __func__);
- BUG();
- } else {
- proto = ipx_proto(skb, par);
- MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
- par->hooknum, el_dev->name, el_dev->type,
- par->family, proto);
- }
+ get_dev_and_dir(skb, par, &direction, &el_dev);
+ proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: iface_stat: %s(%s): "
+ "type=%d fam=%d proto=%d dir=%d\n",
+ par->hooknum, __func__, el_dev->name, el_dev->type,
+ par->family, proto, direction);
spin_lock_bh(&iface_stat_list_lock);
entry = get_iface_entry(el_dev->name);
if (entry == NULL) {
- IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
- __func__, el_dev->name);
+ IF_DEBUG("qtaguid[%d]: iface_stat: %s(%s): not tracked\n",
+ par->hooknum, __func__, el_dev->name);
spin_unlock_bh(&iface_stat_list_lock);
return;
}
- IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ IF_DEBUG("qtaguid[%d]: %s(%s): entry=%p\n", par->hooknum, __func__,
el_dev->name, entry);
data_counters_update(&entry->totals_via_skb, 0, direction, proto,
@@ -1289,14 +1302,14 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
spin_lock_bh(&iface_stat_list_lock);
iface_entry = get_iface_entry(ifname);
if (!iface_entry) {
- pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+ pr_err_ratelimited("qtaguid: tag_stat: stat_update() "
"%s not found\n", ifname);
spin_unlock_bh(&iface_stat_list_lock);
return;
}
/* It is ok to process data when an iface_entry is inactive */
- MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+ MT_DEBUG("qtaguid: tag_stat: stat_update() dev=%s entry=%pK\n",
ifname, iface_entry);
/*
@@ -1313,8 +1326,8 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
tag = combine_atag_with_uid(acct_tag, uid);
uid_tag = make_tag_from_uid(uid);
}
- MT_DEBUG("qtaguid: iface_stat: stat_update(): "
- " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+ MT_DEBUG("qtaguid: tag_stat: stat_update(): "
+ " looking for tag=0x%llx (uid=%u) in ife=%pK\n",
tag, get_uid_from_tag(tag), iface_entry);
/* Loop over tag list under this interface for {acct_tag,uid_tag} */
spin_lock_bh(&iface_entry->tag_stat_list_lock);
@@ -1573,8 +1586,8 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
struct sock *sk;
unsigned int hook_mask = (1 << par->hooknum);
- MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
- par->hooknum, par->family);
+ MT_DEBUG("qtaguid[%d]: find_sk(skb=%pK) family=%d\n",
+ par->hooknum, skb, par->family);
/*
* Let's not abuse the the xt_socket_get*_sk(), or else it will
@@ -1595,8 +1608,8 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
}
if (sk) {
- MT_DEBUG("qtaguid: %p->sk_proto=%u "
- "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+ MT_DEBUG("qtaguid[%d]: %pK->sk_proto=%u->sk_state=%d\n",
+ par->hooknum, sk, sk->sk_protocol, sk->sk_state);
}
return sk;
}
@@ -1606,35 +1619,19 @@ static void account_for_uid(const struct sk_buff *skb,
struct xt_action_param *par)
{
const struct net_device *el_dev;
+ enum ifs_tx_rx direction;
+ int proto;
- if (!skb->dev) {
- MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
- el_dev = par->in ? : par->out;
- } else {
- const struct net_device *other_dev;
- el_dev = skb->dev;
- other_dev = par->in ? : par->out;
- if (el_dev != other_dev) {
- MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
- "par->(in/out)=%p %s\n",
- par->hooknum, el_dev, el_dev->name, other_dev,
- other_dev->name);
- }
- }
+ get_dev_and_dir(skb, par, &direction, &el_dev);
+ proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d dir=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto, direction);
- if (unlikely(!el_dev)) {
- pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
- } else {
- int proto = ipx_proto(skb, par);
- MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
- par->hooknum, el_dev->name, el_dev->type,
- par->family, proto);
-
- if_tag_stat_update(el_dev->name, uid,
- skb->sk ? skb->sk : alternate_sk,
- par->in ? IFS_RX : IFS_TX,
- proto, skb->len);
- }
+ if_tag_stat_update(el_dev->name, uid,
+ skb->sk ? skb->sk : alternate_sk,
+ direction,
+ proto, skb->len);
}
static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -1646,6 +1643,11 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t sock_uid;
bool res;
bool set_sk_callback_lock = false;
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do tag stats when the uid-owner is not requested
+ */
+ bool do_tag_stat = !(info->match & XT_QTAGUID_UID);
if (unlikely(module_passive))
return (info->match ^ info->invert) == 0;
@@ -1718,19 +1720,13 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
-
- if (sk == NULL) {
+ if (!sk) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
* against the system.
*/
- /*
- * TODO: unhack how to force just accounting.
- * For now we only do iface stats when the uid-owner is not
- * requested.
- */
- if (!(info->match & XT_QTAGUID_UID))
+ if (do_tag_stat)
account_for_uid(skb, sk, 0, par);
MT_DEBUG("qtaguid[%d]: leaving (sk=NULL)\n", par->hooknum);
res = (info->match ^ info->invert) == 0;
@@ -1741,12 +1737,9 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
goto put_sock_ret_res;
}
sock_uid = sk->sk_uid;
- /*
- * TODO: unhack how to force just accounting.
- * For now we only do iface stats when the uid-owner is not requested
- */
- if (!(info->match & XT_QTAGUID_UID))
- account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+ if (do_tag_stat)
+ account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid),
+ par);
/*
* The following two tests fail the match when:
@@ -1758,8 +1751,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
- if ((uid_gte(sk->sk_uid, uid_min) &&
- uid_lte(sk->sk_uid, uid_max)) ^
+ if ((uid_gte(sock_uid, uid_min) &&
+ uid_lte(sock_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
@@ -1773,16 +1766,18 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
- par->hooknum, sk, sk->sk_socket,
- sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
if (!filp) {
- res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
+ res = ((info->match ^ info->invert) &
+ XT_QTAGUID_GID) == 0;
atomic64_inc(&qtu_events.match_no_sk_gid);
goto put_sock_ret_res;
}
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
- par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+ par->hooknum, filp ?
+ from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 246f29d..2a5775f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2211,10 +2211,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
mutex_unlock(nlk->cb_mutex);
+ ret = 0;
if (cb->start)
- cb->start(cb);
+ ret = cb->start(cb);
- ret = netlink_dump(sk);
+ if (!ret)
+ ret = netlink_dump(sk);
+
sock_put(sk);
if (ret)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 35ba4b6..b17f909 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1648,10 +1648,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
mutex_lock(&fanout_mutex);
- err = -EINVAL;
- if (!po->running)
- goto out;
-
err = -EALREADY;
if (po->fanout)
goto out;
@@ -1700,7 +1696,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
- if (match->type == type &&
+
+ spin_lock(&po->bind_lock);
+ if (po->running &&
+ match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
@@ -1712,6 +1711,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
err = 0;
}
}
+ spin_unlock(&po->bind_lock);
+
+ if (err && !atomic_read(&match->sk_ref)) {
+ list_del(&match->list);
+ kfree(match);
+ }
+
out:
if (err && rollover) {
kfree(rollover);
@@ -2832,6 +2838,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct virtio_net_hdr vnet_hdr = { 0 };
int offset = 0;
struct packet_sock *po = pkt_sk(sk);
+ bool has_vnet_hdr = false;
int hlen, tlen, linear;
int extra_len = 0;
@@ -2875,6 +2882,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
if (err)
goto out_unlock;
+ has_vnet_hdr = true;
}
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2935,7 +2943,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
packet_pick_tx_queue(dev, skb);
- if (po->has_vnet_hdr) {
+ if (has_vnet_hdr) {
err = packet_snd_vnet_gso(skb, &vnet_hdr);
if (err)
goto out_free;
@@ -3063,13 +3071,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
int ret = 0;
bool unlisted = false;
- if (po->fanout)
- return -EINVAL;
-
lock_sock(sk);
spin_lock(&po->bind_lock);
rcu_read_lock();
+ if (po->fanout) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
if (name) {
dev = dev_get_by_name_rcu(sock_net(sk), name);
if (!dev) {
@@ -3884,6 +3894,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_HDRLEN:
if (len > sizeof(int))
len = sizeof(int);
+ if (len < sizeof(int))
+ return -EINVAL;
if (copy_from_user(&val, optval, len))
return -EFAULT;
switch (val) {
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 5b2ab95..169156c 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -405,7 +405,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ret = PTR_ERR(ic->i_send_cq);
ic->i_send_cq = NULL;
rdsdebug("ib_create_cq send failed: %d\n", ret);
- goto out;
+ goto rds_ibdev_out;
}
cq_attr.cqe = ic->i_recv_ring.w_nr;
@@ -416,19 +416,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ret = PTR_ERR(ic->i_recv_cq);
ic->i_recv_cq = NULL;
rdsdebug("ib_create_cq recv failed: %d\n", ret);
- goto out;
+ goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
@@ -453,7 +453,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
- goto out;
+ goto recv_cq_out;
}
ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
@@ -463,7 +463,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
- goto out;
+ goto qp_out;
}
ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
@@ -473,7 +473,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
- goto out;
+ goto send_hdrs_dma_out;
}
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
@@ -481,7 +481,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
- goto out;
+ goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
@@ -489,7 +489,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
- goto out;
+ goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
@@ -497,7 +497,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
- goto out;
+ goto sends_out;
}
rds_ib_recv_init_ack(ic);
@@ -505,8 +505,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
-out:
+ return ret;
+
+sends_out:
+ vfree(ic->i_sends);
+ack_dma_out:
+ ib_dma_free_coherent(dev, sizeof(struct rds_header),
+ ic->i_ack, ic->i_ack_dma);
+recv_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+send_hdrs_dma_out:
+ ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+ sizeof(struct rds_header),
+ ic->i_send_hdrs, ic->i_send_hdrs_dma);
+qp_out:
+ rdma_destroy_qp(ic->i_cm_id);
+recv_cq_out:
+ if (!ib_destroy_cq(ic->i_recv_cq))
+ ic->i_recv_cq = NULL;
+send_cq_out:
+ if (!ib_destroy_cq(ic->i_send_cq))
+ ic->i_send_cq = NULL;
+rds_ibdev_out:
+ rds_ib_remove_conn(rds_ibdev, conn);
rds_ib_dev_put(rds_ibdev);
+
return ret;
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 84d90c9..1910981 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -69,16 +69,6 @@ static void rds_ib_send_complete(struct rds_message *rm,
complete(rm, notify_status);
}
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
- struct rm_data_op *op,
- int wc_status)
-{
- if (op->op_nents)
- ib_dma_unmap_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents,
- DMA_TO_DEVICE);
-}
-
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
struct rm_rdma_op *op,
int wc_status)
@@ -139,6 +129,21 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
rds_ib_stats_inc(s_ib_atomic_fadd);
}
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+ struct rm_data_op *op,
+ int wc_status)
+{
+ struct rds_message *rm = container_of(op, struct rds_message, data);
+
+ if (op->op_nents)
+ ib_dma_unmap_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents,
+ DMA_TO_DEVICE);
+
+ if (rm->rdma.op_active && rm->data.op_notify)
+ rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
+}
+
/*
* Unmap the resources associated with a struct send_work.
*
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4c93bad..8d3a851 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -626,6 +626,16 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
+
+		/* Enable rdma notification on data operation for composite
+ * rds messages and make sure notification is enabled only
+ * for the data operation which follows it so that application
+ * gets notified only after full message gets delivered.
+ */
+ if (rm->data.op_sg) {
+ rm->rdma.op_notify = 0;
+ rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ }
}
/* The cookie contains the R_Key of the remote memory region, and
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 67ba67c..f107a96 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -414,6 +414,7 @@ struct rds_message {
} rdma;
struct rm_data_op {
unsigned int op_active:1;
+ unsigned int op_notify:1;
unsigned int op_nents;
unsigned int op_count;
unsigned int op_dmasg;
diff --git a/net/rds/send.c b/net/rds/send.c
index 896626b..f28651b 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -475,12 +475,14 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
struct rm_rdma_op *ro;
struct rds_notifier *notifier;
unsigned long flags;
+ unsigned int notify = 0;
spin_lock_irqsave(&rm->m_rs_lock, flags);
+ notify = rm->rdma.op_notify | rm->data.op_notify;
ro = &rm->rdma;
if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
- ro->op_active && ro->op_notify && ro->op_notifier) {
+ ro->op_active && notify && ro->op_notifier) {
notifier = ro->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 5ce4600..aa8a0b5 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -34,12 +34,14 @@
* parmeter depends on the rmnet_mode
*/
struct rmnet_logical_ep_conf_s {
+ struct net_device *egress_dev;
+ struct timespec last_flush_time;
+ long curr_time_limit;
+ unsigned int flush_byte_count;
+ unsigned int curr_byte_threshold;
u8 refcount;
u8 rmnet_mode;
u8 mux_id;
- struct timespec flush_time;
- unsigned int flush_byte_count;
- struct net_device *egress_dev;
};
/**
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 57646ef..a5b22c4 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -41,26 +41,30 @@ module_param(dump_pkt_tx, uint, 0644);
MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
-/* Time in nano seconds. This number must be less that a second. */
-long gro_flush_time __read_mostly = 10000L;
-module_param(gro_flush_time, long, 0644);
-MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+static bool gro_flush_logic_on __read_mostly = 1;
+module_param(gro_flush_logic_on, bool, 0644);
+MODULE_PARM_DESC(gro_flush_logic_on, "If off let GRO determine flushing");
-unsigned int gro_min_byte_thresh __read_mostly = 7500;
-module_param(gro_min_byte_thresh, uint, 0644);
-MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
-
-unsigned int dynamic_gro_on __read_mostly = 1;
-module_param(dynamic_gro_on, uint, 0644);
+static bool dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, bool, 0644);
MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
+/* Time in nano seconds. This number must be less that a second. */
+static long lower_flush_time __read_mostly = 10000L;
+module_param(lower_flush_time, long, 0644);
+MODULE_PARM_DESC(lower_flush_time, "Min time value for flushing GRO");
+
+static unsigned int lower_byte_limit __read_mostly = 7500;
+module_param(lower_byte_limit, uint, 0644);
+MODULE_PARM_DESC(lower_byte_limit, "Min byte count for flushing GRO");
+
unsigned int upper_flush_time __read_mostly = 15000;
module_param(upper_flush_time, uint, 0644);
-MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+MODULE_PARM_DESC(upper_flush_time, "Max time value for flushing GRO");
unsigned int upper_byte_limit __read_mostly = 10500;
module_param(upper_byte_limit, uint, 0644);
-MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
+MODULE_PARM_DESC(upper_byte_limit, "Max byte count for flushing GRO");
#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60
@@ -184,19 +188,35 @@ static rx_handler_result_t rmnet_bridge_handler
return RX_HANDLER_CONSUMED;
}
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
-static void rmnet_reset_mac_header(struct sk_buff *skb)
+/* RX/TX Fixup */
+
+/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for ingress packets
+ *
+ * Return: void
+ */
+static void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
- skb->mac_header = 0;
- skb->mac_len = 0;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
}
-#else
-static void rmnet_reset_mac_header(struct sk_buff *skb)
+
+/* rmnet_vnd_tx_fixup() - Virtual Network Device transmic fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for egress packets
+ *
+ * Return: void
+ */
+static void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
- skb->mac_header = skb->network_header;
- skb->mac_len = 0;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
}
-#endif /*NET_SKBUFF_DATA_USES_OFFSET*/
/* rmnet_check_skb_can_gro() - Check is skb can be passed through GRO handler
*
@@ -242,62 +262,64 @@ static void rmnet_optional_gro_flush(struct napi_struct *napi,
{
struct timespec curr_time, diff;
- if (!gro_flush_time)
+ if (!gro_flush_logic_on)
return;
- if (unlikely(ep->flush_time.tv_sec == 0)) {
- getnstimeofday(&ep->flush_time);
+ if (unlikely(ep->last_flush_time.tv_sec == 0)) {
+ getnstimeofday(&ep->last_flush_time);
ep->flush_byte_count = 0;
+ ep->curr_time_limit = lower_flush_time;
+ ep->curr_byte_threshold = lower_byte_limit;
} else {
getnstimeofday(&(curr_time));
- diff = timespec_sub(curr_time, ep->flush_time);
+ diff = timespec_sub(curr_time, ep->last_flush_time);
ep->flush_byte_count += skb_size;
if (dynamic_gro_on) {
if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
- gro_flush_time) &&
+ ep->curr_time_limit) &&
ep->flush_byte_count >=
- gro_min_byte_thresh) {
+ ep->curr_byte_threshold) {
/* Processed many bytes in a small time window.
* No longer need to flush so often and we can
* increase our byte limit
*/
- gro_flush_time = upper_flush_time;
- gro_min_byte_thresh = upper_byte_limit;
+ ep->curr_time_limit = upper_flush_time;
+ ep->curr_byte_threshold = upper_byte_limit;
} else if ((diff.tv_sec > 0 ||
- diff.tv_nsec > gro_flush_time) &&
+ diff.tv_nsec > ep->curr_time_limit) &&
ep->flush_byte_count <
- gro_min_byte_thresh) {
+ ep->curr_byte_threshold) {
/* We have not hit our time limit and we are not
* receive many bytes. Demote ourselves to the
* lowest limits and flush
*/
napi_gro_flush(napi, false);
- getnstimeofday(&ep->flush_time);
+ ep->last_flush_time = curr_time;
ep->flush_byte_count = 0;
- gro_flush_time = 10000L;
- gro_min_byte_thresh = 7500L;
+ ep->curr_time_limit = lower_flush_time;
+ ep->curr_byte_threshold = lower_byte_limit;
} else if ((diff.tv_sec > 0 ||
- diff.tv_nsec > gro_flush_time) &&
+ diff.tv_nsec > ep->curr_time_limit) &&
ep->flush_byte_count >=
- gro_min_byte_thresh) {
+ ep->curr_byte_threshold) {
/* Above byte and time limt, therefore we can
* move/maintain our limits to be the max
* and flush
*/
napi_gro_flush(napi, false);
- getnstimeofday(&ep->flush_time);
+ ep->last_flush_time = curr_time;
ep->flush_byte_count = 0;
- gro_flush_time = upper_flush_time;
- gro_min_byte_thresh = upper_byte_limit;
+ ep->curr_time_limit = upper_flush_time;
+ ep->curr_byte_threshold = upper_byte_limit;
}
/* else, below time limit and below
* byte thresh, so change nothing
*/
} else if (diff.tv_sec > 0 ||
- diff.tv_nsec >= gro_flush_time) {
+ diff.tv_nsec >= lower_flush_time) {
napi_gro_flush(napi, false);
- getnstimeofday(&ep->flush_time);
+ ep->last_flush_time = curr_time;
ep->flush_byte_count = 0;
}
}
@@ -321,42 +343,33 @@ static rx_handler_result_t __rmnet_deliver_skb
trace___rmnet_deliver_skb(skb);
switch (ep->rmnet_mode) {
+ case RMNET_EPMODE_VND:
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ rmnet_vnd_rx_fixup(skb, skb->dev);
+
+ skb->pkt_type = PACKET_HOST;
+ skb_set_mac_header(skb, 0);
+
+ if (rmnet_check_skb_can_gro(skb) &&
+ (skb->dev->features & NETIF_F_GRO)) {
+ napi = get_current_napi_context();
+
+ skb_size = skb->len;
+ gro_res = napi_gro_receive(napi, skb);
+ trace_rmnet_gro_downlink(gro_res);
+ rmnet_optional_gro_flush(napi, ep, skb_size);
+ } else{
+ netif_receive_skb(skb);
+ }
+ return RX_HANDLER_CONSUMED;
+
case RMNET_EPMODE_NONE:
return RX_HANDLER_PASS;
case RMNET_EPMODE_BRIDGE:
return rmnet_bridge_handler(skb, ep);
- case RMNET_EPMODE_VND:
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
- case RX_HANDLER_CONSUMED:
- return RX_HANDLER_CONSUMED;
-
- case RX_HANDLER_PASS:
- skb->pkt_type = PACKET_HOST;
- rmnet_reset_mac_header(skb);
- if (rmnet_check_skb_can_gro(skb) &&
- (skb->dev->features & NETIF_F_GRO)) {
- napi = get_current_napi_context();
- if (napi) {
- skb_size = skb->len;
- gro_res = napi_gro_receive(napi, skb);
- trace_rmnet_gro_downlink(gro_res);
- rmnet_optional_gro_flush(napi, ep,
- skb_size);
- } else {
- WARN_ONCE(1, "current napi is NULL\n");
- netif_receive_skb(skb);
- }
- } else {
- netif_receive_skb(skb);
- }
- return RX_HANDLER_CONSUMED;
- }
- return RX_HANDLER_PASS;
-
default:
LOGD("Unknown ep mode %d", ep->rmnet_mode);
rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
@@ -441,16 +454,7 @@ static rx_handler_result_t _rmnet_map_ingress_handler
ep = &config->muxed_ep[mux_id];
- if (!ep->refcount) {
- LOGD("Packet on %s:%d; has no logical endpoint config",
- skb->dev->name, mux_id);
-
- rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
- return RX_HANDLER_CONSUMED;
- }
-
- if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
- skb->dev = ep->egress_dev;
+ skb->dev = ep->egress_dev;
if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
(config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
@@ -462,6 +466,7 @@ static rx_handler_result_t _rmnet_map_ingress_handler
skb->ip_summed |= CHECKSUM_UNNECESSARY;
else if (ckresult !=
RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
+ ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED &&
ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
@@ -495,17 +500,13 @@ static rx_handler_result_t rmnet_map_ingress_handler
(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
struct sk_buff *skbn;
- int rc, co = 0;
+ int rc;
if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
trace_rmnet_start_deaggregation(skb);
while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
_rmnet_map_ingress_handler(skbn, config);
- co++;
}
- trace_rmnet_end_deaggregation(skb, co);
- LOGD("De-aggregated %d packets", co);
- rmnet_stats_deagg_pkts(co);
rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
rc = RX_HANDLER_CONSUMED;
} else {
@@ -538,12 +539,15 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
int required_headroom, additional_header_length, ckresult;
struct rmnet_map_header_s *map_header;
int non_linear_skb;
+ int csum_required = (config->egress_data_format &
+ RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->egress_data_format &
+ RMNET_EGRESS_FORMAT_MAP_CKSUMV4);
additional_header_length = 0;
required_headroom = sizeof(struct rmnet_map_header_s);
- if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
- (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+ if (csum_required) {
required_headroom +=
sizeof(struct rmnet_map_ul_checksum_header_s);
additional_header_length +=
@@ -558,8 +562,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
return 1;
}
- if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
- (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+ if (csum_required) {
ckresult = rmnet_map_checksum_uplink_packet
(skb, orig_dev, config->egress_data_format);
trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
@@ -570,7 +573,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
skb_is_nonlinear(skb);
if ((!(config->egress_data_format &
- RMNET_EGRESS_FORMAT_AGGREGATION)) || non_linear_skb)
+ RMNET_EGRESS_FORMAT_AGGREGATION)) || csum_required ||
+ non_linear_skb)
map_header = rmnet_map_add_map_header
(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
else
@@ -592,8 +596,14 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
skb->protocol = htons(ETH_P_MAP);
- if ((config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) &&
- !non_linear_skb) {
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+ if (rmnet_ul_aggregation_skip(skb, required_headroom))
+ return RMNET_MAP_SUCCESS;
+
+ if (non_linear_skb)
+ if (unlikely(__skb_linearize(skb)))
+ return RMNET_MAP_SUCCESS;
+
rmnet_map_aggregate(skb, config);
return RMNET_MAP_CONSUMED;
}
diff --git a/net/rmnet_data/rmnet_data_stats.c b/net/rmnet_data/rmnet_data_stats.c
index f4aa492..db74c4f 100644
--- a/net/rmnet_data/rmnet_data_stats.c
+++ b/net/rmnet_data/rmnet_data_stats.c
@@ -41,11 +41,6 @@ unsigned long int queue_xmit[RMNET_STATS_QUEUE_XMIT_MAX * 2];
module_param_array(queue_xmit, ulong, 0, 0444);
MODULE_PARM_DESC(queue_xmit, "SKBs queued for transmit");
-static DEFINE_SPINLOCK(rmnet_deagg_count);
-unsigned long int deagg_count[RMNET_STATS_AGG_MAX];
-module_param_array(deagg_count, ulong, 0, 0444);
-MODULE_PARM_DESC(deagg_count, "SKBs De-aggregated");
-
static DEFINE_SPINLOCK(rmnet_agg_count);
unsigned long int agg_count[RMNET_STATS_AGG_MAX];
module_param_array(agg_count, ulong, 0, 0444);
@@ -72,16 +67,7 @@ void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
skb_free[reason]++;
spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);
- if (likely(skb)) {
- struct rmnet_phys_ep_conf_s *config;
-
- config = (struct rmnet_phys_ep_conf_s *)rcu_dereference
- (skb->dev->rx_handler_data);
- if (likely(config))
- config->recycle(skb);
- else
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
void rmnet_stats_queue_xmit(int rc, unsigned int reason)
@@ -108,16 +94,6 @@ void rmnet_stats_agg_pkts(int aggcount)
spin_unlock_irqrestore(&rmnet_agg_count, flags);
}
-void rmnet_stats_deagg_pkts(int aggcount)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rmnet_deagg_count, flags);
- deagg_count[RMNET_STATS_AGG_BUFF]++;
- deagg_count[RMNET_STATS_AGG_PKT] += aggcount;
- spin_unlock_irqrestore(&rmnet_deagg_count, flags);
-}
-
void rmnet_stats_dl_checksum(unsigned int rc)
{
unsigned long flags;
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
index e3350ef..366e486 100644
--- a/net/rmnet_data/rmnet_data_stats.h
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -24,7 +24,6 @@ enum rmnet_skb_free_e {
RMNET_STATS_SKBFREE_DELIVER_NO_EP,
RMNET_STATS_SKBFREE_IPINGRESS_NO_EP,
RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX,
- RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF,
RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD,
RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC,
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index 72f3c3b..c4ef460 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -101,55 +101,6 @@ static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
}
}
-/* RX/TX Fixup */
-
-/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
- * @skb: Socket buffer ("packet") to modify
- * @dev: Virtual network device
- *
- * Additional VND specific packet processing for ingress packets
- *
- * Return:
- * - RX_HANDLER_PASS if packet should continue to process in stack
- * - RX_HANDLER_CONSUMED if packet should not be processed in stack
- *
- */
-int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
-{
- if (unlikely(!dev || !skb))
- return RX_HANDLER_CONSUMED;
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- return RX_HANDLER_PASS;
-}
-
-/* rmnet_vnd_tx_fixup() - Virtual Network Device transmic fixup hook
- * @skb: Socket buffer ("packet") to modify
- * @dev: Virtual network device
- *
- * Additional VND specific packet processing for egress packets
- *
- * Return:
- * - RX_HANDLER_PASS if packet should continue to be transmitted
- * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
- */
-int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
-{
- struct rmnet_vnd_private_s *dev_conf;
-
- dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
-
- if (unlikely(!dev || !skb))
- return RX_HANDLER_CONSUMED;
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- return RX_HANDLER_PASS;
-}
-
/* Network Device Operations */
/* rmnet_vnd_start_xmit() - Transmit NDO callback
@@ -220,12 +171,16 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
switch (cmd) {
case RMNET_IOCTL_SET_QOS_ENABLE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
if (!dev_conf->qos_version)
dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
break;
case RMNET_IOCTL_SET_QOS_DISABLE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
dev_conf->qos_version = 0;
break;
@@ -240,6 +195,8 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
break;
case RMNET_IOCTL_FLOW_ENABLE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
sizeof(struct rmnet_ioctl_data_s))) {
@@ -252,6 +209,8 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
break;
case RMNET_IOCTL_FLOW_DISABLE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
sizeof(struct rmnet_ioctl_data_s))) {
@@ -367,6 +326,8 @@ static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
break;
case RMNET_IOCTL_SET_QOS_VERSION:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
ext_cmd.u.data == 0) {
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
index 9d8eb54..1d3a63b 100644
--- a/net/rmnet_data/rmnet_data_vnd.h
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -27,8 +27,6 @@ int rmnet_vnd_get_name(int id, char *name, int name_len);
int rmnet_vnd_create_dev(int id, struct net_device **new_device,
const char *prefix, int use_name);
int rmnet_vnd_free_dev(int id);
-int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
-int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
int rmnet_vnd_is_vnd(struct net_device *dev);
int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow);
int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow);
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
index f597f1b..3bab6d9 100644
--- a/net/rmnet_data/rmnet_map.h
+++ b/net/rmnet_data/rmnet_map.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,5 +146,5 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb);
int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
u32 egress_data_format);
-
+int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
#endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index d7e420b..1c0f1060 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -234,16 +234,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
if (!skb || !config)
return;
- size = config->egress_agg_size - skb->len;
-
- if (size < 2000) {
- LOGL("Invalid length %d", size);
- return;
- }
new_packet:
spin_lock_irqsave(&config->agg_lock, flags);
-
memcpy(&last, &config->agg_last, sizeof(struct timespec));
getnstimeofday(&config->agg_last);
@@ -265,6 +258,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
return;
}
+ size = config->egress_agg_size - skb->len;
config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
if (!config->agg_skb) {
config->agg_skb = 0;
@@ -748,3 +742,31 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
done:
return ret;
}
+
+int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset)
+{
+ unsigned char *packet_start = skb->data + offset;
+ int is_icmp = 0;
+
+ if ((skb->data[offset]) >> 4 == 0x04) {
+ struct iphdr *ip4h = (struct iphdr *)(packet_start);
+
+ if (ip4h->protocol == IPPROTO_ICMP)
+ is_icmp = 1;
+ } else if ((skb->data[offset]) >> 4 == 0x06) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
+
+ if (ip6h->nexthdr == IPPROTO_ICMPV6) {
+ is_icmp = 1;
+ } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *frag;
+
+ frag = (struct frag_hdr *)(packet_start
+ + sizeof(struct ipv6hdr));
+ if (frag->nexthdr == IPPROTO_ICMPV6)
+ is_icmp = 1;
+ }
+ }
+
+ return is_icmp;
+}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index c651cfc..f311732 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -141,7 +141,7 @@ static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
hlist_for_each_entry_safe(p, n, head, tcfa_head) {
ret = __tcf_hash_release(p, false, true);
if (ret == ACT_P_DELETED) {
- module_put(p->ops->owner);
+ module_put(ops->owner);
n_i++;
} else if (ret < 0)
goto nla_put_failure;
@@ -450,13 +450,15 @@ EXPORT_SYMBOL(tcf_action_exec);
int tcf_action_destroy(struct list_head *actions, int bind)
{
+ const struct tc_action_ops *ops;
struct tc_action *a, *tmp;
int ret = 0;
list_for_each_entry_safe(a, tmp, actions, list) {
+ ops = a->ops;
ret = __tcf_hash_release(a, bind, true);
if (ret == ACT_P_DELETED)
- module_put(a->ops->owner);
+ module_put(ops->owner);
else if (ret < 0)
return ret;
}
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index b12bc2a..e75fb65 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (tc_skip_sw(head->flags))
return -1;
+ *res = head->res;
return tcf_exts_exec(skb, &head->exts, res);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6cfb6e9..b8031aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -178,8 +178,13 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_xmit_frozen_or_stopped(txq))
- skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ if (!netif_xmit_frozen_or_stopped(txq)) {
+ if (unlikely(skb->fast_forwarded))
+ skb = dev_hard_start_xmit_list(skb, dev,
+ txq, &ret);
+ else
+ skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ }
HARD_TX_UNLOCK(dev, txq);
} else {
@@ -681,6 +686,7 @@ void qdisc_reset(struct Qdisc *qdisc)
qdisc->gso_skb = NULL;
}
qdisc->q.qlen = 0;
+ qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 75f290b..272c345 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -702,6 +702,65 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
return task;
}
+/* create new threads */
+static int
+svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+ struct svc_rqst *rqstp;
+ struct task_struct *task;
+ struct svc_pool *chosen_pool;
+ unsigned int state = serv->sv_nrthreads-1;
+ int node;
+
+ do {
+ nrservs--;
+ chosen_pool = choose_pool(serv, pool, &state);
+
+ node = svc_pool_map_get_node(chosen_pool->sp_id);
+ rqstp = svc_prepare_thread(serv, chosen_pool, node);
+ if (IS_ERR(rqstp))
+ return PTR_ERR(rqstp);
+
+ __module_get(serv->sv_ops->svo_module);
+ task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
+ node, "%s", serv->sv_name);
+ if (IS_ERR(task)) {
+ module_put(serv->sv_ops->svo_module);
+ svc_exit_thread(rqstp);
+ return PTR_ERR(task);
+ }
+
+ rqstp->rq_task = task;
+ if (serv->sv_nrpools > 1)
+ svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+ svc_sock_update_bufs(serv);
+ wake_up_process(task);
+ } while (nrservs > 0);
+
+ return 0;
+}
+
+
+/* destroy old threads */
+static int
+svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+ struct task_struct *task;
+ unsigned int state = serv->sv_nrthreads-1;
+
+ /* destroy old threads */
+ do {
+ task = choose_victim(serv, pool, &state);
+ if (task == NULL)
+ break;
+ send_sig(SIGINT, task, 1);
+ nrservs++;
+ } while (nrservs < 0);
+
+ return 0;
+}
+
/*
* Create or destroy enough new threads to make the number
* of threads the given number. If `pool' is non-NULL, applies
@@ -719,13 +778,6 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- struct svc_rqst *rqstp;
- struct task_struct *task;
- struct svc_pool *chosen_pool;
- int error = 0;
- unsigned int state = serv->sv_nrthreads-1;
- int node;
-
if (pool == NULL) {
/* The -1 assumes caller has done a svc_get() */
nrservs -= (serv->sv_nrthreads-1);
@@ -735,46 +787,52 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
spin_unlock_bh(&pool->sp_lock);
}
- /* create new threads */
- while (nrservs > 0) {
- nrservs--;
- chosen_pool = choose_pool(serv, pool, &state);
-
- node = svc_pool_map_get_node(chosen_pool->sp_id);
- rqstp = svc_prepare_thread(serv, chosen_pool, node);
- if (IS_ERR(rqstp)) {
- error = PTR_ERR(rqstp);
- break;
- }
-
- __module_get(serv->sv_ops->svo_module);
- task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
- node, "%s", serv->sv_name);
- if (IS_ERR(task)) {
- error = PTR_ERR(task);
- module_put(serv->sv_ops->svo_module);
- svc_exit_thread(rqstp);
- break;
- }
-
- rqstp->rq_task = task;
- if (serv->sv_nrpools > 1)
- svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
-
- svc_sock_update_bufs(serv);
- wake_up_process(task);
- }
- /* destroy old threads */
- while (nrservs < 0 &&
- (task = choose_victim(serv, pool, &state)) != NULL) {
- send_sig(SIGINT, task, 1);
- nrservs++;
- }
-
- return error;
+ if (nrservs > 0)
+ return svc_start_kthreads(serv, pool, nrservs);
+ if (nrservs < 0)
+ return svc_signal_kthreads(serv, pool, nrservs);
+ return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
+/* destroy old threads */
+static int
+svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+ struct task_struct *task;
+ unsigned int state = serv->sv_nrthreads-1;
+
+ /* destroy old threads */
+ do {
+ task = choose_victim(serv, pool, &state);
+ if (task == NULL)
+ break;
+ kthread_stop(task);
+ nrservs++;
+ } while (nrservs < 0);
+ return 0;
+}
+
+int
+svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+{
+ if (pool == NULL) {
+ /* The -1 assumes caller has done a svc_get() */
+ nrservs -= (serv->sv_nrthreads-1);
+ } else {
+ spin_lock_bh(&pool->sp_lock);
+ nrservs -= pool->sp_nrthreads;
+ spin_unlock_bh(&pool->sp_lock);
+ }
+
+ if (nrservs > 0)
+ return svc_start_kthreads(serv, pool, nrservs);
+ if (nrservs < 0)
+ return svc_stop_kthreads(serv, pool, nrservs);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
+
/*
* Called from a server thread as it's exiting. Caller must hold the "service
* mutex" for the service.
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 56ea0ad..912f1fb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -547,7 +547,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
return false;
if (msg_errcode(msg))
return false;
- *err = -TIPC_ERR_NO_NAME;
+ *err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index f323faf..ff9887f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -1477,9 +1477,9 @@
country VN: DFS-FCC
(2402 - 2482 @ 40), (20)
- (5170 - 5250 @ 80), (24), AUTO-BW
- (5250 - 5330 @ 80), (24), DFS, AUTO-BW
- (5490 - 5730 @ 160), (24), DFS
+ (5170 - 5250 @ 80), (24)
+ (5250 - 5330 @ 80), (24), DFS
+ (5490 - 5730 @ 80), (24), DFS
(5735 - 5835 @ 80), (30)
# 60 gHz band channels 1-4
(57240 - 65880 @ 2160), (40)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3dd7b21..d8387b1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9700,6 +9700,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ if (!setup.chandef.chan)
+ return -EINVAL;
+
err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
&setup.beacon_rate);
if (err)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bb7f5be..d414049 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -545,11 +545,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
return -EOPNOTSUPP;
if (wdev->current_bss) {
- if (!prev_bssid)
- return -EALREADY;
- if (prev_bssid &&
- !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
- return -ENOTCONN;
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -1085,11 +1080,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->connect_keys)) {
- kzfree(wdev->connect_keys);
- wdev->connect_keys = NULL;
+ /*
+ * If we have an ssid_len, we're trying to connect or are
+ * already connected, so reject a new SSID unless it's the
+ * same (which is the case for re-association.)
+ */
+ if (wdev->ssid_len &&
+ (wdev->ssid_len != connect->ssid_len ||
+ memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+ return -EALREADY;
+
+ /*
+ * If connected, reject (re-)association unless prev_bssid
+ * matches the current BSSID.
+ */
+ if (wdev->current_bss) {
+ if (!prev_bssid)
+ return -EALREADY;
+ if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ return -ENOTCONN;
}
+ /*
+ * Reject if we're in the process of connecting with WEP,
+ * this case isn't very interesting and trying to handle
+ * it would make the code much more complex.
+ */
+ if (wdev->connect_keys)
+ return -EINPROGRESS;
+
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);
@@ -1140,7 +1159,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
if (err) {
wdev->connect_keys = NULL;
- wdev->ssid_len = 0;
+ /*
+ * This could be reassoc getting refused, don't clear
+ * ssid_len in that case.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
return err;
}
@@ -1165,5 +1189,13 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);
+ /*
+ * Clear ssid_len unless we actually were fully connected,
+ * in which case cfg80211_disconnected() will take care of
+ * this later.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
+
return err;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 637387b..d864a6d 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -66,6 +66,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
goto error_nolock;
}
+ if (x->props.output_mark)
+ skb->mark = x->props.output_mark;
+
err = x->outer_mode->output(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ce5711..77fbfbd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -125,7 +125,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
int tos, int oif,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
- int family)
+ int family, u32 mark)
{
struct xfrm_policy_afinfo *afinfo;
struct dst_entry *dst;
@@ -134,7 +134,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
+ dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
xfrm_policy_put_afinfo(afinfo);
@@ -145,7 +145,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
int tos, int oif,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
- int family)
+ int family, u32 mark)
{
struct net *net = xs_net(x);
xfrm_address_t *saddr = &x->props.saddr;
@@ -161,7 +161,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
+ dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
@@ -1427,14 +1427,14 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
- xfrm_address_t *remote, unsigned short family)
+ xfrm_address_t *remote, unsigned short family, u32 mark)
{
int err;
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, oif, local, remote);
+ err = afinfo->get_saddr(net, oif, local, remote, mark);
xfrm_policy_put_afinfo(afinfo);
return err;
}
@@ -1465,7 +1465,7 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
if (xfrm_addr_any(local, tmpl->encap_family)) {
error = xfrm_get_saddr(net, fl->flowi_oif,
&tmp, remote,
- tmpl->encap_family);
+ tmpl->encap_family, 0);
if (error)
goto fail;
local = &tmp;
@@ -1744,7 +1744,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
- &saddr, &daddr, family);
+ &saddr, &daddr, family,
+ xfrm[i]->props.output_mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b2bba35..2cade02 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -584,6 +584,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
xfrm_mark_get(attrs, &x->mark);
+ if (attrs[XFRMA_OUTPUT_MARK])
+ x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]);
+
err = __xfrm_init_state(x, false);
if (err)
goto error;
@@ -871,6 +874,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
goto out;
if (x->security)
ret = copy_sec_ctx(x->security, skb);
+ if (x->props.output_mark) {
+ ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark);
+ if (ret)
+ goto out;
+ }
out:
return ret;
}
@@ -1656,32 +1664,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+ BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+ xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+ return 0;
+}
+
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
- BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
- sizeof(cb->args) - sizeof(cb->args[0]));
-
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
- if (!cb->args[0]) {
- cb->args[0] = 1;
- xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
- }
-
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
@@ -2419,6 +2429,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
[XFRMA_PROTO] = { .type = NLA_U8 },
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
+ [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 },
};
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2428,6 +2439,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@@ -2441,6 +2453,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+ .start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2492,6 +2505,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct netlink_dump_control c = {
+ .start = link->start,
.dump = link->dump,
.done = link->done,
};
@@ -2635,6 +2649,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
l += nla_total_size(sizeof(*x->coaddr));
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
+ if (x->props.output_mark)
+ l += nla_total_size(sizeof(x->props.output_mark));
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 880a7d1..4ccff66 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
}
static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
void foo_bar_reg(void)
{
+ mutex_lock(&thread_mutex);
+ if (simple_thread_cnt++)
+ goto out;
+
pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
* unloading (there's other locks to prevent that). But
* for consistency sake, we still take the thread_mutex.
*/
- mutex_lock(&thread_mutex);
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
mutex_unlock(&thread_mutex);
}
void foo_bar_unreg(void)
{
- pr_info("Killing thread for foo_bar_fn\n");
- /* protect against module unloading */
mutex_lock(&thread_mutex);
+ if (--simple_thread_cnt)
+ goto out;
+
+ pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
+ out:
mutex_unlock(&thread_mutex);
}
diff --git a/security/Kconfig b/security/Kconfig
index 5693989..4415de2 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,11 @@
source security/keys/Kconfig
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
+
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n
diff --git a/security/Makefile b/security/Makefile
index f2d71cd..79166ba 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@@ -24,6 +25,7 @@
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_ARCH_QCOM) += pfe/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 57bc405..935752c 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -671,9 +671,9 @@ enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
module_param_call(mode, param_set_mode, param_get_mode,
&aa_g_profile_mode, S_IRUSR | S_IWUSR);
-#ifdef CONFIG_SECURITY_APPARMOR_HASH
/* whether policy verification hashing is enabled */
bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index d942c7c..e0a3978 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,10 +41,8 @@
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
select CRYPTO_AES
- select CRYPTO_ECB
- select CRYPTO_RNG
+ select CRYPTO_GCM
help
This option provides support for holding large keys within the kernel
(for example Kerberos ticket caches). The data may be stored out to
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 835c1ab..e628817 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -1,5 +1,6 @@
/* Large capacity key type
*
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
@@ -16,10 +17,10 @@
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
+#include <linux/random.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
-#include <crypto/rng.h>
-#include <crypto/skcipher.h>
+#include <crypto/aead.h>
/*
* Layout of key payload words.
@@ -49,7 +50,12 @@ enum big_key_op {
/*
* Key size for big_key data encryption
*/
-#define ENC_KEY_SIZE 16
+#define ENC_KEY_SIZE 32
+
+/*
+ * Authentication tag length
+ */
+#define ENC_AUTHTAG_SIZE 16
/*
* big_key defined keys take an arbitrary string as the description and an
@@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
.destroy = big_key_destroy,
.describe = big_key_describe,
.read = big_key_read,
+ /* no ->update(); don't add it without changing big_key_crypt() nonce */
};
/*
- * Crypto names for big_key data encryption
+ * Crypto names for big_key data authenticated encryption
*/
-static const char big_key_rng_name[] = "stdrng";
-static const char big_key_alg_name[] = "ecb(aes)";
+static const char big_key_alg_name[] = "gcm(aes)";
/*
- * Crypto algorithms for big_key data encryption
+ * Crypto algorithms for big_key data authenticated encryption
*/
-static struct crypto_rng *big_key_rng;
-static struct crypto_skcipher *big_key_skcipher;
+static struct crypto_aead *big_key_aead;
/*
- * Generate random key to encrypt big_key data
+ * Since changing the key affects the entire object, we need a mutex.
*/
-static inline int big_key_gen_enckey(u8 *key)
-{
- return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
-}
+static DEFINE_MUTEX(big_key_aead_lock);
/*
* Encrypt/decrypt big_key data
*/
static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
{
- int ret = -EINVAL;
+ int ret;
struct scatterlist sgio;
- SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
+ struct aead_request *aead_req;
+ /* We always use a zero nonce. The reason we can get away with this is
+ * because we're using a different randomly generated key for every
+ * different encryption. Notably, too, key_type_big_key doesn't define
+ * an .update function, so there's no chance we'll wind up reusing the
+ * key to encrypt updated data. Simply put: one key, one encryption.
+ */
+ u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
- if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
+ aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
+ if (!aead_req)
+ return -ENOMEM;
+
+ memset(zero_nonce, 0, sizeof(zero_nonce));
+ sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
+ aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+ aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ aead_request_set_ad(aead_req, 0);
+
+ mutex_lock(&big_key_aead_lock);
+ if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
ret = -EAGAIN;
goto error;
}
-
- skcipher_request_set_tfm(req, big_key_skcipher);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
-
- sg_init_one(&sgio, data, datalen);
- skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
-
if (op == BIG_KEY_ENC)
- ret = crypto_skcipher_encrypt(req);
+ ret = crypto_aead_encrypt(aead_req);
else
- ret = crypto_skcipher_decrypt(req);
-
- skcipher_request_zero(req);
-
+ ret = crypto_aead_decrypt(aead_req);
error:
+ mutex_unlock(&big_key_aead_lock);
+ aead_request_free(aead_req);
return ret;
}
@@ -146,15 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
*
* File content is stored encrypted with randomly generated key.
*/
- size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
+ size_t enclen = datalen + ENC_AUTHTAG_SIZE;
- /* prepare aligned data to encrypt */
data = kmalloc(enclen, GFP_KERNEL);
if (!data)
return -ENOMEM;
memcpy(data, prep->data, datalen);
- memset(data + datalen, 0x00, enclen - datalen);
/* generate random key */
enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -162,13 +171,10 @@ int big_key_preparse(struct key_preparsed_payload *prep)
ret = -ENOMEM;
goto error;
}
-
- ret = big_key_gen_enckey(enckey);
- if (ret)
- goto err_enckey;
+ get_random_bytes(enckey, ENC_KEY_SIZE);
/* encrypt aligned data */
- ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
+ ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
if (ret)
goto err_enckey;
@@ -194,7 +200,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
*path = file->f_path;
path_get(path);
fput(file);
- kfree(data);
+ kzfree(data);
} else {
/* Just store the data in a buffer */
void *data = kmalloc(datalen, GFP_KERNEL);
@@ -210,9 +216,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
err_fput:
fput(file);
err_enckey:
- kfree(enckey);
+ kzfree(enckey);
error:
- kfree(data);
+ kzfree(data);
return ret;
}
@@ -226,7 +232,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
path_put(path);
}
- kfree(prep->payload.data[big_key_data]);
+ kzfree(prep->payload.data[big_key_data]);
}
/*
@@ -239,7 +245,7 @@ void big_key_revoke(struct key *key)
/* clear the quota */
key_payload_reserve(key, 0);
- if (key_is_instantiated(key) &&
+ if (key_is_positive(key) &&
(size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
vfs_truncate(path, 0);
}
@@ -258,7 +264,7 @@ void big_key_destroy(struct key *key)
path->mnt = NULL;
path->dentry = NULL;
}
- kfree(key->payload.data[big_key_data]);
+ kzfree(key->payload.data[big_key_data]);
key->payload.data[big_key_data] = NULL;
}
@@ -271,7 +277,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
@@ -294,7 +300,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
struct file *file;
u8 *data;
u8 *enckey = (u8 *)key->payload.data[big_key_data];
- size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
+ size_t enclen = datalen + ENC_AUTHTAG_SIZE;
data = kmalloc(enclen, GFP_KERNEL);
if (!data)
@@ -326,7 +332,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
err_fput:
fput(file);
error:
- kfree(data);
+ kzfree(data);
} else {
ret = datalen;
if (copy_to_user(buffer, key->payload.data[big_key_data],
@@ -342,47 +348,31 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
*/
static int __init big_key_init(void)
{
- struct crypto_skcipher *cipher;
- struct crypto_rng *rng;
int ret;
- rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
- if (IS_ERR(rng)) {
- pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
- return PTR_ERR(rng);
- }
-
- big_key_rng = rng;
-
- /* seed RNG */
- ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
- if (ret) {
- pr_err("Can't reset rng: %d\n", ret);
- goto error_rng;
- }
-
/* init block cipher */
- cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(cipher)) {
- ret = PTR_ERR(cipher);
+ big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(big_key_aead)) {
+ ret = PTR_ERR(big_key_aead);
pr_err("Can't alloc crypto: %d\n", ret);
- goto error_rng;
+ return ret;
}
-
- big_key_skcipher = cipher;
+ ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
+ if (ret < 0) {
+ pr_err("Can't set crypto auth tag len: %d\n", ret);
+ goto free_aead;
+ }
ret = register_key_type(&key_type_big_key);
if (ret < 0) {
pr_err("Can't register type: %d\n", ret);
- goto error_cipher;
+ goto free_aead;
}
return 0;
-error_cipher:
- crypto_free_skcipher(big_key_skcipher);
-error_rng:
- crypto_free_rng(big_key_rng);
+free_aead:
+ crypto_free_aead(big_key_aead);
return ret;
}
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 8d9330a..a871159 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
down_read(&ukey->sem);
upayload = user_key_payload(ukey);
+ if (!upayload) {
+ /* key was revoked before we acquired its semaphore */
+ up_read(&ukey->sem);
+ key_put(ukey);
+ ukey = ERR_PTR(-EKEYREVOKED);
+ goto error;
+ }
*master_key = upayload->data;
*master_keylen = upayload->datalen;
error:
@@ -867,7 +874,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
size_t datalen = prep->datalen;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9cb4fe4..1659094 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
+ short state = key->state;
+
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data if the key is instantiated */
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
- key->type->destroy)
+ if (state == KEY_IS_POSITIVE && key->type->destroy)
key->type->destroy(key);
security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
}
atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ if (state != KEY_IS_UNINSTANTIATED)
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
diff --git a/security/keys/internal.h b/security/keys/internal.h
index a705a7d..fb0c650 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -137,7 +137,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
extern int install_user_keyrings(void);
extern int install_thread_keyring_to_cred(struct cred *);
diff --git a/security/keys/key.c b/security/keys/key.c
index 2f4ce35..7dc5906 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -301,6 +301,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_BUILT_IN)
key->flags |= 1 << KEY_FLAG_BUILTIN;
+ if (flags & KEY_ALLOC_UID_KEYRING)
+ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
@@ -399,6 +401,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
EXPORT_SYMBOL(key_payload_reserve);
/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+ /* Commit the payload before setting the state; barrier versus
+ * key_read_state().
+ */
+ smp_store_release(&key->state,
+ (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
@@ -421,14 +435,14 @@ static int __key_instantiate_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* instantiate the key */
ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
atomic_inc(&key->user->nikeys);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, 0);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -570,13 +584,10 @@ int key_reject_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
- key->reject_error = -error;
- smp_wmb();
- set_bit(KEY_FLAG_NEGATIVE, &key->flags);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, -error);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -748,8 +759,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
ret = key->type->update(key, prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
@@ -933,6 +944,16 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
*/
__key_link_end(keyring, &index_key, edit);
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+ ret = wait_for_key_construction(key, true);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ }
+
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
@@ -983,8 +1004,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
ret = key->type->update(key, &prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ada12c3..797edcf 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,6 +766,10 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
key = key_ref_to_ptr(key_ref);
+ ret = key_read_state(key);
+ if (ret < 0)
+ goto error2; /* Negatively instantiated */
+
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
@@ -896,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index c91e4e0..4e9b4d2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -407,7 +407,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
else
seq_puts(m, "[anon]");
- if (key_is_instantiated(keyring)) {
+ if (key_is_positive(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
@@ -416,7 +416,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
}
struct keyring_read_iterator_context {
- size_t qty;
+ size_t buflen;
size_t count;
key_serial_t __user *buffer;
};
@@ -428,9 +428,9 @@ static int keyring_read_iterator(const void *object, void *data)
int ret;
kenter("{%s,%d},,{%zu/%zu}",
- key->type->name, key->serial, ctx->count, ctx->qty);
+ key->type->name, key->serial, ctx->count, ctx->buflen);
- if (ctx->count >= ctx->qty)
+ if (ctx->count >= ctx->buflen)
return 1;
ret = put_user(key->serial, ctx->buffer);
@@ -452,38 +452,33 @@ static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
struct keyring_read_iterator_context ctx;
- unsigned long nr_keys;
- int ret;
+ long ret;
kenter("{%d},,%zu", key_serial(keyring), buflen);
if (buflen & (sizeof(key_serial_t) - 1))
return -EINVAL;
- nr_keys = keyring->keys.nr_leaves_on_tree;
- if (nr_keys == 0)
- return 0;
-
- /* Calculate how much data we could return */
- ctx.qty = nr_keys * sizeof(key_serial_t);
-
- if (!buffer || !buflen)
- return ctx.qty;
-
- if (buflen > ctx.qty)
- ctx.qty = buflen;
-
- /* Copy the IDs of the subscribed keys into the buffer */
- ctx.buffer = (key_serial_t __user *)buffer;
- ctx.count = 0;
- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
- if (ret < 0) {
- kleave(" = %d [iterate]", ret);
- return ret;
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
}
- kleave(" = %zu [ok]", ctx.count);
- return ctx.count;
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
}
/*
@@ -550,7 +545,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
- unsigned long kflags = key->flags;
+ unsigned long kflags = READ_ONCE(key->flags);
+ short state = READ_ONCE(key->state);
kenter("{%d}", key->serial);
@@ -594,9 +590,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
/* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- smp_rmb();
- ctx->result = ERR_PTR(key->reject_error);
+ if (state < 0) {
+ ctx->result = ERR_PTR(state);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}
@@ -989,15 +984,15 @@ key_ref_t find_key_to_update(key_ref_t keyring_ref,
/*
* Find a keyring with the specified name.
*
- * All named keyrings in the current user namespace are searched, provided they
- * grant Search permission directly to the caller (unless this check is
- * skipped). Keyrings whose usage points have reached zero or who have been
- * revoked are skipped.
+ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
+ * user in the current user namespace are considered. If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
*
* Returns a pointer to the keyring with the keyring's refcount having being
* incremented on success. -ENOKEY is returned if a key could not be found.
*/
-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
struct key *keyring;
int bucket;
@@ -1025,10 +1020,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
if (strcmp(keyring->description, name) != 0)
continue;
- if (!skip_perm_check &&
- key_permission(make_key_ref(keyring, 0),
- KEY_NEED_SEARCH) < 0)
- continue;
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
+ continue;
+ }
/* we've got a match but we might end up racing with
* key_cleanup() if the keyring is currently 'dead'
diff --git a/security/keys/proc.c b/security/keys/proc.c
index b9f531c..0361286 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -182,6 +182,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
unsigned long timo;
key_ref_t key_ref, skey_ref;
char xbuf[16];
+ short state;
int rc;
struct keyring_search_context ctx = {
@@ -240,17 +241,19 @@ static int proc_keys_show(struct seq_file *m, void *v)
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}
+ state = key_read_state(key);
+
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
- showflag(key, 'I', KEY_FLAG_INSTANTIATED),
+ state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
showflag(key, 'R', KEY_FLAG_REVOKED),
showflag(key, 'D', KEY_FLAG_DEAD),
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
- showflag(key, 'N', KEY_FLAG_NEGATIVE),
+ state < 0 ? 'N' : '-',
showflag(key, 'i', KEY_FLAG_INVALIDATED),
atomic_read(&key->usage),
xbuf,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 45536c6..2d35d71 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -76,7 +76,8 @@ int install_user_keyrings(void)
if (IS_ERR(uid_keyring)) {
uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
- KEY_ALLOC_IN_QUOTA,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
@@ -93,7 +94,8 @@ int install_user_keyrings(void)
session_keyring =
keyring_alloc(buf, user->uid, INVALID_GID,
cred, user_keyring_perm,
- KEY_ALLOC_IN_QUOTA,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
NULL, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
@@ -727,7 +729,7 @@ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
- !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ key_read_state(key) == KEY_IS_UNINSTANTIATED)
goto invalid_key;
/* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 43affcf..5030fcf 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -594,10 +594,9 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
- smp_rmb();
- return key->reject_error;
- }
+ ret = key_read_state(key);
+ if (ret < 0)
+ return ret;
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 9db8b4a..ba74a0b 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 90d6175..4ba2f6b 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -70,7 +70,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
}
ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -114,7 +114,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
if (!ret)
ret = crypto_shash_final(&sdesc->shash, digest);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -165,7 +165,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
paramdigest, TPM_NONCE_SIZE, h1,
TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -246,7 +246,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -347,7 +347,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
ret = -EINVAL;
out:
- kfree(sdesc);
+ kzfree(sdesc);
return ret;
}
@@ -564,7 +564,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
*bloblen = storedsize;
}
out:
- kfree(td);
+ kzfree(td);
return ret;
}
@@ -678,7 +678,7 @@ static int key_seal(struct trusted_key_payload *p,
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -703,7 +703,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kfree(tb);
+ kzfree(tb);
return ret;
}
@@ -1037,12 +1037,12 @@ static int trusted_instantiate(struct key *key,
if (!ret && options->pcrlock)
ret = pcrlock(options->pcrlock);
out:
- kfree(datablob);
- kfree(options);
+ kzfree(datablob);
+ kzfree(options);
if (!ret)
rcu_assign_keypointer(key, payload);
else
- kfree(payload);
+ kzfree(payload);
return ret;
}
@@ -1051,8 +1051,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
struct trusted_key_payload *p;
p = container_of(rcu, struct trusted_key_payload, rcu);
- memset(p->key, 0, p->key_len);
- kfree(p);
+ kzfree(p);
}
/*
@@ -1067,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
char *datablob;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
p = key->payload.data[0];
if (!p->migratable)
@@ -1094,13 +1093,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (!new_o->keyhandle) {
ret = -EINVAL;
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
@@ -1114,22 +1113,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
ret = key_seal(new_p, new_o);
if (ret < 0) {
pr_info("trusted_key: key_seal failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
if (new_o->pcrlock) {
ret = pcrlock(new_o->pcrlock);
if (ret < 0) {
pr_info("trusted_key: pcrlock failed (%d)\n", ret);
- kfree(new_p);
+ kzfree(new_p);
goto out;
}
}
rcu_assign_keypointer(key, new_p);
call_rcu(&p->rcu, trusted_rcu_free);
out:
- kfree(datablob);
- kfree(new_o);
+ kzfree(datablob);
+ kzfree(new_o);
return ret;
}
@@ -1148,34 +1147,30 @@ static long trusted_read(const struct key *key, char __user *buffer,
p = rcu_dereference_key(key);
if (!p)
return -EINVAL;
- if (!buffer || buflen <= 0)
- return 2 * p->blob_len;
- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
- if (!ascii_buf)
- return -ENOMEM;
- bufp = ascii_buf;
- for (i = 0; i < p->blob_len; i++)
- bufp = hex_byte_pack(bufp, p->blob[i]);
- if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
- kfree(ascii_buf);
- return -EFAULT;
+ if (buffer && buflen >= 2 * p->blob_len) {
+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ if (!ascii_buf)
+ return -ENOMEM;
+
+ bufp = ascii_buf;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
+ kzfree(ascii_buf);
+ return -EFAULT;
+ }
+ kzfree(ascii_buf);
}
- kfree(ascii_buf);
return 2 * p->blob_len;
}
/*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
*/
static void trusted_destroy(struct key *key)
{
- struct trusted_key_payload *p = key->payload.data[0];
-
- if (!p)
- return;
- memset(p->key, 0, p->key_len);
- kfree(key->payload.data[0]);
+ kzfree(key->payload.data[0]);
}
struct key_type key_type_trusted = {
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 66b1840..3dc2607 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -106,7 +106,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
/* attach the new data, displacing the old */
key->expiry = prep->expiry;
- if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_positive(key))
zap = rcu_dereference_key(key);
rcu_assign_keypointer(key, prep->payload.data[0]);
prep->payload.data[0] = NULL;
@@ -154,7 +154,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %u", key->datalen);
}
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..0cd9e81
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,28 @@
+menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
+ depends on ARCH_QCOM
+
+config PFT
+ bool "Per-File-Tagger driver"
+ depends on SECURITY
+ default n
+ help
+ This driver is used for tagging enterprise files.
+ It is part of the Per-File-Encryption (PFE) feature.
+	  The driver tags files when they are created by a
+	  registered application.
+ Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+ bool "Per-File-Key driver"
+ depends on SECURITY
+ depends on SECURITY_SELINUX
+ default n
+ help
+ This driver is used for storing eCryptfs information
+ in file node.
+ This is part of eCryptfs hardware enhanced solution
+ provided by Qualcomm Technologies, Inc.
+ Information is used when file is encrypted later using
+ ICE or dm crypto engine
+
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..242a216
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the MSM specific security device drivers.
+#
+
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/crypto
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..615353e
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purpose :
+ * 1) Define priorities between PFE's if more than one is enabled
+ * 2) Extract key information from inode
+ * 3) Load and manage various keys in ICE HW engine
+ * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER
+ * that need to take decision on HW encryption management of the data
+ * Some examples:
+ * BLOCK LAYER: when it takes decision on whether 2 chunks can be united
+ * to one encryption / decryption request sent to the HW
+ *
+ * UFS DRIVER: when it need to configure ICE HW with a particular key slot
+ * to be used for encryption / decryption
+ *
+ * PFE variants can differ on particular way of storing the cryptographic info
+ * inside inode, actions to be taken upon file operations, etc., but the common
+ * properties are described above
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_internal.h"
+#include "ext4.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode,
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+ /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio,
+};
+
+static void __exit pfk_exit(void)
+{
+ pfk_ready = false;
+ pfk_ext4_deinit();
+ pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+
+ int ret = 0;
+
+ ret = pfk_ext4_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_kc_init();
+ if (ret != 0) {
+		pr_err("could not init pfk key cache, error %d\n", ret);
+ pfk_ext4_deinit();
+ goto fail;
+ }
+
+ pfk_ready = true;
+ pr_info("Driver initialized successfully\n");
+
+ return 0;
+
+fail:
+ pr_err("Failed to init driver\n");
+ return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+ if (!inode)
+ return INVALID_PFE;
+
+ if (pfk_is_ext4_type(inode))
+ return EXT4_CRYPT_PFE;
+
+ return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (hlist_empty(&inode->i_dentry))
+ return "unknown";
+
+ dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+ return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Please note, that in general bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look for inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+ struct address_space *mapping;
+
+ if (!bio)
+ return NULL;
+ if (!bio->bi_io_vec)
+ return NULL;
+ if (!bio->bi_io_vec->bv_page)
+ return NULL;
+ if (!bio_has_data((struct bio *)bio))
+ return NULL;
+
+ if (PageAnon(bio->bi_io_vec->bv_page)) {
+ struct inode *inode;
+
+ //Using direct-io (O_DIRECT) without page cache
+ inode = dio_bio_get_inode((struct bio *)bio);
+ pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+ return inode;
+ }
+
+ mapping = page_mapping(bio->bi_io_vec->bv_page);
+ if (!mapping)
+ return NULL;
+
+ if (!mapping->host)
+ return NULL;
+
+ return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type)
+{
+ /*
+ * currently only 32 bit key size is supported
+ * in the future, table with supported key sizes might
+ * be introduced
+ */
+
+ if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+ pr_err("not supported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (key_size_type)
+ *key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+ return 0;
+}
+
+/*
+ * Retrieves filesystem type from inode's superblock
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type)
+{
+ if (!inode || !fs_type)
+ return false;
+
+ if (!inode->i_sb)
+ return false;
+
+ if (!inode->i_sb->s_type)
+ return false;
+
+ return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+
+/**
+ * pfk_load_key_start() - loads PFE encryption key to the ICE
+ * Can also be invoked from non
+ * PFE context, in this case it
+ * is not relevant and is_pfe
+ * flag is set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to ice setting structure that will be filled with
+ * ice configuration values, including the index to which the key was loaded
+ * @is_pfe: will be false if inode is not relevant to PFE, in such a case
+ * it should be treated as non PFE by the block layer
+ *
+ * Returns the index where the key is stored in encryption hw and additional
+ * information that will be used later for configuration of the encryption hw.
+ *
+ * Must be followed by pfk_load_key_end when key is no longer used by ice
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe,
+ bool async)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+ enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ enum ice_crpto_key_size key_size_type = 0;
+ u32 key_index = 0;
+ struct inode *inode = NULL;
+ enum pfe_type which_pfe = INVALID_PFE;
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ if (!ice_setting) {
+ pr_err("ice setting is NULL\n");
+ return -EINVAL;
+ }
+//pr_err("%s %d\n", __func__, __LINE__);
+ inode = pfk_bio_get_inode(bio);
+ if (!inode) {
+ *is_pfe = false;
+ return -EINVAL;
+ }
+ //pr_err("%s %d\n", __func__, __LINE__);
+ which_pfe = pfk_get_pfe_type(inode);
+ if (which_pfe == INVALID_PFE) {
+ *is_pfe = false;
+ return -EPERM;
+ }
+
+ pr_debug("parsing file %s with PFE %d\n",
+ inode_to_filename(inode), which_pfe);
+//pr_err("%s %d\n", __func__, __LINE__);
+ ret = (*(pfk_parse_inode_ftable[which_pfe]))
+ (bio, inode, &key_info, &algo_mode, is_pfe);
+ if (ret != 0)
+ return ret;
+//pr_err("%s %d\n", __func__, __LINE__);
+ ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+ if (ret != 0)
+ return ret;
+//pr_err("%s %d\n", __func__, __LINE__);
+ ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size, &key_index, async);
+ if (ret) {
+ if (ret != -EBUSY && ret != -EAGAIN)
+ pr_err("start: could not load key into pfk key cache, error %d\n",
+ ret);
+
+ return ret;
+ }
+
+ ice_setting->key_size = key_size_type;
+ ice_setting->algo_mode = algo_mode;
+ /* hardcoded for now */
+ ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+ ice_setting->key_index = key_index;
+
+ pr_debug("loaded key for file %s key_index %d\n",
+ inode_to_filename(inode), key_index);
+
+ return 0;
+}
+
+/**
+ * pfk_load_key_end() - marks the PFE key as no longer used by ICE
+ * Can also be invoked from non
+ * PFE context, in this case it is not
+ * relevant and is_pfe flag is
+ * set to false
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
+ * from PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {0};
+ enum pfe_type which_pfe = INVALID_PFE;
+ struct inode *inode = NULL;
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /* only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ inode = pfk_bio_get_inode(bio);
+ if (!inode) {
+ *is_pfe = false;
+ return -EINVAL;
+ }
+
+ which_pfe = pfk_get_pfe_type(inode);
+ if (which_pfe == INVALID_PFE) {
+ *is_pfe = false;
+ return -EPERM;
+ }
+
+ ret = (*(pfk_parse_inode_ftable[which_pfe]))
+ (bio, inode, &key_info, NULL, is_pfe);
+ if (ret != 0)
+ return ret;
+
+ pfk_kc_load_key_end(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size);
+
+ pr_debug("finished using key for file %s\n",
+ inode_to_filename(inode));
+
+ return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1: Pointer to first BIO structure.
+ * @bio2: Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or files encrypted with different key.
+ * Also prevent non encrypted and encrypted data from the same file
+ * to be merged (ecryptfs header if stored inside file should be non
+ * encrypted)
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+ struct inode *inode1 = NULL;
+ struct inode *inode2 = NULL;
+ enum pfe_type which_pfe1 = INVALID_PFE;
+ enum pfe_type which_pfe2 = INVALID_PFE;
+
+ if (!pfk_is_ready())
+ return false;
+
+ if (!bio1 || !bio2)
+ return false;
+
+ if (bio1 == bio2)
+ return true;
+
+ inode1 = pfk_bio_get_inode(bio1);
+ inode2 = pfk_bio_get_inode(bio2);
+
+
+ which_pfe1 = pfk_get_pfe_type(inode1);
+ which_pfe2 = pfk_get_pfe_type(inode2);
+
+ /* nodes with different encryption, do not merge */
+ if (which_pfe1 != which_pfe2)
+ return false;
+
+ /* both nodes do not have encryption, allow merge */
+ if (which_pfe1 == INVALID_PFE)
+ return true;
+
+ return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+ inode1, inode2);
+}
+/**
+ * Flush key table on storage core reset. During core reset key configuration
+ * is lost in ICE. We need to flush the cache, so that the keys will be
+ * reconfigured again for every subsequent transaction
+ */
+void pfk_clear_on_reset(void)
+{
+ if (!pfk_is_ready())
+ return;
+
+ pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..7ce70bc
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with EXT4 crypt extension
+ *
+ * The key information is stored in node by EXT4 when file is first opened
+ * and will be later accessed by Block Device Driver to actually load the key
+ * to encryption hw.
+ *
+ * PFK exposes API's for loading and removing keys from encryption hw
+ * and also API to determine whether 2 adjacent blocks can be aggregated by
+ * Block Layer in one request to encryption hw.
+ *
+ */
+
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "ext4_ice.h"
+#include "pfk_ext4.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+ pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+ pfk_ext4_ready = true;
+ pr_info("PFK EXT4 inited successfully\n");
+
+ return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+ return pfk_ext4_ready;
+}
+
+/**
+ * pfk_ext4_dump_inode() - dumps all interesting info about inode to the screen
+ *
+ *
+ */
+/*
+ * static void pfk_ext4_dump_inode(const struct inode* inode)
+ * {
+ * struct ext4_crypt_info *ci = ext4_encryption_info((struct inode*)inode);
+ *
+ * pr_debug("dumping inode with address 0x%p\n", inode);
+ * pr_debug("S_ISREG is %d\n", S_ISREG(inode->i_mode));
+ * pr_debug("EXT4_INODE_ENCRYPT flag is %d\n",
+ * ext4_test_inode_flag((struct inode*)inode, EXT4_INODE_ENCRYPT));
+ * if (ci) {
+ * pr_debug("crypt_info address 0x%p\n", ci);
+ * pr_debug("ci->ci_data_mode %d\n", ci->ci_data_mode);
+ * } else {
+ * pr_debug("crypt_info is NULL\n");
+ * }
+ * }
+ */
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+ if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+ return false;
+
+ return ext4_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * return 0 in case of success, error otherwise (i.e not supported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+ enum ice_cryto_algo_mode *algo)
+{
+ /*
+ * currently only AES XTS algo is supported
+ * in the future, table with supported ciphers might
+ * be introduced
+ */
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!ext4_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 algorithm is not supported by pfk\n");
+ return -EINVAL;
+ }
+
+ if (algo)
+ *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+ return 0;
+}
+
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_ext4_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = ext4_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = ext4_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = ext4_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = ext4_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from ext4\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_ext4_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ /* if there is no ext4 pfk, don't disallow merging blocks */
+ if (!pfk_ext4_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ return ext4_is_ice_encryption_info_equal(inode1, inode2);
+}
+
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..1f33632
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..f0bbf9c
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,188 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+
+/**********************************/
+/** global definitions **/
+/**********************************/
+
+#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+
+/* index 0 and 1 is reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+
+#define TZ_ES_SET_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
+
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+ TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+
+#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1( \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+static uint8_t ice_key[ICE_KEY_SIZE];
+static uint8_t ice_salt[ICE_KEY_SIZE];
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type)
+{
+ struct scm_desc desc = {0};
+ int ret, ret1;
+ char *tzbuf_key = (char *)ice_key;
+ char *tzbuf_salt = (char *)ice_salt;
+ char *s_type = storage_type;
+
+ uint32_t smc_id = 0;
+ u32 tzbuflen_key = sizeof(ice_key);
+ u32 tzbuflen_salt = sizeof(ice_salt);
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+ if (!key || !salt) {
+ pr_err("%s Invalid key/salt\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!tzbuf_key || !tzbuf_salt) {
+ pr_err("%s No Memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (s_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(tzbuf_key, 0, tzbuflen_key);
+ memset(tzbuf_salt, 0, tzbuflen_salt);
+
+ memcpy(ice_key, key, tzbuflen_key);
+ memcpy(ice_salt, salt, tzbuflen_salt);
+
+ dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
+ dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+
+ smc_id = TZ_ES_SET_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+ desc.args[1] = virt_to_phys(tzbuf_key);
+ desc.args[2] = tzbuflen_key;
+ desc.args[3] = virt_to_phys(tzbuf_salt);
+ desc.args[4] = tzbuflen_salt;
+
+ ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+
+ if (ret) {
+ pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = scm_call2(smc_id, &desc);
+
+ if (ret) {
+ pr_err("%s: Set Key Error: %d\n", __func__, ret);
+ if (ret == -EBUSY) {
+ if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+ pr_err("%s: clock disable failed\n", __func__);
+ goto out;
+ }
+		/* Try to invalidate the key to keep ICE in proper state */
+ smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+ desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+ ret1 = scm_call2(smc_id, &desc);
+ if (ret1)
+ pr_err("%s: Invalidate Key Error: %d\n", __func__,
+ ret1);
+ }
+ ret = qcom_ice_setup_ice_hw((const char *)s_type, false);
+
+out:
+ return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+ struct scm_desc desc = {0};
+ int ret;
+
+ uint32_t smc_id = 0;
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+
+ if (storage_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+
+ ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+
+ if (ret) {
+ pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+ return ret;
+ }
+
+ ret = scm_call2(smc_id, &desc);
+
+ if (ret) {
+ pr_err("%s: Error: 0x%x\n", __func__, ret);
+ if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+ pr_err("%s: could not disable clocks\n", __func__);
+ } else {
+ ret = qcom_ice_setup_ice_hw((const char *)storage_type, false);
+ }
+
+ return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..fb7c0d1
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE keys configuration through scm calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..86526fa
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+ const unsigned char *key;
+ const unsigned char *salt;
+ size_t key_size;
+ size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..da71f80
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded to ICE. Since this number is relatively small, the algorithms for
+ * cache eviction are simple, linear and based on last usage timestamp, i.e
+ * the node that will be evicted is the one with the oldest timestamp.
+ * Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+/* TODO replace by some constant from ice.h */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE: entry is free
+ * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
+ and cannot be used by others. SCM call
+ to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
+ cannot be used by others. SCM call to load the
+ key to ICE was successfully executed and key is
+ now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ and cannot be used by others until invalidation
+ is complete
+ * @INACTIVE: entry's key is already loaded, but is not
+ currently being used. It can be re-used for
+ optimization and to avoid SCM call cost or
+ it can be taken by another key if there are
+ no FREE entries
+ * @SCM_ERROR: error occurred while scm call was performed to
+ load the key to ICE
+ */
+enum pfk_kc_entry_state {
+ FREE,
+ ACTIVE_ICE_PRELOAD,
+ ACTIVE_ICE_LOADED,
+ INACTIVE_INVALIDATING,
+ INACTIVE,
+ SCM_ERROR
+};
+
+struct kc_entry {
+ unsigned char key[PFK_MAX_KEY_SIZE];
+ size_t key_size;
+
+ unsigned char salt[PFK_MAX_SALT_SIZE];
+ size_t salt_size;
+
+ u64 time_stamp;
+ u32 key_index;
+
+ struct task_struct *thread_pending;
+
+ enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
+ int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+ return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+ spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+ spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise or if the entry is invalid
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+ if (!entry)
+ return false;
+
+ return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+ int res = 0;
+
+ while (!kc_entry_is_available(entry)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ res = -ERESTARTSYS;
+ break;
+ }
+ /* assuming only one thread can try to invalidate
+ * the same entry
+ */
+ entry->thread_pending = current;
+ kc_spin_unlock();
+ schedule();
+ kc_spin_lock();
+ }
+ set_current_state(TASK_RUNNING);
+
+ return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ * INACTIVE_INVALIDATING
+ * If entry is in use, waits till
+ * it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+ int res;
+
+ res = kc_entry_wait_till_available(entry);
+ if (res)
+ return res;
+
+ entry->state = INACTIVE_INVALIDATING;
+
+ return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - moves entry to state FREE
+ *				(waking of tasks waiting on the entry is
+ *				done by pfk_kc_load_key_end(), not here)
+ *
+ * @entry: pointer to entry
+ *
+ * Has no effect if entry is NULL or not in INACTIVE_INVALIDATING state
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->state != INACTIVE_INVALIDATING)
+ return;
+
+ entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry whose timestamp is minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+ struct kc_entry *b)
+{
+ if (!a)
+ return b;
+
+ if (time_before64(b->time_stamp, a->time_stamp))
+ return b;
+
+ return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+ return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start search with; if entry found, updated with
+ * index of that entry
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ int *starting_index)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (salt != NULL) {
+ if (entry->salt_size != salt_size)
+ continue;
+
+ if (memcmp(entry->salt, salt, salt_size) != 0)
+ continue;
+ }
+
+ if (entry->key_size != key_size)
+ continue;
+
+ if (memcmp(entry->key, key, key_size) == 0) {
+ *starting_index = i;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return entry or NULL in case of error
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ int index = 0;
+
+ return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+ struct kc_entry *curr_min_entry = NULL;
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (entry->state == FREE)
+ return entry;
+
+ if (entry->state == INACTIVE)
+ curr_min_entry = kc_min_entry(curr_min_entry, entry);
+ }
+
+ return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ memset(entry->key, 0, entry->key_size);
+ memset(entry->salt, 0, entry->salt_size);
+
+ entry->key_size = 0;
+ entry->salt_size = 0;
+
+ entry->time_stamp = 0;
+ entry->scm_error = 0;
+
+ entry->state = FREE;
+
+ entry->loaded_ref_cnt = 0;
+ entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ * loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size)
+{
+ int ret;
+
+ kc_clear_entry(entry);
+
+ memcpy(entry->key, key, key_size);
+ entry->key_size = key_size;
+
+ memcpy(entry->salt, salt, salt_size);
+ entry->salt_size = salt_size;
+
+ /* Mark entry as no longer free before releasing the lock */
+ entry->state = ACTIVE_ICE_PRELOAD;
+ kc_spin_unlock();
+
+ ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+ entry->salt, s_type);
+
+ kc_spin_lock();
+ return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+ int i = 0;
+ struct kc_entry *entry = NULL;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ entry->key_index = PFK_KC_STARTING_INDEX + i;
+ }
+ kc_ready = true;
+ kc_spin_unlock();
+ return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+ int res = pfk_kc_clear();
+
+ kc_ready = false;
+ return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: whether scm calls are allowed in the caller context
+ *
+ * If key is present in cache, then the key_index will be retrieved from cache.
+ * If it is not present, the oldest entry from kc table will be evicted,
+ * the key will be loaded to ICE via QSEE to the index that is the evicted
+ * entry number and stored in cache.
+ * Entry that is going to be used is marked as being used, it will mark
+ * as not being used when ICE finishes using it and pfk_kc_load_key_end
+ * will be invoked.
+ * As QSEE calls can only be done from a non-atomic context, when @async flag
+ * is set to 'false', it specifies that it is ok to make the calls in the
+ * current context. Otherwise, when @async is set, the caller should retry the
+ * call again from a different context, and -EAGAIN error will be returned.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async)
+{
+ int ret = 0;
+ struct kc_entry *entry = NULL;
+ bool entry_exists = false;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key || !salt || !key_index) {
+ pr_err("%s key/salt/key_index NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (key_size != PFK_KC_KEY_SIZE) {
+ pr_err("unsupported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (salt_size != PFK_KC_SALT_SIZE) {
+ pr_err("unsupported salt size %zu\n", salt_size);
+ return -EINVAL;
+ }
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ if (async) {
+ pr_debug("%s task will populate entry\n", __func__);
+ kc_spin_unlock();
+ return -EAGAIN;
+ }
+
+ entry = kc_find_oldest_entry_non_locked();
+ if (!entry) {
+ /* could not find a single non locked entry,
+ * return EBUSY to upper layers so that the
+ * request will be rescheduled
+ */
+ kc_spin_unlock();
+ return -EBUSY;
+ }
+ } else {
+ entry_exists = true;
+ }
+
+ pr_debug("entry with index %d is in state %d\n",
+ entry->key_index, entry->state);
+
+ switch (entry->state) {
+ case (INACTIVE):
+ if (entry_exists) {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ }
+ case (FREE):
+ ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+ if (ret) {
+ entry->state = SCM_ERROR;
+ entry->scm_error = ret;
+ pr_err("%s: key load error (%d)\n", __func__, ret);
+ } else {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+ * In case of UFS only increase ref cnt for async calls,
+ * sync calls from within work thread do not pass
+ * requests further to HW
+ */
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ }
+ break;
+ case (ACTIVE_ICE_PRELOAD):
+ case (INACTIVE_INVALIDATING):
+ ret = -EAGAIN;
+ break;
+ case (ACTIVE_ICE_LOADED):
+ kc_update_timestamp(entry);
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ case(SCM_ERROR):
+ ret = entry->scm_error;
+ kc_clear_entry(entry);
+ entry->state = FREE;
+ break;
+ default:
+ pr_err("invalid state %d for entry with key index %d\n",
+ entry->state, entry->key_index);
+ ret = -EINVAL;
+ }
+
+ *key_index = entry->key_index;
+ kc_spin_unlock();
+
+ return ret;
+}
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ * by pfk_kc_load_key_start
+ * by marking the entry as not
+ * being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ if (!key || !salt)
+ return;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ kc_spin_unlock();
+ pr_err("internal error, there should an entry to unlock\n");
+
+ return;
+ }
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
+
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
+ }
+
+ kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also in case of a
+ * non-existing key)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (!salt)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return -EINVAL;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ pr_debug("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+ kc_clear_entry(entry);
+
+ kc_spin_unlock();
+
+ qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+ kc_spin_lock();
+ kc_entry_finish_invalidating(entry);
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from cache and from ICE engine
+ * when no salt is available. Will only search key part, if there are several,
+ * all will be removed
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+ struct kc_entry *entry = NULL;
+ int index = 0;
+ int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+ int temp_indexes_size = 0;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ memset(temp_indexes, -1, sizeof(temp_indexes));
+
+ kc_spin_lock();
+
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry) {
+ pr_err("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+ kc_clear_entry(entry);
+
+ /* let's clean additional entries with the same key if there are any */
+ do {
+ index++;
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry)
+ break;
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+
+ kc_clear_entry(entry);
+
+
+ } while (true);
+
+ kc_spin_unlock();
+
+ temp_indexes_size--;
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ qti_pfk_ice_invalidate_key(
+ kc_entry_at_index(temp_indexes[i])->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+
+out:
+ kc_spin_lock();
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ kc_entry_finish_invalidating(
+ kc_entry_at_index(temp_indexes[i]));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+out:
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ kc_entry_finish_invalidating(kc_entry_at_index(i));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the table and remove all keys from ICE
+ * The assumption is that at this point we don't have any pending transactions
+ * Also, there is no need to clear keys from ICE
+ *
+ * No return value; the function cannot fail
+ *
+ */
+void pfk_kc_clear_on_reset(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+}
+
+static int pfk_kc_find_storage_type(char **device)
+{
+ char boot[20] = {'\0'};
+ char *match = (char *)strnstr(saved_command_line,
+ "androidboot.bootdevice=",
+ strlen(saved_command_line));
+ if (match) {
+ memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+ sizeof(boot) - 1);
+ if (strnstr(boot, PFK_UFS, strlen(boot)))
+ *device = PFK_UFS;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __init pfk_kc_pre_init(void)
+{
+ return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+ s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..dc4ad15
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index 6a7b359..e1f9e32 100644
--- a/security/security.c
+++ b/security/security.c
@@ -524,6 +524,14 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
@@ -1668,6 +1676,8 @@ struct security_hook_heads security_hook_heads __lsm_ro_after_init = {
.inode_init_security =
LIST_HEAD_INIT(security_hook_heads.inode_init_security),
.inode_create = LIST_HEAD_INIT(security_hook_heads.inode_create),
+ .inode_post_create =
+ LIST_HEAD_INIT(security_hook_heads.inode_post_create),
.inode_link = LIST_HEAD_INIT(security_hook_heads.inode_link),
.inode_unlink = LIST_HEAD_INIT(security_hook_heads.inode_unlink),
.inode_symlink =
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c21e135..13011038 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -25,8 +25,9 @@
#include <linux/in.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+//#include "flask.h"
+//#include "avc.h"
+#include "security.h"
struct task_security_struct {
u32 osid; /* SID prior to last execve */
@@ -52,6 +53,8 @@ struct inode_security_struct {
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
+ u32 tag; /* Per-File-Encryption tag */
+ void *pfk_data; /* Per-File-Key data from ecryptfs */
struct mutex lock;
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 308a286..b8e98c1 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -12,7 +12,6 @@
#include <linux/dcache.h>
#include <linux/magic.h>
#include <linux/types.h>
-#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index b75c31a..530ed9b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1486,7 +1486,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
* @inode: the object
* @name: attribute name
* @buffer: where to put the result
- * @alloc: unused
+ * @alloc: duplicate memory
*
* Returns the size of the attribute or an error code
*/
@@ -1499,43 +1499,38 @@ static int smack_inode_getsecurity(struct inode *inode,
struct super_block *sbp;
struct inode *ip = (struct inode *)inode;
struct smack_known *isp;
- int ilen;
- int rc = 0;
- if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
isp = smk_of_inode(inode);
- ilen = strlen(isp->smk_known);
- *buffer = isp->smk_known;
- return ilen;
+ else {
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ sbp = ip->i_sb;
+ if (sbp->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
+
+ sock = SOCKET_I(ip);
+ if (sock == NULL || sock->sk == NULL)
+ return -EOPNOTSUPP;
+
+ ssp = sock->sk->sk_security;
+
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ isp = ssp->smk_in;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+ isp = ssp->smk_out;
+ else
+ return -EOPNOTSUPP;
}
- /*
- * The rest of the Smack xattrs are only on sockets.
- */
- sbp = ip->i_sb;
- if (sbp->s_magic != SOCKFS_MAGIC)
- return -EOPNOTSUPP;
-
- sock = SOCKET_I(ip);
- if (sock == NULL || sock->sk == NULL)
- return -EOPNOTSUPP;
-
- ssp = sock->sk->sk_security;
-
- if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in;
- else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out;
- else
- return -EOPNOTSUPP;
-
- ilen = strlen(isp->smk_known);
- if (rc == 0) {
- *buffer = isp->smk_known;
- rc = ilen;
+ if (alloc) {
+ *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
+ if (*buffer == NULL)
+ return -ENOMEM;
}
- return rc;
+ return strlen(isp->smk_known);
}
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a098656..add1f8d 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -781,7 +781,7 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
wake_up(&stream->runtime->sleep);
- return retval;
+ goto ret;
}
ret:
@@ -981,14 +981,13 @@ static const struct file_operations snd_compr_file_ops = {
static int snd_compress_dev_register(struct snd_device *device)
{
int ret = -EINVAL;
- char str[16];
struct snd_compr *compr;
if (snd_BUG_ON(!device || !device->device_data))
return -EBADFD;
compr = device->device_data;
- pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
+ pr_debug("reg device %s, direction %d\n", compr->name,
compr->direction);
/* register compressed device */
ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index aaff9ee..b30b213 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
- ev->data.ext.ptr, ev->data.ext.len);
+ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 046cb586..06b2122 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
}
/*
+ * put MIDI sysex bytes; the event buffer may be chained, thus it has
+ * to be expanded via snd_seq_dump_var_event().
+ */
+struct readq_sysex_ctx {
+ struct seq_oss_readq *readq;
+ int dev;
+};
+
+static int readq_dump_sysex(void *ptr, void *buf, int count)
+{
+ struct readq_sysex_ctx *ctx = ptr;
+
+ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
+}
+
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev)
+{
+ struct readq_sysex_ctx ctx = {
+ .readq = q,
+ .dev = dev
+ };
+
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ return 0;
+ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
+}
+
+/*
* copy an event to input queue:
* return zero if enqueued
*/
diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
index f1463f1..8d033ca 100644
--- a/sound/core/seq/oss/seq_oss_readq.h
+++ b/sound/core/seq/oss/seq_oss_readq.h
@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 67c4c68..45ef591 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
if (atomic)
read_lock(&grp->list_lock);
else
- down_read(&grp->list_mutex);
+ down_read_nested(&grp->list_mutex, hop);
list_for_each_entry(subs, &grp->list_head, src_list) {
/* both ports ready? */
if (atomic_read(&subs->ref_count) != 2)
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
struct snd_seq_port_info *info = arg;
struct snd_seq_client_port *port;
struct snd_seq_port_callback *callback;
+ int port_idx;
/* it is not allowed to create the port for an another client */
if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
return -ENOMEM;
if (client->type == USER_CLIENT && info->kernel) {
- snd_seq_delete_port(client, port->addr.port);
+ port_idx = port->addr.port;
+ snd_seq_port_unlock(port);
+ snd_seq_delete_port(client, port_idx);
return -EINVAL;
}
if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
snd_seq_set_port_info(port, info);
snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
+ snd_seq_port_unlock(port);
return 0;
}
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 12ba833..ba5752e 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
#include <sound/core.h>
#include "seq_lock.h"
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
@@ -42,5 +40,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
}
EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc..ac38031 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
#include <linux/sched.h>
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
typedef atomic_t snd_use_lock_t;
/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t; /* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index fe686ee..f04714d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
}
-/* create a port, port number is returned (-1 on failure) */
+/* create a port, port number is returned (-1 on failure);
+ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
+ */
struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
int port)
{
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
snd_use_lock_init(&new_port->use_lock);
port_subs_info_init(&new_port->c_src);
port_subs_info_init(&new_port->c_dest);
+ snd_use_lock_use(&new_port->use_lock);
num = port >= 0 ? port : 0;
mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
list_add_tail(&new_port->list, &p->list);
client->num_ports++;
new_port->addr.port = num; /* store the port number in the port */
+ sprintf(new_port->name, "port-%d", num);
write_unlock_irqrestore(&client->ports_lock, flags);
mutex_unlock(&client->ports_mutex);
- sprintf(new_port->name, "port-%d", num);
return new_port;
}
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index c82ed3e..2007649 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
* decode input event and put to read buffer of each opened file
*/
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
- struct snd_seq_event *ev)
+ struct snd_seq_event *ev,
+ bool atomic)
{
struct snd_virmidi *vmidi;
unsigned char msg[4];
int len;
- read_lock(&rdev->filelist_lock);
+ if (atomic)
+ read_lock(&rdev->filelist_lock);
+ else
+ down_read(&rdev->filelist_sem);
list_for_each_entry(vmidi, &rdev->filelist, list) {
if (!vmidi->trigger)
continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
snd_rawmidi_receive(vmidi->substream, msg, len);
}
}
- read_unlock(&rdev->filelist_lock);
+ if (atomic)
+ read_unlock(&rdev->filelist_lock);
+ else
+ up_read(&rdev->filelist_sem);
return 0;
}
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
struct snd_virmidi_dev *rdev;
rdev = rmidi->private_data;
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, true);
}
#endif /* 0 */
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
rdev = private_data;
if (!(rdev->flags & SNDRV_VIRMIDI_USE))
return 0; /* ignored */
- return snd_virmidi_dev_receive_event(rdev, ev);
+ return snd_virmidi_dev_receive_event(rdev, ev, atomic);
}
/*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_rawmidi_runtime *runtime = substream->runtime;
struct snd_virmidi *vmidi;
- unsigned long flags;
vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
vmidi->client = rdev->client;
vmidi->port = rdev->port;
runtime->private_data = vmidi;
- write_lock_irqsave(&rdev->filelist_lock, flags);
+ down_write(&rdev->filelist_sem);
+ write_lock_irq(&rdev->filelist_lock);
list_add_tail(&vmidi->list, &rdev->filelist);
- write_unlock_irqrestore(&rdev->filelist_lock, flags);
+ write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
vmidi->rdev = rdev;
return 0;
}
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
+ down_write(&rdev->filelist_sem);
write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
write_unlock_irq(&rdev->filelist_lock);
+ up_write(&rdev->filelist_sem);
snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
rdev->rmidi = rmidi;
rdev->device = device;
rdev->client = -1;
+ init_rwsem(&rdev->filelist_sem);
rwlock_init(&rdev->filelist_lock);
INIT_LIST_HEAD(&rdev->filelist);
rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 6a437eb..59127b6 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -133,7 +133,8 @@ enum {
#endif /* CONFIG_X86_X32 */
};
-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = compat_ptr(arg);
@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
case SNDRV_TIMER_IOCTL_GPARAMS32:
return snd_timer_user_gparams_compat(file, argp);
case SNDRV_TIMER_IOCTL_INFO32:
@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
}
return -ENOIOCTLCMD;
}
+
+static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct snd_timer_user *tu = file->private_data;
+ long ret;
+
+ mutex_lock(&tu->ioctl_lock);
+ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
+ mutex_unlock(&tu->ioctl_lock);
+ return ret;
+}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 0f41257..8761877 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
(cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
+ if (cur_cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
case AZX_ML_CAP_ID:
dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index e1af24f..c308a4f 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2279,6 +2279,9 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
} else {
int src[2], mix[2];
+ if (nr_ch < 1)
+ return -EINVAL;
+
/* Get SRC and MIXER hardware resources. */
for (i = 0; i < nr_ch; i++) {
if ((mix[i] =
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 9370717..286f5e3 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_busses_in(chip);
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_pipes_out(chip);
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 96;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = 0;
#ifdef ECHOCARD_HAS_VMIXER
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
#endif
uinfo->dimen.d[1] = 16; /* 16 channels */
uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */
- uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
return 0;
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 9913be8..e46c561 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1755,7 +1755,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
return -1;
if (*step_to_check && *step_to_check != step) {
codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
-- *step_to_check, step);
+ *step_to_check, step);
return -1;
}
*step_to_check = step;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 775c678..bd65022 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3685,6 +3685,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_hsw_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6f337f0..fe1d06d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
case 0x10ec0282:
@@ -909,6 +910,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0275, 0x1028, 0, "ALC3260" },
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ { 0x10ec0236, 0x1028, 0, "ALC3204" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3694,6 +3696,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
@@ -3777,6 +3780,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3885,6 +3889,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -3971,6 +3976,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4064,6 +4070,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4131,6 +4138,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4335,6 +4343,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
break;
@@ -5852,6 +5861,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170150},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6226,6 +6243,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -7205,6 +7223,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 439aa3f..79dcb1e 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -91,6 +91,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+
+ /*
+ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
+ * avoid losing SNR (workaround from ADI). This must be done after
+ * the ADC(s) have been enabled. According to the data sheet, it is
+ * normally illegal to set this bit when the sampling rate is 96 kHz,
+ * but according to ADI it is acceptable for this workaround.
+ */
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
+ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
+ ADAU17X1_CONVERTER0_ADOSR, 0);
+
+ return 0;
+}
+
static const char * const adau17x1_mono_stereo_text[] = {
"Stereo",
"Mono Left Channel (L+R)",
@@ -122,7 +143,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
&adau17x1_dac_mode_mux),
- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
+ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
+ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index bf04b7e..db35003 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
#define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
+#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
+
#endif
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index f24b7cf..e024800 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -395,14 +395,14 @@ static const char * const rt5514_dmic_src[] = {
"DMIC1", "DMIC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5514_stereo1_dmic_enum, RT5514_DIG_SOURCE_CTRL,
RT5514_AD0_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
static const struct snd_kcontrol_new rt5514_sto1_dmic_mux =
SOC_DAPM_ENUM("Stereo1 DMIC Source", rt5514_stereo1_dmic_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5514_stereo2_dmic_enum, RT5514_DIG_SOURCE_CTRL,
RT5514_AD1_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index db54550..635818f 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -1150,28 +1150,28 @@ static const char * const rt5659_data_select[] = {
"L/R", "R/L", "L/L", "R/R"
};
-static const SOC_ENUM_SINGLE_DECL(rt5659_if1_01_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if1_01_adc_enum,
RT5659_TDM_CTRL_2, RT5659_DS_ADC_SLOT01_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if1_23_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if1_23_adc_enum,
RT5659_TDM_CTRL_2, RT5659_DS_ADC_SLOT23_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if1_45_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if1_45_adc_enum,
RT5659_TDM_CTRL_2, RT5659_DS_ADC_SLOT45_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if1_67_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if1_67_adc_enum,
RT5659_TDM_CTRL_2, RT5659_DS_ADC_SLOT67_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if2_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if2_dac_enum,
RT5659_DIG_INF23_DATA, RT5659_IF2_DAC_SEL_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if2_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if2_adc_enum,
RT5659_DIG_INF23_DATA, RT5659_IF2_ADC_SEL_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if3_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if3_dac_enum,
RT5659_DIG_INF23_DATA, RT5659_IF3_DAC_SEL_SFT, rt5659_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5659_if3_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5659_if3_adc_enum,
RT5659_DIG_INF23_DATA, RT5659_IF3_ADC_SEL_SFT, rt5659_data_select);
static const struct snd_kcontrol_new rt5659_if1_01_adc_swap_mux =
@@ -1207,31 +1207,31 @@ static unsigned int rt5659_asrc_clk_map_values[] = {
0, 1, 2, 3, 5, 6,
};
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_da_sto_asrc_enum, RT5659_ASRC_2, RT5659_DA_STO_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_da_monol_asrc_enum, RT5659_ASRC_2, RT5659_DA_MONO_L_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_da_monor_asrc_enum, RT5659_ASRC_2, RT5659_DA_MONO_R_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_ad_sto1_asrc_enum, RT5659_ASRC_2, RT5659_AD_STO1_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_ad_sto2_asrc_enum, RT5659_ASRC_3, RT5659_AD_STO2_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_ad_monol_asrc_enum, RT5659_ASRC_3, RT5659_AD_MONO_L_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
-static const SOC_VALUE_ENUM_SINGLE_DECL(
+static SOC_VALUE_ENUM_SINGLE_DECL(
rt5659_ad_monor_asrc_enum, RT5659_ASRC_3, RT5659_AD_MONO_R_T_SFT, 0x7,
rt5659_asrc_clk_src, rt5659_asrc_clk_map_values);
@@ -1930,14 +1930,14 @@ static const char * const rt5659_dac2_src[] = {
"IF1 DAC2", "IF2 DAC", "IF3 DAC", "Mono ADC MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dac_l2_enum, RT5659_DAC_CTRL,
RT5659_DAC_L2_SEL_SFT, rt5659_dac2_src);
static const struct snd_kcontrol_new rt5659_dac_l2_mux =
SOC_DAPM_ENUM("DAC L2 Source", rt5659_dac_l2_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dac_r2_enum, RT5659_DAC_CTRL,
RT5659_DAC_R2_SEL_SFT, rt5659_dac2_src);
@@ -1951,7 +1951,7 @@ static const char * const rt5659_sto1_adc1_src[] = {
"DAC MIX", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_sto1_adc1_enum, RT5659_STO1_ADC_MIXER,
RT5659_STO1_ADC1_SRC_SFT, rt5659_sto1_adc1_src);
@@ -1964,7 +1964,7 @@ static const char * const rt5659_sto1_adc_src[] = {
"ADC1", "ADC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_sto1_adc_enum, RT5659_STO1_ADC_MIXER,
RT5659_STO1_ADC_SRC_SFT, rt5659_sto1_adc_src);
@@ -1977,7 +1977,7 @@ static const char * const rt5659_sto1_adc2_src[] = {
"DAC MIX", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_sto1_adc2_enum, RT5659_STO1_ADC_MIXER,
RT5659_STO1_ADC2_SRC_SFT, rt5659_sto1_adc2_src);
@@ -1990,7 +1990,7 @@ static const char * const rt5659_sto1_dmic_src[] = {
"DMIC1", "DMIC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_sto1_dmic_enum, RT5659_STO1_ADC_MIXER,
RT5659_STO1_DMIC_SRC_SFT, rt5659_sto1_dmic_src);
@@ -2004,7 +2004,7 @@ static const char * const rt5659_mono_adc_l2_src[] = {
"Mono DAC MIXL", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adc_l2_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_L2_SRC_SFT, rt5659_mono_adc_l2_src);
@@ -2018,7 +2018,7 @@ static const char * const rt5659_mono_adc_l1_src[] = {
"Mono DAC MIXL", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adc_l1_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_L1_SRC_SFT, rt5659_mono_adc_l1_src);
@@ -2031,14 +2031,14 @@ static const char * const rt5659_mono_adc_src[] = {
"ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adc_l_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_L_SRC_SFT, rt5659_mono_adc_src);
static const struct snd_kcontrol_new rt5659_mono_adc_l_mux =
SOC_DAPM_ENUM("Mono ADC L Source", rt5659_mono_adc_l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adcr_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_R_SRC_SFT, rt5659_mono_adc_src);
@@ -2051,7 +2051,7 @@ static const char * const rt5659_mono_dmic_l_src[] = {
"DMIC1 L", "DMIC2 L"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_dmic_l_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_DMIC_L_SRC_SFT, rt5659_mono_dmic_l_src);
@@ -2064,7 +2064,7 @@ static const char * const rt5659_mono_adc_r2_src[] = {
"Mono DAC MIXR", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adc_r2_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_R2_SRC_SFT, rt5659_mono_adc_r2_src);
@@ -2077,7 +2077,7 @@ static const char * const rt5659_mono_adc_r1_src[] = {
"Mono DAC MIXR", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_adc_r1_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_ADC_R1_SRC_SFT, rt5659_mono_adc_r1_src);
@@ -2090,7 +2090,7 @@ static const char * const rt5659_mono_dmic_r_src[] = {
"DMIC1 R", "DMIC2 R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_mono_dmic_r_enum, RT5659_MONO_ADC_MIXER,
RT5659_MONO_DMIC_R_SRC_SFT, rt5659_mono_dmic_r_src);
@@ -2104,14 +2104,14 @@ static const char * const rt5659_dac1_src[] = {
"IF1 DAC1", "IF2 DAC", "IF3 DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dac_r1_enum, RT5659_AD_DA_MIXER,
RT5659_DAC1_R_SEL_SFT, rt5659_dac1_src);
static const struct snd_kcontrol_new rt5659_dac_r1_mux =
SOC_DAPM_ENUM("DAC R1 Source", rt5659_dac_r1_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dac_l1_enum, RT5659_AD_DA_MIXER,
RT5659_DAC1_L_SEL_SFT, rt5659_dac1_src);
@@ -2124,14 +2124,14 @@ static const char * const rt5659_dig_dac_mix_src[] = {
"Stereo DAC Mixer", "Mono DAC Mixer"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dig_dac_mixl_enum, RT5659_DIG_MIXER,
RT5659_DAC_MIX_L_SFT, rt5659_dig_dac_mix_src);
static const struct snd_kcontrol_new rt5659_dig_dac_mixl_mux =
SOC_DAPM_ENUM("DAC Digital Mixer L Source", rt5659_dig_dac_mixl_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_dig_dac_mixr_enum, RT5659_DIG_MIXER,
RT5659_DAC_MIX_R_SFT, rt5659_dig_dac_mix_src);
@@ -2144,14 +2144,14 @@ static const char * const rt5659_alg_dac1_src[] = {
"DAC", "Stereo DAC Mixer"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_alg_dac_l1_enum, RT5659_A_DAC_MUX,
RT5659_A_DACL1_SFT, rt5659_alg_dac1_src);
static const struct snd_kcontrol_new rt5659_alg_dac_l1_mux =
SOC_DAPM_ENUM("Analog DACL1 Source", rt5659_alg_dac_l1_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_alg_dac_r1_enum, RT5659_A_DAC_MUX,
RT5659_A_DACR1_SFT, rt5659_alg_dac1_src);
@@ -2164,14 +2164,14 @@ static const char * const rt5659_alg_dac2_src[] = {
"Stereo DAC Mixer", "Mono DAC Mixer"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_alg_dac_l2_enum, RT5659_A_DAC_MUX,
RT5659_A_DACL2_SFT, rt5659_alg_dac2_src);
static const struct snd_kcontrol_new rt5659_alg_dac_l2_mux =
SOC_DAPM_ENUM("Analog DAC L2 Source", rt5659_alg_dac_l2_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_alg_dac_r2_enum, RT5659_A_DAC_MUX,
RT5659_A_DACR2_SFT, rt5659_alg_dac2_src);
@@ -2184,7 +2184,7 @@ static const char * const rt5659_if2_adc_in_src[] = {
"IF_ADC1", "IF_ADC2", "DAC_REF", "IF_ADC3"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_if2_adc_in_enum, RT5659_DIG_INF23_DATA,
RT5659_IF2_ADC_IN_SFT, rt5659_if2_adc_in_src);
@@ -2197,7 +2197,7 @@ static const char * const rt5659_if3_adc_in_src[] = {
"IF_ADC1", "IF_ADC2", "DAC_REF", "Stereo2_ADC_L/R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_if3_adc_in_enum, RT5659_DIG_INF23_DATA,
RT5659_IF3_ADC_IN_SFT, rt5659_if3_adc_in_src);
@@ -2210,14 +2210,14 @@ static const char * const rt5659_pdm_src[] = {
"Mono DAC", "Stereo DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_pdm_l_enum, RT5659_PDM_OUT_CTRL,
RT5659_PDM1_L_SFT, rt5659_pdm_src);
static const struct snd_kcontrol_new rt5659_pdm_l_mux =
SOC_DAPM_ENUM("PDM L Source", rt5659_pdm_l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_pdm_r_enum, RT5659_PDM_OUT_CTRL,
RT5659_PDM1_R_SFT, rt5659_pdm_src);
@@ -2230,7 +2230,7 @@ static const char * const rt5659_spdif_src[] = {
"IF1_DAC1", "IF1_DAC2", "IF2_DAC", "IF3_DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_spdif_enum, RT5659_SPDIF_CTRL,
RT5659_SPDIF_SEL_SFT, rt5659_spdif_src);
@@ -2250,7 +2250,7 @@ static const char * const rt5659_rx_adc_data_src[] = {
"NUL:AD2:DAC:AD1", "NUL:DAC:DAC:AD2", "NUL:DAC:AD2:DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5659_rx_adc_data_enum, RT5659_TDM_CTRL_2,
RT5659_ADCDAT_SRC_SFT, rt5659_rx_adc_data_src);
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 9f0933c..e396b768 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -526,10 +526,10 @@ static const char * const rt5660_data_select[] = {
"L/R", "R/L", "L/L", "R/R"
};
-static const SOC_ENUM_SINGLE_DECL(rt5660_if1_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5660_if1_dac_enum,
RT5660_DIG_INF1_DATA, RT5660_IF1_DAC_IN_SFT, rt5660_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5660_if1_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5660_if1_adc_enum,
RT5660_DIG_INF1_DATA, RT5660_IF1_ADC_IN_SFT, rt5660_data_select);
static const struct snd_kcontrol_new rt5660_if1_dac_swap_mux =
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b943dde..3bdd819 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -789,7 +789,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
- memcpy(ctl->cache, p, ctl->len);
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
+ else
+ memcpy(ctl->cache, p, ctl->len);
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
@@ -816,6 +819,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
ret = wm_coeff_write_control(ctl, ctl->cache, size);
+ else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
}
mutex_unlock(&ctl->dsp->pwr_lock);
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index bd19fad..c17f262 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -807,7 +807,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_rt5640_mc_driver = {
.driver = {
.name = "bytcr_rt5640",
- .pm = &snd_soc_pm_ops,
},
.probe = snd_byt_rt5640_mc_probe,
};
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index eabff3a..ae49f81 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -317,7 +317,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_rt5651_mc_driver = {
.driver = {
.name = "bytcr_rt5651",
- .pm = &snd_soc_pm_ops,
},
.probe = snd_byt_rt5651_mc_probe,
};
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 05cf809..d7013bd 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -13,7 +13,7 @@
config SND_SOC_MT2701_CS42448
tristate "ASoc Audio driver for MT2701 with CS42448 codec"
- depends on SND_SOC_MT2701
+ depends on SND_SOC_MT2701 && I2C
select SND_SOC_CS42XX8_I2C
select SND_SOC_BT_SCO
help
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d40bfef..172af54 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -360,6 +360,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
snd_soc_dapm_new_control_unlocked(widget->dapm,
&template);
kfree(name);
+ if (IS_ERR(data->widget)) {
+ ret = PTR_ERR(data->widget);
+ goto err_data;
+ }
if (!data->widget) {
ret = -ENOMEM;
goto err_data;
@@ -394,6 +398,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
data->widget = snd_soc_dapm_new_control_unlocked(
widget->dapm, &template);
kfree(name);
+ if (IS_ERR(data->widget)) {
+ ret = PTR_ERR(data->widget);
+ goto err_data;
+ }
if (!data->widget) {
ret = -ENOMEM;
goto err_data;
@@ -3327,11 +3335,22 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ /* Do not nag about probe deferrals */
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create DAPM control %s (%d)\n",
+ widget->name, ret);
+ goto out_unlock;
+ }
if (!w)
dev_err(dapm->dev,
"ASoC: Failed to create DAPM control %s\n",
widget->name);
+out_unlock:
mutex_unlock(&dapm->card->dapm_mutex);
return w;
}
@@ -3354,6 +3373,8 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->regulator = devm_regulator_get(dapm->dev, w->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
@@ -3372,6 +3393,8 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->clk = devm_clk_get(dapm->dev, w->name);
if (IS_ERR(w->clk)) {
ret = PTR_ERR(w->clk);
+ if (ret == -EPROBE_DEFER)
+ return ERR_PTR(ret);
dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
w->name, ret);
return NULL;
@@ -3490,6 +3513,16 @@ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
for (i = 0; i < num; i++) {
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ if (IS_ERR(w)) {
+ ret = PTR_ERR(w);
+ /* Do not nag about probe deferrals */
+ if (ret == -EPROBE_DEFER)
+ break;
+ dev_err(dapm->dev,
+ "ASoC: Failed to create DAPM control %s (%d)\n",
+ widget->name, ret);
+ break;
+ }
if (!w) {
dev_err(dapm->dev,
"ASoC: Failed to create DAPM control %s\n",
@@ -3766,6 +3799,15 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name);
w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template);
+ if (IS_ERR(w)) {
+ ret = PTR_ERR(w);
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(card->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ link_name, ret);
+ goto outfree_kcontrol_news;
+ }
if (!w) {
dev_err(card->dev, "ASoC: Failed to create %s widget\n",
link_name);
@@ -3817,6 +3859,16 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ dai->driver->playback.stream_name, ret);
+ return ret;
+ }
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->playback.stream_name);
@@ -3836,6 +3888,16 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(w)) {
+ int ret = PTR_ERR(w);
+
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev,
+ "ASoC: Failed to create %s widget (%d)\n",
+ dai->driver->capture.stream_name, ret);
+ return ret;
+ }
if (!w) {
dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
dai->driver->capture.stream_name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 6b05047..8a758c9 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1473,6 +1473,15 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
widget = snd_soc_dapm_new_control(dapm, &template);
else
widget = snd_soc_dapm_new_control_unlocked(dapm, &template);
+ if (IS_ERR(widget)) {
+ ret = PTR_ERR(widget);
+ /* Do not nag about probe deferrals */
+ if (ret != -EPROBE_DEFER)
+ dev_err(tplg->dev,
+ "ASoC: failed to create widget %s controls (%d)\n",
+ w->name, ret);
+ goto hdr_err;
+ }
if (widget == NULL) {
dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
w->name);
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 88fbb3a..048de15 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -403,14 +403,6 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = {
.name = "spdif",
};
-static const struct snd_soc_dapm_widget dit_widgets[] = {
- SND_SOC_DAPM_OUTPUT("spdif-out"),
-};
-
-static const struct snd_soc_dapm_route dit_routes[] = {
- { "spdif-out", NULL, "Playback" },
-};
-
static const struct of_device_id sun4i_spdif_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-spdif", },
{ .compatible = "allwinner,sun6i-a31-spdif", },
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index b871ba4..4458190 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
if (err)
- return err;
+ goto err_kill_urb;
- if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
- return -ENODEV;
+ if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
+ err = -ENODEV;
+ goto err_kill_urb;
+ }
usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
setup_card(cdev);
return 0;
+
+ err_kill_urb:
+ usb_kill_urb(&cdev->ep1_in_urb);
+ return err;
}
static int snd_probe(struct usb_interface *intf,
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a87a526..f029f8c 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -287,6 +287,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
struct usb_interface_descriptor *altsd;
struct usb_interface *usb_iface;
int i, protocol;
+ int rest_bytes;
usb_iface = usb_ifnum_to_if(dev, ctrlif);
if (!usb_iface) {
@@ -328,12 +329,31 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
return -EINVAL;
}
+ rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
+ control_header;
+
+ /* just to be sure -- this shouldn't hit at all */
+ if (rest_bytes <= 0) {
+ dev_err(&dev->dev, "invalid control header\n");
+ return -EINVAL;
+ }
+
h1 = control_header;
+ if (rest_bytes < sizeof(*h1)) {
+ dev_err(&dev->dev, "too short v1 buffer descriptor\n");
+ return -EINVAL;
+ }
+
if (!h1->bInCollection) {
dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
return -EINVAL;
}
+ if (rest_bytes < h1->bLength) {
+ dev_err(&dev->dev, "invalid buffer length (v1)\n");
+ return -EINVAL;
+ }
+
if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
return -EINVAL;
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index ab3c280..58d6249 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -775,9 +775,10 @@ int line6_probe(struct usb_interface *interface,
return 0;
error:
- if (line6->disconnect)
- line6->disconnect(line6);
- snd_card_free(card);
+ /* we can call disconnect callback here because no close-sync is
+ * needed yet at this point
+ */
+ line6_disconnect(interface);
return ret;
}
EXPORT_SYMBOL_GPL(line6_probe);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 49cd4a6..5ab9e0c 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -307,6 +307,9 @@ static int podhd_init(struct usb_line6 *line6,
line6->disconnect = podhd_disconnect;
+ init_timer(&pod->startup_timer);
+ INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
/* create sysfs entries: */
err = snd_card_add_dev_attr(line6->card, &podhd_dev_attr_group);
@@ -330,8 +333,6 @@ static int podhd_init(struct usb_line6 *line6,
}
/* init device and delay registering */
- init_timer(&pod->startup_timer);
- INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
podhd_startup(pod);
return 0;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6c3d62f..3501ff9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -216,7 +216,6 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
int index, char *buf, int maxlen)
{
int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
- buf[len] = 0;
return len;
}
@@ -2505,6 +2504,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
+ /* kill pending URBs */
+ snd_usb_mixer_disconnect(mixer);
+
kfree(mixer->id_elems);
if (mixer->urb) {
kfree(mixer->urb->transfer_buffer);
@@ -2938,8 +2940,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
{
- usb_kill_urb(mixer->urb);
- usb_kill_urb(mixer->rc_urb);
+ if (mixer->disconnected)
+ return;
+ if (mixer->urb)
+ usb_kill_urb(mixer->urb);
+ if (mixer->rc_urb)
+ usb_kill_urb(mixer->rc_urb);
+ mixer->disconnected = true;
}
#ifdef CONFIG_PM
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 2b4b067..545d99b 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
struct urb *rc_urb;
struct usb_ctrlrequest *rc_setup_packet;
u8 rc_buffer[6];
+
+ bool disconnected;
};
#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 286efc3..7613b9e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1352,6 +1352,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index a7cda4a..0aeabfe 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -26,6 +26,7 @@
#include <linux/qmi_encdec.h>
#include <soc/qcom/msm_qmi_interface.h>
#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/usb/audio-v3.h>
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index bf618e1..e7b934f 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
}
pg = get_order(read_size);
- sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->s) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
goto out;
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
pg = get_order(write_size);
sk->write_page =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
+ __GFP_NOWARN, pg);
if (!sk->write_page) {
snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
usb_stream_free(sk);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4e778ea..415a9c3 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ __add_event(struct list_head *list, int *idx,
event_attr_init(attr);
- evsel = perf_evsel__new_idx(attr, (*idx)++);
+ evsel = perf_evsel__new_idx(attr, *idx);
if (!evsel)
return NULL;
+ (*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 3e199b5..9664b1f 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2003,8 +2003,10 @@ int snapshot_gfx_mhz(void)
if (fp == NULL)
fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
- else
+ else {
rewind(fp);
+ fflush(fp);
+ }
retval = fscanf(fp, "%d", &gfx_cur_mhz);
if (retval != 1)
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 5c495ad..d8ac9ba 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -48,18 +48,18 @@
NAME=$(basename "$FW")
-if printf '\000' >"$DIR"/trigger_request; then
+if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: empty filename should not succeed" >&2
exit 1
fi
-if printf '\000' >"$DIR"/trigger_async_request; then
+if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then
echo "$0: empty filename should not succeed (async)" >&2
exit 1
fi
# Request a firmware that doesn't exist, it should fail.
-if echo -n "nope-$NAME" >"$DIR"/trigger_request; then
+if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then
echo "$0: firmware shouldn't have loaded" >&2
exit 1
fi
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
index b9983f8..01c626a 100755
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -64,9 +64,33 @@
echo "ABCD0123" >"$FW"
NAME=$(basename "$FW")
+DEVPATH="$DIR"/"nope-$NAME"/loading
+
# Test failure when doing nothing (timeout works).
-echo 1 >/sys/class/firmware/timeout
-echo -n "$NAME" >"$DIR"/trigger_request
+echo -n 2 >/sys/class/firmware/timeout
+echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
+
+# Give the kernel some time to load the loading file, must be less
+# than the timeout above.
+sleep 1
+if [ ! -f $DEVPATH ]; then
+ echo "$0: fallback mechanism immediately cancelled"
+ echo ""
+ echo "The file never appeared: $DEVPATH"
+ echo ""
+ echo "This might be a distribution udev rule setup by your distribution"
+ echo "to immediately cancel all fallback requests, this must be"
+ echo "removed before running these tests. To confirm look for"
+ echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
+ echo "and see if you have something like this:"
+ echo ""
+ echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
+ echo ""
+ echo "If you do remove this file or comment out this line before"
+ echo "proceeding with these tests."
+ exit 1
+fi
+
if diff -q "$FW" /dev/test_firmware >/dev/null ; then
echo "$0: firmware was not expected to match" >&2
exit 1
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 03f1fa4..cbb0564 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -6,10 +6,18 @@
*/
#include <sys/types.h>
-#include <asm/siginfo.h>
-#define __have_siginfo_t 1
-#define __have_sigval_t 1
-#define __have_sigevent_t 1
+
+/*
+ * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
+ * we need to use the kernel's siginfo.h file and trick glibc
+ * into accepting it.
+ */
+#if !__GLIBC_PREREQ(2, 26)
+# include <asm/siginfo.h>
+# define __have_siginfo_t 1
+# define __have_sigval_t 1
+# define __have_sigevent_t 1
+#endif
#include <errno.h>
#include <linux/filter.h>
@@ -676,7 +684,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
syscall(__NR_getpid);
}
-static struct siginfo TRAP_info;
+static siginfo_t TRAP_info;
static volatile int TRAP_nr;
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{