Merge "ARM: dts: msm: Increase transfer time for fhd panel on sdm670"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 59c3356..9a9a6d0 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -154,6 +154,7 @@
 	* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
 	  after enabling the subunit.
 
+	* qcom,dump-enable: boolean, indicating whether MCMB data should be dumped.
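+	  For illustration, this would appear in a TPDM node as a bare
+	  boolean flag (the node name, address and other properties shown
+	  here are placeholders only):
+
+		tpdm_mcmb: tpdm@7054000 {
+			compatible = "qcom,coresight-tpdm";
+			reg = <0x7054000 0x1000>;
+			qcom,dump-enable;
+		};
+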
 * Optional properties for CTI:
 
 	* qcom,cti-gpio-trigin: cti trigger input driven by gpio.
diff --git a/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
new file mode 100644
index 0000000..36e1a69
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpmh-master-stat.txt
@@ -0,0 +1,18 @@
+* RPMH Master Stats
+
+Different subsystems maintain master data in SMEM.
+It provides information about the individual masters at any given
+time, such as "system sleep counts", "system sleep last entered at"
+and "system sleep accumulated duration". These stats can be
+shown to the user through the debugfs interface of the kernel.
+To achieve this, a device tree node has been added.
+
+The required properties for rpmh-master-stats are:
+
+- compatible: "qcom,rpmh-master-stats".
+
+Example:
+
+qcom,rpmh-master-stats {
+	compatible = "qcom,rpmh-master-stats";
+};
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index bdba526..7cfc44b 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -13,6 +13,7 @@
  * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
  * "android,firmware" for firmware image
  * "android,vbmeta" for setting system properties for verified boot.
+ * "android,system" for system partition properties.
 - clocks: One to three clocks may be required based on compatible.
  * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
  * Core, iface, and bus clocks required for "qcom,scm"
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index 69174ca..cb38d5a 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -124,6 +124,12 @@
 				mask   - mask for the relevant bits in the efuse register.
 				shift  - number of bits to right shift to get the speed bin
 				value.
+- qcom,gpu-disable-fuse:	GPU disable fuse
+				<offset mask shift>
+				offset - offset of the efuse register from the base.
+				mask   - mask for the relevant bits in the efuse register.
+				shift  - number of bits to right shift to get the disable_gpu
+				fuse bit value.
 - qcom,highest-bank-bit:
 				Specify the bit of the highest DDR bank. This
 				is programmed into protected registers and also
@@ -191,6 +197,9 @@
 - qcom,gpu-quirk-hfi-use-reg:
 				Use registers to replace DCVS HFI message to avoid GMU failure
 				to access system memory during IFPC
+- qcom,gpu-quirk-limit-uche-gbif-rw:
+				Limit number of read and write transactions from UCHE block to
+				GBIF to avoid possible deadlock between GBIF, SMMU and MEMNOC.
 
 KGSL Memory Pools:
 - qcom,gpu-mempools:		Container for sets of GPU mempools.Multiple sets
diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
index 07a2504..f569db5 100644
--- a/Documentation/devicetree/bindings/hwmon/jc42.txt
+++ b/Documentation/devicetree/bindings/hwmon/jc42.txt
@@ -34,6 +34,10 @@
 
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+			 This is not supported on all chips.
+
 Example:
 
 temp-sensor@1a {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
new file mode 100644
index 0000000..131942d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/synaptics_dsx_i2c.txt
@@ -0,0 +1,62 @@
+Synaptics DSXV27 touch controller
+
+The Synaptics touch controller is connected to the host processor via I2C.
+The controller generates interrupts when the user touches the panel. The
+host processor is expected to read the touch coordinates over I2C and
+pass the coordinates to the rest of the system.
+
+Required properties:
+
+ - compatible		           : should be "synaptics,dsx-i2c".
+ - reg			               : i2c slave address of the device.
+ - interrupt-parent	           : parent of interrupt.
+ - synaptics,irq-gpio	       : irq gpio.
+ - synaptics,reset-gpio	       : reset gpio.
+ - vdd_supply			   : digital voltage power supply needed to power device.
+ - avdd_supply			   : analog voltage power supply needed to power device.
+ - synaptics,pwr-reg-name	   : power reg name of digital voltage.
+ - synaptics,bus-reg-name	   : bus reg name of analog voltage.
+
+Optional property:
+ - synaptics,ub-i2c-addr       : addr of ub-i2c.
+ - synaptics,irq-on-state      : status of irq gpio.
+ - synaptics,cap-button-codes  : virtual key code mappings to be used.
+ - synaptics,vir-button-codes  : virtual key code and the response region on panel.
+ - synaptics,x-flip		       : modify orientation of the x axis.
+ - synaptics,y-flip		       : modify orientation of the y axis.
+ - synaptics,reset-delay-ms	   : reset delay for controller (ms), default 100.
+ - synaptics,power-delay-ms	   : power delay for controller (ms), default 100.
+ - synaptics,reset-active-ms	   : reset active time for controller (ms), default 20.
+ - synaptics,max-y-for-2d	   : maximal y value of the panel.
+ - clock-names			: Clock names used for secure touch. They are: "iface_clk", "core_clk"
+ - clocks			: Defined if 'clock-names' DT property is defined. These clocks
+				  are associated with the underlying I2C bus.
+
+Example:
+	i2c@78b7000 {
+		status = "ok";
+		synaptics@4b {
+			compatible = "synaptics,dsx-i2c";
+			reg = <0x4b>;
+			interrupt-parent = <&tlmm>;
+			interrupts = <65 0x2008>;
+			vdd_supply = <&pmtitanium_l17>;
+			avdd_supply = <&pmtitanium_l6>;
+			synaptics,pwr-reg-name = "vdd";
+			synaptics,bus-reg-name = "avdd";
+			synaptics,ub-i2c-addr = <0x2c>;
+			synaptics,irq-gpio = <&tlmm 65 0x2008>;
+			synaptics,reset-gpio = <&tlmm 99 0x2008>;
+			synaptics,irq-on-state = <0>;
+			synaptics,power-delay-ms = <200>;
+			synaptics,reset-delay-ms = <200>;
+			synaptics,reset-active-ms = <20>;
+			synaptics,max-y-for-2d = <1919>; /* remove if no virtual buttons */
+			synaptics,cap-button-codes = <139 172 158>;
+			synaptics,vir-button-codes = <139 180 2000 320 160 172 540 2000 320 160 158 900 2000 320 160>;
+			/* Underlying clocks used by secure touch */
+			clock-names = "iface_clk", "core_clk";
+			clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+				<&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt
new file mode 100644
index 0000000..2865019
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-vibrator-ldo.txt
@@ -0,0 +1,50 @@
+Qualcomm Technologies, Inc. Vibrator-LDO
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) Vibrator-LDO is a peripheral
+on some QTI PMICs. It can be interfaced with the host processor via SPMI.
+
+The Vibrator-LDO peripheral supports an Eccentric Rotating Mass (ERM) vibrator.
+
+Properties:
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: "qcom,qpnp-vibrator-ldo".
+
+- reg
+	Usage:      required
+	Value type: <u32>
+	Definition: Base address of vibrator-ldo peripheral.
+
+- qcom,vib-ldo-volt-uv
+	Usage:      required
+	Value type: <u32>
+	Definition: The optimal voltage requirement of the vibrator motor for
+		    a normal vibration. Value is specified in microvolts.
+
+- qcom,disable-overdrive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Do not apply overdrive voltage.
+
+- qcom,vib-overdrive-volt-uv
+	Usage:      optional; not required if qcom,disable-overdrive is present
+	Value type: <u32>
+	Definition: The voltage in microvolts used as overdrive factor for
+		    improving motor reactivity at the start of vibration.
+		    If this property is not specified, a default value of
+		    2 times the value specified in qcom,vib-ldo-volt-uv
+		    property is used.
+
+=======
+Example
+=======
+
+pmi632_vib: qcom,vibrator@5700 {
+	compatible = "qcom,qpnp-vibrator-ldo";
+	reg = <0x5700 0x100>;
+	qcom,vib-ldo-volt-uv = <1504000>;
+	qcom,disable-overdrive;
+	qcom,vib-overdrive-volt-uv = <3544000>;
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index db34047..5a92bf6 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -108,6 +108,10 @@
 				<rd_lut, wr_lut> indicating the safe lut
 				settings for the inline rotator sspp and
 				writeback client.
+- qcom,mdss-rot-qos-cpu-mask: A u32 value indicating desired PM QoS CPU
+				affine mask.
+- qcom,mdss-rot-qos-cpu-dma-latency: A u32 value indicating desired PM QoS CPU DMA
+				latency in usec.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
@@ -188,6 +192,9 @@
 		qcom,mdss-rot-danger-lut = <0x0 0x0>;
 		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0>;
 
+		qcom,mdss-rot-qos-cpu-mask = <0xf>;
+		qcom,mdss-rot-qos-cpu-dma-latency = <75>;
+
 		qcom,mdss-inline-rot-qos-lut = <0x0 0x0 0x00112233 0x44556677>;
 		qcom,mdss-inline-rot-danger-lut = <0x0 0x0000ffff>;
 		qcom,mdss-inline-rot-safe-lut = <0x0 0x0000ff00>;
diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
index 8e56180..eff3d82 100644
--- a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
+++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt
@@ -11,6 +11,16 @@
 - interrupts: Interrupt number used by this controller
 - io-macro-info: Internal io-macro-info
 
+Optional:
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+                             master-id, slave-id, arbitrated bandwidth
+                             in KBps, instantaneous bandwidth in KBps
+- qcom,bus-vector-names: specifies string IDs for the corresponding bus vectors
+                         in the same order as the qcom,msm-bus,vectors-KBps property.
+
 Internal io-macro-info:
 - io-macro-bypass-mode: <0 or 1> internal or external delay configuration
 - io-interface: <rgmii/mii/rmii> PHY interface used
@@ -35,6 +45,14 @@
 				"tx-ch4-intr", "rx-ch0-intr",
 				"rx-ch1-intr", "rx-ch2-intr",
 				"rx-ch3-intr";
+			qcom,msm-bus,name = "emac";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
+			qcom,msm-bus,vectors-KBps =
+				<98 512 1250 0>, <1 781 0 40000>,  /* 10Mbps vote */
+				<98 512 12500 0>, <1 781 0 40000>,  /* 100Mbps vote */
+				<98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+			qcom,bus-vector-names = "10", "100", "1000";
 			io-macro-info {
 				io-macro-bypass-mode = <0>;
 				io-interface = "rgmii";
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index abbc560..793a965 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -95,6 +95,7 @@
 - qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
 - qcom,override-acc-1: Override the default ACC settings with this value if present.
 - qcom,minidump-id: Unique id for each subsystem
+- qcom,reset-clk: Enable clock after MSS restart
 
 One child node to represent the MBA image may be specified, when the MBA image
 needs to be loaded in a specifically carved out memory region.
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
index 4a69e03..f8329a9 100644
--- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -68,6 +68,7 @@
 			first segment to end address of last segment will be collected without
 			leaving any hole in between.
 - qcom,ignore-ssr-failure: Boolean. If set, SSR failures are not considered fatal.
+- qcom,mas-crypto: Reference to the bus master of the crypto core.
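+			For illustration, this is expressed as a single phandle,
+			e.g. qcom,mas-crypto = <&mas_crypto>; (the phandle name
+			is a placeholder).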
 
 Example:
 	qcom,venus@fdce0000 {
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index 0c5f696..fba7204 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -13,7 +13,7 @@
 Optional properties:
 - qcom,fastrpc-glink:	Flag to use glink instead of smd for IPC
 - qcom,rpc-latency-us:	FastRPC QoS latency vote
-- qcom,adsp-remoteheap-vmid:  FastRPC remote heap VMID number
+- qcom,adsp-remoteheap-vmid:  FastRPC remote heap VMID list
 
 Optional subnodes:
 - qcom,msm_fastrpc_compute_cb :	Child nodes representing the compute context
@@ -29,7 +29,7 @@
 		compatible = "qcom,msm-fastrpc-adsp";
 		qcom,fastrpc-glink;
 		qcom,rpc-latency-us = <2343>;
-		qcom,adsp-remoteheap-vmid = <37>;
+		qcom,adsp-remoteheap-vmid = <22 37>;
 
 		qcom,msm_fastrpc_compute_cb_1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
new file mode 100644
index 0000000..1c4dfbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt
@@ -0,0 +1,978 @@
+QTI CPR (Core Power Reduction) Regulator
+
+The CPR regulator device is for QTI RBCPR (RapidBridge CPR) on the
+	application processor core. It takes a voltage corner level
+	as input and converts it to an actual voltage based on
+	suggestions from the factory production process. When CPR is
+	enabled for the application processor core, it will suggest
+	scaling the voltage up or down for the best performance and
+	power of the core. The scaling based on the factory production
+	process is called PVS (Process Voltage Scaling), with efuse
+	bits indicating what bin (and voltage range) a chip is in.
+
+Required properties:
+- compatible:			Must be "qcom,cpr-regulator"
+- reg:				Register addresses for RBCPR, RBCPR clock
+				select, PVS and CPR eFuse address
+- reg-names:			Register names. Must be "rbcpr" and "efuse_addr".
+				"rbcpr_clk" is optional.
+- regulator-name:		A string used to describe the regulator
+- interrupts:			Interrupt line from RBCPR to interrupt controller.
+- qcom,cpr-fuse-corners:	Number of fuse corners present.  Many other properties
+				are sized based upon this value.
+- regulator-min-microvolt:	Minimum corner value which should be 1 to
+				represent the lowest supported corner.
+- regulator-max-microvolt:	Maximum corner value which should be equal to
+				qcom,cpr-fuse-corners if consumers request fuse
+				corners or the length of qcom,cpr-corner-map if
+				consumers request virtual corners.
+- qcom,cpr-voltage-ceiling:	Array of ceiling voltages in microvolts for fuse
+				corners ordered from lowest voltage corner to highest
+				voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+- qcom,cpr-voltage-floor:	Array of floor voltages in microvolts for fuse
+				corners ordered from lowest voltage corner to highest
+				voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+- vdd-apc-supply:		Regulator to supply VDD APC power
+- qcom,vdd-apc-step-up-limit:	Limit of vdd-apc-supply steps for scaling up.
+- qcom,vdd-apc-step-down-limit:	Limit of vdd-apc-supply steps for scaling down.
+- qcom,cpr-ref-clk:		The reference clock in kHz.
+- qcom,cpr-timer-delay:		The delay in microseconds for the timer interval.
+- qcom,cpr-timer-cons-up:	Number of consecutive timer intervals (qcom,cpr-timer-delay)
+				that must occur before issuing an UP interrupt.
+- qcom,cpr-timer-cons-down:	Number of consecutive timer intervals (qcom,cpr-timer-delay)
+				that must occur before issuing a DOWN interrupt.
+- qcom,cpr-irq-line:		Internal interrupt route signal of RBCPR, one of 0, 1 or 2.
+- qcom,cpr-step-quotient:	Defines the number of CPR quotient (i.e. Ring Oscillator(RO)
+				count) per vdd-apc-supply output voltage step.  A single
+				integer value may be specified which is to be used for all
+				RO's.  Alternatively, 8 integer values may be specified which
+				define the step quotients for RO0 to RO7 in order.
+- qcom,cpr-up-threshold:	The threshold for CPR to issue an interrupt when
+				error_steps exceeds it while stepping up.
+- qcom,cpr-down-threshold:	The threshold for CPR to issue an interrupt when
+				error_steps exceeds it while stepping down.
+- qcom,cpr-idle-clocks:		Idle clock cycles RO can be in.
+- qcom,cpr-gcnt-time:		The time for gate count in microseconds.
+- qcom,cpr-apc-volt-step:	The voltage in microvolt per CPR step, such as 5000uV.
+- qcom,cpr-fuse-row:		Array of row number of CPR fuse and method to read that row. It should have
+				index and value like this:
+				 [0] => the fuse row number
+				 [1] => fuse reading method, 0 for direct reading or 1 for SCM reading
+- qcom,cpr-fuse-target-quot:	Array of bit positions in the primary CPR fuse row defined
+				by qcom,cpr-fuse-row for the target quotients of each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the quotient parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage
+				corner.  This property must be of length defined by
+				qcom,cpr-fuse-corners.
+- qcom,cpr-fuse-ro-sel:		Array of bit positions in the primary CPR fuse row defined
+				by qcom,cpr-fuse-row for the ring oscillator selection for each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the RO select parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage
+				corner.  This property must be of length defined by
+				qcom,cpr-fuse-corners.
+
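+For illustration, a node using only the required properties might look
+like the following (all addresses, corner counts, voltages and fuse
+positions below are placeholder values):
+
+	apc_vreg_corner: regulator@f9018000 {
+		compatible = "qcom,cpr-regulator";
+		reg = <0xf9018000 0x1000>, <0xfc4b8000 0x1000>;
+		reg-names = "rbcpr", "efuse_addr";
+		regulator-name = "apc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <3>;
+		interrupts = <0 15 0>;
+		qcom,cpr-fuse-corners = <3>;
+		qcom,cpr-voltage-ceiling = <1050000 1150000 1280000>;
+		qcom,cpr-voltage-floor = <1050000 1050000 1100000>;
+		vdd-apc-supply = <&pm8226_s2>;
+		qcom,vdd-apc-step-up-limit = <1>;
+		qcom,vdd-apc-step-down-limit = <1>;
+		qcom,cpr-ref-clk = <19200>;
+		qcom,cpr-timer-delay = <5000>;
+		qcom,cpr-timer-cons-up = <1>;
+		qcom,cpr-timer-cons-down = <2>;
+		qcom,cpr-irq-line = <0>;
+		qcom,cpr-step-quotient = <15>;
+		qcom,cpr-up-threshold = <1>;
+		qcom,cpr-down-threshold = <2>;
+		qcom,cpr-idle-clocks = <5>;
+		qcom,cpr-gcnt-time = <1>;
+		qcom,cpr-apc-volt-step = <5000>;
+		qcom,cpr-fuse-row = <138 0>;
+		qcom,cpr-fuse-target-quot = <24 36 48>;
+		qcom,cpr-fuse-ro-sel = <54 38 41>;
+	};
+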
+Optional properties:
+- vdd-mx-supply:		Regulator to supply memory power as dependency
+				of VDD APC.
+- qcom,vdd-mx-vmax:		The maximum voltage in uV for vdd-mx-supply. This
+				is required when vdd-mx-supply is present.
+- qcom,vdd-mx-vmin-method:	The method to determine the minimum voltage for
+				vdd-mx-supply, which can be one of the following
+				choices compared with VDD APC:
+				  0 => equal to the voltage(vmin) of VDD APC
+				  1 => equal to PVS corner ceiling voltage
+				  2 => equal to slow speed corner ceiling
+				  3 => equal to qcom,vdd-mx-vmax
+				  4 => equal to VDD_APC fuse corner mapped vdd-mx voltage
+				  5 => equal to VDD_APC virtual corner mapped vdd-mx voltage
+				This is required when vdd-mx-supply is present.
+- qcom,vdd-mx-corner-map:	Array of integers which defines the mapping from VDD_APC
+				voltage corners to vdd-mx-supply voltages.
+				Each element is a voltage to request from vdd-mx for the
+				corresponding fuse corner or virtual corner. The elements
+				in the array are ordered from lowest voltage corner
+				to highest voltage corner.  The length of this property
+				depends on the value of qcom,vdd-mx-vmin-method property.
+				When qcom,vdd-mx-vmin-method property has a value of 4, the length
+				of this property must be equal to the value defined by qcom,cpr-fuse-corners.
+				When qcom,vdd-mx-vmin-method property has a value of 5, the length of
+				this property must be equal to the number of elements in the qcom,cpr-corner-map
+				property.
+- qcom,pvs-voltage-table: 	Array of N-tuples in which each tuple specifies the
+				initial voltage in microvolts of the PVS bin for each
+				fuse voltage corner.  The location or 0-based index
+				of a tuple in the list corresponds to the PVS bin number.
+				Each tuple must be of length defined by qcom,cpr-fuse-corners.
+				A given cpr-regulator device must have either
+				qcom,pvs-voltage-table specified or
+				qcom,cpr-fuse-init-voltage (and its associated properties).
+- qcom,pvs-fuse-redun-sel:	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to decide if the redundant PVS fuse bits would be
+				used instead of the original bits and method to read fuse row, reading
+				register through SCM or directly. The 5 elements with index [0..4] are:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => the value to indicate redundant selection
+				  [4] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				When the value of the fuse bits specified by the first 3 elements equals
+				the value in the 4th element, the redundant PVS fuse bits should be selected.
+				Otherwise, the original PVS bits should be selected. If the 5th
+				element is 0, read the fuse row from register directly. Otherwise,
+				read it through SCM.
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,pvs-fuse:		Array of 4 elements to indicate the bits for PVS fuse and read method.
+				The array should have index and value like this:
+				  [0] => the PVS fuse row number
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,pvs-fuse-redun:		Array of 4 elements to indicate the bits for redundant PVS fuse.
+				The array should have index and value like this:
+				  [0] => the redundant PVS fuse row number
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				This property is required if qcom,pvs-voltage-table is present.
+- qcom,cpr-fuse-redun-sel:	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to decide if the redundant CPR fuse bits would be
+				used instead of the original bits and method to read fuse row, using SCM
+				to read or read register directly. The 5 elements with index [0..4] are:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB bit position of the bits
+				  [2] => number of bits
+				  [3] => the value to indicate redundant selection
+				  [4] => fuse reading method, 0 for direct reading or 1 for SCM reading
+				When the value of the fuse bits specified by the first 3 elements equals
+				the value in the 4th element, the redundant CPR fuse bits should be selected.
+				Otherwise, the original CPR bits should be selected. If the 5th element
+				is 0, read the fuse row from register directly. Otherwise, read it through
+				SCM.
+- qcom,cpr-fuse-redun-row:	Array of row number of redundant CPR fuse and method to read that
+				row. It should have index and value like this:
+				 [0] => the redundant fuse row number
+				 [1] => the value to indicate reading the fuse row directly or using SCM
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-target-quot:	Array of bit positions in the redundant CPR fuse row defined
+				by qcom,cpr-fuse-redun-row for the target quotients of each
+				fuse corner.  Each bit position corresponds to the LSB
+				of the quotient parameter.  The elements in the array
+				are ordered from lowest voltage corner to highest voltage corner.
+				This property must be of length defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-ro-sel:	Array of bit positions in the redundant CPR fuse row defined
+				by qcom,cpr-fuse-redun-row for the ring oscillator select of each
+				fuse corner.  Each bit position corresponds to the LSB of the RO
+				select parameter.  The elements in the array are ordered from
+				lowest voltage corner to highest voltage corner.
+				This property must be of length defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-redun-sel is present.
+- qcom,cpr-fuse-redun-bp-cpr-disable:	Redundant bit position of the bit to indicate if CPR should be disabled
+- qcom,cpr-fuse-redun-bp-scheme:	Redundant bit position of the bit to indicate if it's a global/local scheme
+					This property is required if cpr-fuse-redun-bp-cpr-disable
+					is present, and vice versa.
+- qcom,cpr-fuse-bp-cpr-disable:	Bit position of the bit to indicate if CPR should be disabled
+- qcom,cpr-fuse-bp-scheme:     Bit position of the bit to indicate if it's a global/local scheme
+- qcom,cpr-fuse-revision:	Array of 4 integer elements which define the location of the bits for
+				the CPR fusing revision fuse parameter.  The 4 elements are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => the number of bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The fusing revision value is used to determine which specific adjustments
+				are required on some chips.
+- qcom,cpr-fuse-target-quot-size:	Array of target quotient parameter bit sizes in the primary
+				or redundant CPR fuse row for each fuse corner.  The elements in the
+				array are ordered from lowest voltage corner to highest voltage corner.
+				If this property is not present, then all target quotient fuse values
+				are assumed to be the default length of 12 bits.
+- qcom,cpr-fuse-target-quot-scale:	Array of doubles which defines the scaling coefficients to decode
+				the target quotients of each fuse corner.  The first element in each
+				double represents the offset to add to the scaled quotient.  The second
+				element represents the multiplier to scale the quotient by.  For example,
+				given a tuple <A B>, quot_decoded = A + (B * quot_raw).
+				The doubles in the array are ordered from lowest voltage corner to highest
+				voltage corner.  This property must contain a number of doubles equal to
+				the value of qcom,cpr-fuse-corners.  If this property is not present,
+				then all target quotient parameters are assumed to have an offset of 0
+				and a multiplier of 1 (i.e. no decoding needed).
+- qcom,cpr-enable:		Present: CPR enabled by default.
+				Not Present: CPR disabled by default.
+- qcom,cpr-fuse-cond-min-volt-sel:	Array of 5 elements to indicate where to read the bits,  what value to
+				compare with in order to decide if the conditional minimum apc voltage needs
+				to be applied and the fuse reading method.
+				The 5 elements with index[0..4] are:
+				[0] => the fuse row number;
+				[1] => LSB bit position of the bits;
+				[2] => number of the bits;
+				[3] => the expected data to read;
+				[4] => fuse reading method, 0 for direct reading or 1 for SCM reading;
+				When the value of the fuse bits specified by the first 3 elements is not equal to
+				the value in the 4th element, then set the apc voltage for all parts running
+				at each voltage corner to be not lower than the voltage defined
+				using "qcom,cpr-cond-min-voltage".
+- qcom,cpr-cond-min-voltage:	Minimum voltage in microvolts allowed for cpr-regulator output if the fuse bits
+				defined in qcom,cpr-fuse-cond-min-volt-sel have not been programmed with the
+				expected data. This is required if cpr-fuse-cond-min-volt-sel is present.
+- qcom,cpr-fuse-uplift-sel: 	Array of 5 elements to indicate where to read the bits, what value to
+				compare with in order to enable or disable the pvs voltage uplift workaround,
+				and the fuse reading method.
+				The 5 elements with index[0..4] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => number of the bits;
+				[3]: => the value to indicate if the apc pvs voltage uplift workaround will
+					be enabled;
+				[4]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+				When the value of the fuse bits specified by the first 3 elements equals the
+				value in the 4th element, the pvs voltage uplift workaround will be enabled.
+- qcom,speed-bin-fuse-sel:	Array of 4 elements to indicate where to read the speed bin of the processor,
+				and the fuse reading method.
+				The 4 elements with index[0..3] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => number of the bits;
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-voltage:	Uplift in microvolts used for increasing the pvs init voltage.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-max-volt:	Maximum voltage in microvolts used for pvs voltage uplift workaround to limit
+				the maximum pvs voltage.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-quotient:	Array of target quotient increments to add to the fused quotients of each
+				fuse corner as part of the PVS voltage uplift workaround.
+				The elements in the array are ordered from lowest voltage
+				corner to highest voltage corner.  This property must be of
+				length defined by qcom,cpr-fuse-corners.  This is required
+				if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-uplift-speed-bin:	The speed bin value corresponding to one type of processor which needs to apply the
+				pvs voltage uplift workaround.
+				This is required if cpr-fuse-uplift-disable-sel is present.
+- qcom,cpr-fuse-version-map:	Array of integer tuples which each match to a given combination of CPR
+				fuse parameter values.  Each tuple consists of N + 3 elements.  Where
+				N is the number of fuse corners defined by the qcom,cpr-fuse-corners
+				property.  The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU
+				[1]: =>		the PVS version of the CPU
+				[2]: =>		the CPR fuse revision
+				[3 - N+2]: =>	the ring oscillator select value of each fuse corner
+						ordered from lowest to highest
+				Any element in a tuple may use the value 0xffffffff as a wildcard
+				which will match against any fuse parameter value.  The first tuple
+				that matches against the fuse values read from hardware will be used.
+				This property is used by several properties to provide an index into
+				their lists.
+- qcom,cpr-allowed:		Integer values that specify whether the closed loop CPR is allowed or
+				not for a particular fuse revision. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-allowed must contain the same number
+				of integers as that of the number of tuples in qcom,cpr-fuse-version-map.
+				If the integer value has a value 0 for a particular fuse revision, then it
+				is treated as if the closed loop operation is disabled in the fuse. If the
+				integer value has a value 1 for a particular fuse revision, then the closed
+				loop operation is enabled for that fuse revision. If nothing is specified
+				for a particular fuse revision, then the closed loop operation is enabled
+				for that fuse revision by default.
+- qcom,cpr-quotient-adjustment:	Array of integer tuples of target quotient adjustments to add to the fused
+				quotients of each fuse corner.  The elements in a tuple are ordered from
+				lowest voltage corner to highest voltage corner.  Each tuple must be of
+				length defined by qcom,cpr-fuse-corners.  If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-quotient-adjustment must contain the
+				same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then
+				mapped one-to-one in the order specified.  E.g. if the second
+				qcom,cpr-fuse-version-map tuple matches for a given device, then the quotient
+				adjustments defined in the second qcom,cpr-quotient-adjustment tuple will
+				be applied.  If the qcom,cpr-fuse-version-map property is not specified,
+				then qcom,cpr-quotient-adjustment must contain a single tuple which is then
+				applied unconditionally.  If this property is specified, then the quotient
+				adjustment values are added to the target quotient values read from fuses
+				before writing them into the CPR GCNT target control registers.
+				This property can be used to add or subtract static voltage margin from the
+				regulator managed by the CPR controller.
+- qcom,cpr-init-voltage-adjustment:  Array of integer tuples of initial voltage adjustments in microvolts to
+				add to the fused initial voltage values of each fuse corner.  The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length defined by qcom,cpr-fuse-corners.  If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-init-voltage-adjustment must contain the same number of tuples as
+				qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then the initial voltage adjustments defined in the
+				second qcom,cpr-init-voltage-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-init-voltage-adjustment must contain a single tuple which is then
+				applied unconditionally.  This property can be used to add or subtract
+				static initial voltage margin from the regulator managed by the CPR
+				controller.
+- qcom,cpr-quot-offset-adjustment:	Array of integer tuples of target quotient offset adjustments to add
+				to the fused quotient offsets of each fuse corner. The elements in a tuple
+				are ordered from lowest voltage corner to highest voltage corner. Each tuple
+				must be of length defined by qcom,cpr-fuse-corners. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-quot-offset-adjustment must contain the
+				same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then
+				mapped one-to-one in the order specified.  E.g. if the second
+				qcom,cpr-fuse-version-map tuple matches for a given device, then the quotient
+				offset adjustments defined in the second qcom,cpr-quot-offset-adjustment tuple
+				will be applied. If the qcom,cpr-fuse-version-map property is not specified,
+				then qcom,cpr-quot-offset-adjustment must contain a single tuple which is then
+				applied unconditionally.  If this property is specified, then the quotient
+				offset adjustment values are added to the target quotient offset values read
+				from fuses.
+				This property can be used to add or subtract static quotient offset margin from
+				the regulator managed by the CPR controller.
+- qcom,cpr-clamp-timer-interval:	The number of 64 reference clock cycle blocks to delay for whenever
+					the clamp signal, sensor mask registers or sensor bypass registers
+					change.  The CPR controller loop is disabled during this delay.
+					Supported values are 0 to 255.  If this property is not specified,
+					then a value of 0 is assumed.  Note that if this property has a
+					value greater than 0, then software cannot accurately determine the
+					error_steps value that corresponds to a given CPR measurement
+					unless processor power collapsing is disabled.  If this property
+					has a value of 0, then the CPR controller loop is not disabled and
+					re-enabled while idle if the clamp signal changes.  Instead, it
+					will remain idle until software issues an ACK or NACK command.
+					This ensures that software can read the error_steps value which
+					resulted in the CPR up or down interrupt.  Setting this property to
+					a value greater than 0 is useful for resetting the CPR sensors of a
+					processor that uses BHS type voltage switches in order to avoid
+					anomalous CPR up interrupts when exiting from power collapse.
+- vdd-apc-optional-prim-supply:	Present: Regulator of highest priority to supply VDD APC power
+				Not Present: No such regulator.
+- vdd-apc-optional-sec-supply:	Present: Regulator of second highest priority to supply VDD APC power.
+				Not Present: No such regulator.
+- qcom,cpr-speed-bin-max-corners: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version to
+				the maximum virtual voltage corner corresponding to each fuse corner.  The value N
+				corresponds to the number of fuse corners specified by qcom,cpr-fuse-corners.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as
+						a wildcard to match any PVS version values.
+				[2 - N+1]: =>	the max virtual voltage corner value corresponding to each fuse corner
+						for this speed bin, ordered from lowest voltage corner to highest
+						voltage corner.
+				No CPR target quotient scaling is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property. If the property is
+				specified, then quotient scaling is enabled for the highest voltage corner. If this property is
+				not specified, then no quotient scaling can take place.
+- qcom,cpr-corner-map:		Array of elements of fuse corner value for each virtual corner.
+				The location or 1-based index of an element in the list corresponds to
+				the virtual corner value. For example, the first element in the list is the fuse corner
+				value that virtual corner 1 maps to.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-corner-frequency-map: Array of tuples in which a tuple describes a corner to application processor frequency
+				mapping.
+				The 2 elements in one tuple are:
+				[0]: => a virtual voltage corner.
+				[1]: => the application processor frequency in Hz corresponding to the virtual corner.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,pvs-version-fuse-sel:	Array of 4 elements to indicate where to read the pvs version of the processor,
+				and the fuse reading method.
+				The 4 elements with index[0..3] are:
+				[0]: => the fuse row number of the selector;
+				[1]: => LSB bit position of the bits;
+				[2]: => the number of bits;
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading.
+- qcom,cpr-voltage-ceiling-override: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version
+				to the ceiling voltage to apply for each virtual voltage corner.  The value N
+				corresponds to the number of virtual corners as specified by the number of elements
+				in the qcom,cpr-corner-map property.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any PVS version values.
+				[2 - N+1]: =>	the ceiling voltage value in microvolts corresponding to each virtual
+						corner for this speed bin, ordered from lowest voltage corner to
+						highest voltage corner.
+				No ceiling override is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property.  If the property is
+				specified and the speed bin + PVS version matches, then the per-virtual-corner ceiling
+				voltages will be used in place of the per-fuse-corner ceiling voltages defined in the
+				qcom,cpr-voltage-ceiling property.  If this property is not specified, then the
+				per-fuse-corner ceiling voltages will always be used.
+- qcom,cpr-voltage-floor-override: Array of (N+2)-tuples in which each tuple maps a CPU speed bin and PVS version
+				to the floor voltage to apply for each virtual voltage corner.  The value N
+				corresponds to the number of virtual corners as specified by the number of elements
+				in the qcom,cpr-corner-map property.
+				The elements in one tuple are:
+				[0]: =>		the speed bin of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any speed bin values.
+				[1]: =>		the PVS version of the CPU. It may use the value 0xffffffff as a
+						wildcard to match any PVS version values.
+				[2 - N+1]: =>	the floor voltage value in microvolts corresponding to each virtual
+						corner for this speed bin, ordered from lowest voltage corner to
+						highest voltage corner.
+				No floor override is applied on chips which have a speed bin + PVS version
+				pair that does not appear in one of the tuples in this property.  If the property is
+				specified and the speed bin + PVS version matches, then the per-virtual-corner floor
+				voltages will be used in place of the per-fuse-corner floor voltages defined in the
+				qcom,cpr-voltage-floor property.  If this property is not specified, then the
+				per-fuse-corner floor voltages will always be used.
+- qcom,cpr-floor-to-ceiling-max-range:	Array of integer tuples of floor-to-ceiling max range values in microvolts
+				to be subtracted from the ceiling voltage values of each virtual corner.
+				Supported values are those greater than or equal to 0, or (-1). The value 0 for a corner
+				implies that the floor value for that corner has to equal its ceiling value.
+				The value (-1) for a corner implies that no modification to the default floor voltage
+				is required. The elements in a tuple are ordered from lowest voltage corner to highest
+				voltage corner. Each tuple must be of the length equal to the number of virtual corners
+				as specified by the number of elements in the qcom,cpr-corner-map property. If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-floor-to-ceiling-max-range must contain the same number of
+				tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then the range values defined in the second
+				qcom,cpr-floor-to-ceiling-max-range tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-floor-to-ceiling-max-range must contain a single tuple which
+				is then applied unconditionally.
+- qcom,cpr-virtual-corner-init-voltage-adjustment: Array of integer tuples of voltage adjustments in microvolts to be
+				added to the initial voltage values of each virtual corner.  The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length equal to the number of virtual corners as
+				specified by the number of elements in the qcom,cpr-corner-map property. If the
+				qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-virtual-corner-init-voltage-adjustment must contain the same number of
+				tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then voltage adjustments defined in the second
+				qcom,cpr-virtual-corner-init-voltage-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-virtual-corner-init-voltage-adjustment must contain a single tuple which
+				is then applied unconditionally.
+- qcom,cpr-virtual-corner-quotient-adjustment: Array of integer tuples of quotient offsets to be added to
+				the scaled target quotient of each virtual corner. The elements
+				in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length equal to the number of virtual corners as
+				specified by the number of elements in the qcom,cpr-corner-map property.
+				If the qcom,cpr-fuse-version-map property is specified, then
+				qcom,cpr-virtual-corner-quotient-adjustment must contain the same number of tuples as
+				qcom,cpr-fuse-version-map.  These tuples are then mapped one-to-one in the
+				order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then quotient adjustments defined in the second
+				qcom,cpr-virtual-corner-quotient-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-virtual-corner-quotient-adjustment must contain a single tuple which is then
+				applied unconditionally.
+- qcom,cpr-cpus:		Array of CPU phandles which correspond to the cores that this cpr-regulator
+				device must monitor when adjusting the voltage and/or target quotient based
+				upon the number of online cores or make sure that one of them must be online
+				when performing de-aging measurements. This property must be specified in order to
+				utilize the qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment or
+				qcom,cpr-online-cpu-virtual-corner-quotient-adjustment or qcom,cpr-aging-sensor-id properties.
+- qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment:	Array of tuples where each tuple specifies
+				the voltage adjustment for each corner. These adjustments apply to the
+				initial voltage of each corner. The size of each tuple must be equal
+				to qcom,cpr-fuse-corners if consumers request fuse corners or the length of
+				qcom,cpr-corner-map if consumers request virtual corners. In each tuple, the
+				value corresponds to the voltage adjustment when running at that corner at
+				init, from lowest to highest. The tuples must be organized into 1 group if
+				qcom,cpr-fuse-version-map is not specified or the same number of groups as
+				the number of tuples in qcom,cpr-fuse-version-map. The i-th group of tuples
+				corresponds to the voltage adjustments for i-th fuse version map tuple. In
+				each group, there are 1 plus length of qcom,cpr-cpus tuples, each tuple
+				corresponds to the number of cores online, from 0 to the number of elements
+				in qcom,cpr-cpus.
+- qcom,cpr-online-cpu-init-voltage-as-ceiling:	Boolean which indicates that the ceiling voltage used for a
+				given virtual corner may be reduced to the per number of cores online,
+				per-virtual corner ceiling voltage value. This property takes precedence
+				over qcom,cpr-scaled-init-voltage-as-ceiling if both are specified.
+- qcom,cpr-online-cpu-virtual-corner-quotient-adjustment:	Array of tuples where each tuple specifies
+				the quotient adjustment for each corner. These adjustments will be applied
+				to each corner at run time. The size of each tuple must be equal to
+				qcom,cpr-fuse-corners if consumers request fuse corners or the length of
+				qcom,cpr-corner-map if consumers request virtual corners. In each tuple,
+				the value corresponds to the quotient adjustment when running at that corner,
+				from lowest to highest. The tuples must be organized into 1 group if
+				qcom,cpr-fuse-version-map is not specified or the same number of groups
+				as the number of tuples in qcom,cpr-fuse-version-map. The i-th group of
+				tuples corresponds to the quotient adjustments for i-th fuse version map
+				tuple. In each group, there are 1 plus length of qcom,cpr-cpus tuples,
+				each tuple corresponds to the number of cores online, from 0 to the
+				number of elements in qcom,cpr-cpus.
+- qcom,cpr-init-voltage-as-ceiling: Boolean which indicates that the ceiling voltage used for a given virtual
+				corner may be reduced to the per-fuse-corner initial voltage fuse value.
+- qcom,cpr-scaled-init-voltage-as-ceiling: Boolean which indicates that the ceiling voltage used for a given
+				virtual corner may be reduced to the interpolated, per-virtual-corner initial
+				voltage value.  Note that if both qcom,cpr-init-voltage-as-ceiling and
+				qcom,cpr-scaled-init-voltage-as-ceiling are specified, then
+				qcom,cpr-scaled-init-voltage-as-ceiling will take precedence since the interpolated
+				voltages are necessarily less than or equal to the fused initial voltage values.
+- qcom,cpr-voltage-scaling-factor-max: Array of values which define the maximum allowed scaling factor to apply
+				when calculating per-corner initial voltage values for each fuse corner.  The
+				array must be of length equal to the value of the qcom,cpr-fuse-corners property.
+				Each element in the array maps to the fuse corners in increasing order.
+				The elements have units of uV/MHz.  Each element corresponds to 'max_factor' in
+				the following equation:
+				init_voltage_min(f) = fuse_init_voltage(f) - (fuse_f_max - f) * max_factor
+				If this property is not specified, then the initial voltage for each virtual
+				corner will be set to the initial voltage of the associated fuse corner.
+- qcom,cpr-quot-adjust-scaling-factor-max: Array of values which define the maximum allowed scaling factor to
+				apply when calculating per-virtual-corner target quotients for each fuse
+				corner.  Two data formats are allowed for this property.  The primary one
+				requires that the array be of length equal to the value of the
+				qcom,cpr-fuse-corners property.  When using this format, each element in the
+				array maps to the fuse corners in increasing order.  The second, deprecated,
+				format allows for only a single element to be specified which defines the
+				maximum scaling factor for the highest fuse corner.  In this case, a value of
+				0 is assumed for the lower fuse corners.  The elements of this property have
+				units of QUOT/GHz.  Each element corresponds to 'max_factor' in the following
+				equation:
+				quot_min(f) = fuse_quot(f) - (fuse_f_max - f) * max_factor / 1000
+				where f and fuse_f_max have units of MHz.
+				This property is required if qcom,cpr-speed-bin-max-corners is present.
+- qcom,cpr-fuse-init-voltage:	Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get an initial voltage for a fuse corner. The fuse values
+				are encoded as voltage steps higher or lower than the voltages defined in
+				qcom,cpr-voltage-ceiling. Each step corresponds to the voltage defined by
+				the qcom,cpr-init-voltage-step property.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The quadruples are ordered from the lowest voltage fuse corner to the
+				highest voltage fuse corner.
+				A given cpr-regulator device must have either qcom,cpr-fuse-init-voltage
+				specified or qcom,pvs-voltage-table (and its associated properties).
+- qcom,cpr-fuse-redun-init-voltage: Array of quadruples in which each quadruple specifies a fuse location
+				to read in order to get the redundant initial voltage for a fuse corner.
+				This property is the same as qcom,cpr-fuse-init-voltage except that it is
+				only utilized if a chip is configured to use the redundant set of fuse
+				values.  This property is required if qcom,cpr-fuse-redun-sel and
+				qcom,cpr-fuse-init-voltage are specified.
+- qcom,cpr-init-voltage-ref:	Array of reference voltages in microvolts used when decoding the initial
+				voltage fuse values.  The elements in the array are ordered from lowest
+				voltage corner to highest voltage corner.  This property must be of length
+				defined by qcom,cpr-fuse-corners.
+				This property is required if qcom,cpr-fuse-init-voltage is present.
+- qcom,cpr-init-voltage-step:	The voltage step size in microvolts of the CPR initial voltage fuses described by the
+				qcom,cpr-fuse-init-voltage property.
+				This property is required if qcom,cpr-fuse-init-voltage is present.
+- mem-acc-supply:		Regulator to vote for the memory accelerator configuration.
+				Not Present: memory accelerator configuration not supported.
+- qcom,mem-acc-corner-map:	Array of integers which defines the mem-acc corner value for each
+				virtual corner. Each element is a mem-acc state for the corresponding virtual corner.
+				The elements in the array are ordered from lowest voltage corner to highest voltage corner.
+- qcom,fuse-remap-source:	Array of quadruples in which each quadruple specifies a fuse location to
+				remap.  The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => the number of bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The fuse bits for all quadruples are packed together in the order specified
+				into 64-bit virtual fuse rows beginning at the row number defined in the
+				qcom,fuse-remap-base-row property.  The remapped rows may be used by any
+				other properties.
+				Example:
+					qcom,fuse-remap-base-row = <1000>;
+					qcom,fuse-remap-source =
+							<13 57 2 0>,
+							<14 30 3 0>,
+							<20 1 7 0>,
+							<40 47 120 0>;
+
+					This results in the following bit remapping:
+
+					Row   Bits       Remap Row  Remap Bits
+					13    57..58  -->  1000      0..1
+					14    30..32  -->  1000      2..4
+					20     1..7   -->  1000      5..11
+					40    47..63  -->  1000     12..28
+					41     0..34  -->  1000     29..63
+					41    35..63  -->  1001      0..28
+					42     0..34  -->  1001     29..63
+					42    35..38  -->  1002      0..3
+
+					A tuple like this could then be used to reference some of the
+					concatenated bits from rows 13, 14, and 20:
+
+					qcom,cpr-fuse-init-voltage = <1000 0 6 0>;
+- qcom,fuse-remap-base-row:	Integer which defines the virtual row number to use as a base when remapping
+				fuse bits.  The remap base row number can be any value as long as it is
+				greater than all of the real row numbers addressed in other properties of
+				the cpr-regulator device node.  This property is required if
+				qcom,fuse-remap-source is specified.
+- qcom,cpr-quot-min-diff:	Integer which defines the minimum target-quotient difference between
+				the highest and (highest - 1) fuse corner to keep CPR enabled. If this
+				property is not specified a default value of 50 is used.
+- qcom,cpr-fuse-quot-offset:	Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get the quotient offset for a fuse corner. The fuse values
+				are encoded as the difference between quotients of that fuse corner and its
+				adjacent lower fuse corner divided by an unpacking multiplier value defined
+				under qcom,cpr-fuse-quot-offset-scale property.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The quadruples are ordered from the lowest fuse corner to the highest
+				fuse corner.
+				Quotient offset read from the fuse locations above can be overridden with
+				the property qcom,cpr-quot-adjust-scaling-factor-max.
+- qcom,cpr-fuse-quot-offset-scale:	Array of integer values which defines the multipliers to decode the quotient offsets
+				of each fuse corner. The elements in the array are ordered from the lowest voltage fuse corner
+				to the highest voltage fuse corner. If this property is not present, then all target quotient
+				parameters are assumed to have a multiplier of 1 (i.e. no decoding needed).
+- qcom,cpr-redun-fuse-quot-offset: Array of quadruples in which each quadruple specifies a fuse location to
+				read in order to get the redundant quotient offset for a fuse corner. This
+				property is the same as qcom,cpr-fuse-quot-offset except that it is only
+				utilized if a chip is configured to use the redundant set of fuse values.
+- qcom,cpr-fuse-min-quot-diff:	Array of values which define the minimum difference allowed between the adjusted
+				quotients of the fuse corners. The length of the array should be equal to the value
+				of the qcom,cpr-fuse-corners property. Each element in the array maps to the
+				fuse corners in increasing order.
+- qcom,cpr-min-quot-diff-adjustment:	Array of integer tuples of target quotient offsets to be added to
+				the adjusted target quotients of each fuse corner. When the quotient difference
+				between two adjacent fuse corners is insufficient, the quotient for the higher fuse corner is
+				replaced with that of the lower fuse corner plus the adjustment value.
+				The elements in a tuple are ordered from lowest voltage corner to highest voltage corner.
+				Each tuple must be of the length defined by qcom,cpr-fuse-corners.
+				If the qcom,cpr-fuse-version-map property is specified, then qcom,cpr-min-quot-diff-adjustment
+				must contain the same number of tuples as qcom,cpr-fuse-version-map.  These tuples are then mapped
+				one-to-one in the order specified.  E.g. if the second qcom,cpr-fuse-version-map tuple matches
+				for a given device, then the quotient adjustments defined in the
+				second qcom,cpr-min-quot-diff-adjustment tuple will be applied.  If the
+				qcom,cpr-fuse-version-map property is not specified, then
+				qcom,cpr-min-quot-diff-adjustment must contain a single tuple which is then
+				applied unconditionally. The qcom,cpr-min-quot-diff-adjustment property must be specified
+				if the qcom,cpr-fuse-min-quot-diff property is specified.
+- qcom,cpr-skip-voltage-change-during-suspend: Boolean property which indicates that the CPR voltage
+				should not be adjusted based upon the number of online cores while
+				entering or exiting system suspend.
+- rpm-apc-supply:		Regulator to notify RPM of the APC operating
+				corner
+- qcom,rpm-apc-corner-map:	Array of integers which define the mapping of
+				the RPM corner to the corresponding APC virtual
+				corner. This property must be defined if
+				'rpm-apc-supply' is present.
+- qcom,vsens-corner-map:	Array of integers which define the mapping of the VSENS corner to the
+				corresponding APC fuse corner. The qcom,vsens-corner-map and
+				vdd-vsense-corner-supply properties must both be specified for a given
+				cpr-regulator device or neither must be specified.
+- vdd-vsens-corner-supply:	Regulator to specify the current operating fuse corner to the Voltage Sensor.
+- vdd-vsens-voltage-supply:	Regulator to specify the corner floor/ceiling voltages to the Voltage Sensor.
+- qcom,cpr-aging-sensor-id:	Array of CPR sensor IDs to be used in the CPR de-aging algorithm. The number
+				of values should be equal to number of sensors selected for age calibration.
+				If this property is not specified, then the de-aging procedure is not enabled.
+- qcom,cpr-de-aging-allowed:	Array of integer values that specify whether the CPR de-aging procedure is
+				allowed for a particular fuse revision. If the qcom,cpr-fuse-version-map
+				property is specified, then qcom,cpr-de-aging-allowed must contain the same number
+				of elements as there are tuples in qcom,cpr-fuse-version-map. If qcom,cpr-fuse-version-map
+				is not specified, then qcom,cpr-de-aging-allowed must contain a single value that
+				is used unconditionally. An element value of 1 means that the CPR de-aging procedure
+				can be performed for parts with the corresponding fuse revision. An element value of 0
+				means that CPR de-aging cannot be performed.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ref-corner:	The vdd-apc-supply reference virtual voltage corner to be set during the CPR de-aging
+				measurements. This corner value is needed to set appropriate voltage on
+				the dependent voltage rails such as vdd-mx and mem-acc.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ref-voltage:	The vdd-apc-supply reference voltage in microvolts to be set during the
+				CPR de-aging measurements.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-max-aging-margin:	The maximum allowed aging voltage margin in microvolts. This is used to limit
+				the calculated aging voltage margin.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-non-collapsible-sensors: Array of CPR sensor IDs which are in the non-collapsible domain. Sensor IDs not
+				specified in the array are bypassed during the de-aging procedure. The number of
+				elements should be less than or equal to 32. The values of the array elements should
+				be greater than or equal to 0 and less than or equal to 31.
+				This property is required for power domains with a bypass mux present in HW.
+				It may also be required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-ro-scaling-factor:	The aging ring oscillator (RO) scaling factor with units of QUOT/V.
+				This value is used for calculating a voltage margin from RO measurements.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
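+				For illustration (hypothetical values): with a scaling factor of 3500 QUOT/V, an
+				aging quotient delta of 35 corresponds to a voltage margin of roughly
+				35 / 3500 V = 10 mV (10000 uV), which is then limited by qcom,cpr-max-aging-margin.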
+- qcom,cpr-ro-scaling-factor:	Array of scaling factors with units of QUOT/V for each ring oscillator ordered
+				from the lowest to the highest RO. These values are used to calculate
+				the aging voltage margin adjustment for all of the ROs. Since CPR2 supports
+				exactly 8 ROs, the array must contain 8 elements corresponding to RO0 through RO7 in order.
+				If a given RO is unused for a fuse corner, then its scaling factor may be specified as 0.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-aging-derate:	Array of scaling factors which define the amount of derating to apply to the reference
+				aging voltage margin adjustment for each of the fuse corners. Each element has units
+				of uV/mV. This property must be of length defined by qcom,cpr-fuse-corners.
+				The elements are ordered from the lowest to the highest fuse corner.
+				This property is required if the qcom,cpr-aging-sensor-id property has been specified.
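+				For illustration (hypothetical values): a derate element of 1250 uV/mV scales a
+				10 mV reference aging margin to 10 * 1250 = 12500 uV (12.5 mV) for that fuse
+				corner, while a derate element of 1000 leaves the margin unchanged.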
+- qcom,cpr-fuse-aging-init-quot-diff:	Array of quadruples in which each quadruple specifies a fuse location to read in
+				order to get an initial quotient difference. The difference between quot min and quot max
+				is fused as the initial quotient difference.
+				The 4 elements in one quadruple are:
+				[0]: => the fuse row number of the bits
+				[1]: => LSB bit position of the bits
+				[2]: => number of the bits
+				[3]: => fuse reading method, 0 for direct reading or 1 for SCM reading
+				The number of quadruples should be equal to the number of values specified in
+				the qcom,cpr-aging-sensor-id property. This property is required if
+				the qcom,cpr-aging-sensor-id property has been specified.
+- qcom,cpr-thermal-sensor-id:	TSENS hardware sensor-id of the sensor which
+				needs to be monitored.
+- qcom,cpr-disable-temp-threshold:	The TSENS temperature threshold in degrees Celsius at which CPR
+				closed-loop is disabled. CPR closed-loop will stay disabled as long as the
+				temperature is below this threshold. This property is required
+				only if 'qcom,cpr-thermal-sensor-id' is present.
+- qcom,cpr-enable-temp-threshold:	The TSENS temperature threshold in degrees Celsius at which CPR
+				closed-loop is enabled. CPR closed-loop will stay enabled above this
+				temperature threshold. This property is required only if
+				'qcom,cpr-thermal-sensor-id' is present.
+- qcom,disable-closed-loop-in-pc:	Boolean property to disable closed-loop CPR during
+				power-collapse. This can be enabled only for single core
+				designs. The property 'qcom,cpr-cpus' is required to enable this logic.
+Example:
+	apc_vreg_corner: regulator@f9018000 {
+		status = "okay";
+		compatible = "qcom,cpr-regulator";
+		reg = <0xf9018000 0x1000>, <0xfc4b8000 0x1000>;
+		reg-names = "rbcpr", "efuse_addr";
+		interrupts = <0 15 0>;
+		regulator-name = "apc_corner";
+		qcom,cpr-fuse-corners = <3>;
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <12>;
+
+		qcom,pvs-fuse = <22 6 5 1>;
+		qcom,pvs-fuse-redun-sel = <22 24 3 2 1>;
+		qcom,pvs-fuse-redun = <22 27 5 1>;
+
+		qcom,pvs-voltage-table =
+			<1050000 1150000 1350000>,
+			<1050000 1150000 1340000>,
+			<1050000 1150000 1330000>,
+			<1050000 1150000 1320000>,
+			<1050000 1150000 1310000>,
+			<1050000 1150000 1300000>,
+			<1050000 1150000 1290000>,
+			<1050000 1150000 1280000>,
+			<1050000 1150000 1270000>,
+			<1050000 1140000 1260000>,
+			<1050000 1130000 1250000>,
+			<1050000 1120000 1240000>,
+			<1050000 1110000 1230000>,
+			<1050000 1100000 1220000>,
+			<1050000 1090000 1210000>,
+			<1050000 1080000 1200000>,
+			<1050000 1070000 1190000>,
+			<1050000 1060000 1180000>,
+			<1050000 1050000 1170000>,
+			<1050000 1050000 1160000>,
+			<1050000 1050000 1150000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>,
+			<1050000 1050000 1140000>;
+		qcom,cpr-voltage-ceiling = <1050000 1150000 1280000>;
+		qcom,cpr-voltage-floor = <1050000 1050000 1100000>;
+		vdd-apc-supply = <&pm8226_s2>;
+		vdd-apc-optional-prim-supply = <&ncp6335d>;
+		vdd-apc-optional-sec-supply = <&fan53555>;
+		vdd-mx-supply = <&pm8226_l3_ao>;
+		qcom,vdd-mx-vmax = <1350000>;
+		qcom,vdd-mx-vmin-method = <1>;
+		qcom,vdd-apc-step-up-limit = <1>;
+		qcom,vdd-apc-step-down-limit = <1>;
+		qcom,cpr-ref-clk = <19200>;
+		qcom,cpr-timer-delay = <5000>;
+		qcom,cpr-timer-cons-up = <1>;
+		qcom,cpr-timer-cons-down = <2>;
+		qcom,cpr-irq-line = <0>;
+		qcom,cpr-step-quotient = <15>;
+		qcom,cpr-up-threshold = <1>;
+		qcom,cpr-down-threshold = <2>;
+		qcom,cpr-idle-clocks = <5>;
+		qcom,cpr-gcnt-time = <1>;
+		qcom,cpr-clamp-timer-interval = <1>;
+		qcom,cpr-apc-volt-step = <5000>;
+
+		qcom,vsens-corner-map = <1 2 2>;
+		vdd-vsens-corner-supply = <&vsens_apc0_corner>;
+		vdd-vsens-voltage-supply = <&vsens_apc0_voltage>;
+
+		rpm-apc-supply = <&rpm_apc_vreg>;
+		qcom,rpm-apc-corner-map = <4 4 5 5 7 7 7 7 7 7 7 7>;
+
+		qcom,cpr-fuse-row = <138 1>;
+		qcom,cpr-fuse-bp-cpr-disable = <36>;
+		qcom,cpr-fuse-bp-scheme = <37>;
+		qcom,cpr-fuse-target-quot = <24 12 0>;
+		qcom,cpr-fuse-target-quot-size = <12 12 12>;
+		qcom,cpr-fuse-ro-sel = <54 38 41>;
+		qcom,cpr-fuse-revision = <140 26 2 0>;
+		qcom,cpr-fuse-redun-sel = <138 57 1 1 1>;
+		qcom,cpr-fuse-redun-row = <139 1>;
+		qcom,cpr-fuse-redun-target-quot = <24 12 0>;
+		qcom,cpr-fuse-redun-ro-sel = <46 36 39>;
+		qcom,cpr-fuse-cond-min-volt-sel = <54 42 6 7 1>;
+		qcom,cpr-cond-min-voltage = <1140000>;
+		qcom,cpr-fuse-uplift-sel = <22 53 1 0 0>;
+		qcom,cpr-uplift-voltage = <50000>;
+		qcom,cpr-uplift-quotient = <0 0 120>;
+		qcom,cpr-uplift-max-volt = <1350000>;
+		qcom,cpr-uplift-speed-bin = <1>;
+		qcom,speed-bin-fuse-sel = <22 0 3 0>;
+		qcom,cpr-corner-map = <1 1 2 2 3 3 3 3 3 3 3 3>;
+		qcom,cpr-corner-frequency-map =
+				<1 300000000>,
+				<2 384000000>,
+				<3 600000000>,
+				<4 787200000>,
+				<5 998400000>,
+				<6 1094400000>,
+				<7 1190400000>,
+				<8 1305600000>,
+				<9 1344000000>,
+				<10 1401600000>,
+				<11 1497600000>,
+				<12 1593600000>;
+		qcom,pvs-version-fuse-sel = <22 4 2 0>;
+		qcom,cpr-speed-bin-max-corners =
+				<0 1 2 4 7>,
+				<1 1 2 4 12>,
+				<2 1 2 4 10>,
+				<5 1 2 4 14>;
+		qcom,cpr-fuse-target-quot-scale =
+				<0 1>,
+				<0 1>,
+				<0 1>;
+		qcom,cpr-quot-adjust-scaling-factor-max = <0 650 650>;
+		qcom,cpr-fuse-quot-offset =
+				<138 53 5 0>,
+				<138 53 5 0>,
+				<138 48 5 0>,
+				<138 58 5 0>;
+		qcom,cpr-fuse-redun-quot-offset =
+				<200 53 5 0>,
+				<200 53 5 0>,
+				<200 48 5 0>,
+				<200 58 5 0>;
+		qcom,cpr-fuse-init-voltage =
+				<27 36 6 0>,
+				<27 18 6 0>,
+				<27 0 6 0>;
+		qcom,cpr-fuse-redun-init-voltage =
+				<140 36 6 0>,
+				<140 18 6 0>,
+				<140 0 6 0>;
+		qcom,cpr-init-voltage-ref = <1050000 1150000 1280000>;
+		qcom,cpr-init-voltage-step = <10000>;
+		qcom,cpr-voltage-ceiling-override =
+				<1 1 1050000 1050000 1150000 1150000 1280000
+				     1280000 1280000 1280000 1280000 1280000
+				     1280000 1280000>;
+		qcom,cpr-voltage-floor-override =
+				<1 1 1050000 1050000 1050000 1050000 1060000
+				     1070000 1080000 1090000 1100000 1100000
+				     1100000 1100000>;
+		qcom,cpr-scaled-init-voltage-as-ceiling;
+
+		qcom,cpr-fuse-version-map =
+				<0xffffffff 0xffffffff 2 4 4 4>,
+				<0xffffffff 0xffffffff 2 6 6 6>,
+				<0xffffffff 0xffffffff 3 4 4 4>;
+		qcom,cpr-quotient-adjustment =
+				<0 0 (-210)>,
+				<0 0 (-60)>,
+				<0 0 (-94)>;
+		qcom,cpr-quot-offset-adjustment =
+				<0 0 (-5)>;
+		qcom,cpr-init-voltage-adjustment =
+				<0 0 (-100000)>,
+				<0 0 (-100000)>,
+				<0 0 (-45000)>;
+		qcom,cpr-fuse-min-quot-diff = <0 0 40>;
+		qcom,cpr-min-quot-diff-adjustment =
+					<0 0 0>,
+					<0 0 72>,
+					<0 0 104>;
+		qcom,cpr-floor-to-ceiling-max-range =
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1)>,
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1) (-1)>,
+			<(-1) (-1) (-1) (-1) (-1) (-1) (-1) 50000 50000 50000 50000 50000>;
+		qcom,cpr-virtual-corner-init-voltage-adjustment =
+			<0 0 0 (-10000) 0 0 0 0 0 0 0 0>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-20000)>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-30000)>;
+		qcom,cpr-virtual-corner-quotient-adjustment =
+			<0 0 0 100 0 0 0 0 0 0 0 0>,
+			<0 0 0 0 0 0 0 0 0 0 0 (-300)>,
+			<0 0 0 (-60) 0 0 0 0 0 0 0 0>;
+		qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment =
+			/* 1st fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 2nd fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 3rd fuse version tuple matched */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 0 CPUs online */
+			<0 0 0 (-10000) (-10000) (-10000) (-15000) (-15000) (-20000) 0 (-20000) (-30000) >, /* 1 CPUs online */
+			<0 0 0 (-5000) (-5000) (-5000) (-5000) (-5000) (-10000) 0 (-10000) (-10000) >, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>; /* 4 CPUs online */
+		qcom,cpr-online-cpu-virtual-corner-quotient-adjustment =
+			/* 1st fuse version tuple matched */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 0 CPUs online */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 1 CPUs online */
+			<0 0 0 (-3) (-3) (-3) (-3) (-3) (-6) 0 (-6) (-6)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 2nd fuse version tuple matched */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 0 CPUs online */
+			<0 0 0 (-6) (-6) (-6) (-9) (-9) (-12) 0 (-12) (-18)>, /* 1 CPUs online */
+			<0 0 0 (-3) (-3) (-3) (-3) (-3) (-6) 0 (-6) (-6)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 4 CPUs online */
+			/* 3rd fuse version tuple matched */
+			<0 0 0 (-21) (-21) (-21) (-32) (-32) (-42) 0 (-42) (-63)>, /* 0 CPUs online */
+			<0 0 0 (-21) (-21) (-21) (-32) (-32) (-42) 0 (-42) (-63)>, /* 1 CPUs online */
+			<0 0 0 (-11) (-11) (-11) (-11) (-11) (-21) 0 (-21) (-21)>, /* 2 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>, /* 3 CPUs online */
+			<0 0 0 0 0 0 0 0 0 0 0 0>; /* 4 CPUs online */
+		qcom,cpr-allowed =
+			<0>,
+			<1>,
+			<1>;
+
+		qcom,fuse-remap-base-row = <1000>;
+		qcom,fuse-remap-source =
+				<140 7 3 0>,
+				<138 45 5 0>;
+		qcom,cpr-fuse-quot-offset-scale = <5 5 5>;
+
+		qcom,cpr-aging-sensor-id = <17 18>;
+		qcom,cpr-aging-ref-corner = <4>;
+		qcom,cpr-aging-ref-voltage = <1050000>;
+		qcom,cpr-max-aging-margin = <15000>;
+		qcom,cpr-de-aging-allowed =
+				<0>,
+				<0>,
+				<1>;
+		qcom,cpr-non-collapsible-sensors = <7 12 17 22>;
+		qcom,cpr-aging-ro-scaling-factor = <3500>;
+		qcom,cpr-ro-scaling-factor = <0 2500 2500 2500 0 0 0 0>;
+		qcom,cpr-aging-derate = <1000 1000 1250>;
+		qcom,cpr-fuse-aging-init-quot-diff =
+				<101 0 8 0>,
+				<101 8 8 0>;
+
+		qcom,cpr-thermal-sensor-id = <9>;
+		qcom,cpr-disable-temp-threshold = <5>;
+		qcom,cpr-enable-temp-threshold = <10>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
index 7de891e..b760758 100644
--- a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
@@ -44,6 +44,16 @@
 		    a particular PMIC found in the system.  This name must match
 		    to one that is defined by the bootloader.
 
+- qcom,regulator-type
+	Usage:      required if qcom,supported-modes is specified or if
+		    qcom,init-mode is specified in any subnodes
+	Value type: <string>
+	Definition: The physical type of the regulator including the PMIC
+		    family.  This is used for mode control.  Supported values:
+		    "pmic4-ldo", "pmic4-hfsmps", "pmic4-ftsmps", "pmic4-bob",
+		    "pmic5-ldo", "pmic5-hfsmps", "pmic5-ftsmps", and
+		    "pmic5-bob".
+
 - qcom,use-awake-state
 	Usage:      optional
 	Value type: <empty>
@@ -72,7 +82,7 @@
 	Value type: <prop-encoded-array>
 	Definition: A list of integers specifying the PMIC regulator modes
 		    supported by this regulator.  Supported values are
-		    RPMH_REGULATOR_MODE_* (i.e. 0 to 7).  Elements must be
+		    RPMH_REGULATOR_MODE_* (i.e. 0 to 4).  Elements must be
 		    specified in order from lowest to highest.
 
 - qcom,mode-threshold-currents
@@ -148,7 +158,7 @@
 	Usage:      optional; VRM regulators only
 	Value type: <u32>
 	Definition: Specifies the initial mode to request for a VRM regulator.
-		    Supported values are RPMH_REGULATOR_MODE_* (i.e. 0 to 7).
+		    Supported values are RPMH_REGULATOR_MODE_* (i.e. 0 to 4).
 
 - qcom,init-headroom-voltage
 	Usage:      optional; VRM regulators only
@@ -212,9 +222,10 @@
 	compatible = "qcom,rpmh-vrm-regulator";
 	mboxes = <&apps_rsc 0>;
 	qcom,resource-name = "smpa2";
+	qcom,regulator-type = "pmic4-smps";
 	qcom,supported-modes =
-		<RPMH_REGULATOR_MODE_SMPS_AUTO
-		 RPMH_REGULATOR_MODE_SMPS_PWM>;
+		<RPMH_REGULATOR_MODE_AUTO
+		 RPMH_REGULATOR_MODE_HPM>;
 	qcom,mode-threshold-currents = <0 2000000>;
 	pm8998_s2: regulator-s2 {
 		regulator-name = "pm8998_s2";
@@ -222,7 +233,7 @@
 		regulator-min-microvolt = <1100000>;
 		regulator-max-microvolt = <1200000>;
 		regulator-enable-ramp-delay = <200>;
-		qcom,init-mode = <RPMH_REGULATOR_MODE_SMPS_AUTO>;
+		qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
 		qcom,init-voltage = <1150000>;
 	};
 };
@@ -232,9 +243,10 @@
 	mboxes = <&disp_rsc 0>;
 	qcom,use-awake-state;
 	qcom,resource-name = "ldoa3";
+	qcom,regulator-type = "pmic4-ldo";
 	qcom,supported-modes =
-		<RPMH_REGULATOR_MODE_LDO_LPM
-		 RPMH_REGULATOR_MODE_LDO_HPM>;
+		<RPMH_REGULATOR_MODE_LPM
+		 RPMH_REGULATOR_MODE_HPM>;
 	qcom,mode-threshold-currents = <0 10000>;
 	qcom,always-wait-for-ack;
 	pm8998_l3_disp_ao: regulator-l3-ao {
@@ -250,7 +262,7 @@
 		qcom,set = <RPMH_REGULATOR_SET_SLEEP>;
 		regulator-min-microvolt = <1000000>;
 		regulator-max-microvolt = <1200000>;
-		qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+		qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		qcom,init-voltage = <1000000>;
 		qcom,init-enable = <0>;
 	};
@@ -260,6 +272,7 @@
 	compatible = "qcom,rpmh-vrm-regulator";
 	mboxes = <&apps_rsc 0>;
 	qcom,resource-name = "ldoa4";
+	qcom,regulator-type = "pmic4-ldo";
 	pm8998_l4-parent-supply = <&pm8998_s2>;
 	pm8998_l4: regulator-l4 {
 		regulator-name = "pm8998_l4";
diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
index 1c35e7b..03ab8f5 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.txt
+++ b/Documentation/devicetree/bindings/usb/usb-device.txt
@@ -11,7 +11,7 @@
   be used, but a device adhering to this binding may leave out all except
   for usbVID,PID.
 - reg: the port number which this device is connecting to, the range
-  is 1-31.
+  is 1-255.
 
 Example:
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9d2908d..7058d43 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1288,6 +1288,10 @@
 	224.0.0.X range.
 	Default TRUE
 
+nf_ipv4_defrag_skip - BOOLEAN
+	Skip defragmentation per interface if set.
+	Default: 0 (always defrag)
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
diff --git a/Makefile b/Makefile
index 061197a..323aa25 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 65
+SUBLEVEL = 73
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -374,9 +374,6 @@
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
 LDFLAGS_vmlinux =
-CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
-CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
-
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
@@ -397,21 +394,19 @@
 
 LINUXINCLUDE	+= $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
 
-KBUILD_CPPFLAGS := -D__KERNEL__
-
+KBUILD_AFLAGS   := -D__ASSEMBLY__
 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		   -fno-strict-aliasing -fno-common \
 		   -Werror-implicit-function-declaration \
 		   -Wno-format-security \
-		   -std=gnu89 $(call cc-option,-fno-PIE)
-
-
+		   -std=gnu89
+KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
-KBUILD_AFLAGS   := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+GCC_PLUGINS_CFLAGS :=
 
 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
 KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@@ -424,7 +419,7 @@
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -624,6 +619,12 @@
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
+KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
+CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
+CFLAGS_KCOV	:= $(call cc-option,-fsanitize-coverage=trace-pc,)
+export CFLAGS_GCOV CFLAGS_KCOV
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 975c36e..8e6b393 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -668,6 +668,7 @@
 	ti,non-removable;
 	bus-width = <4>;
 	cap-power-off-card;
+	keep-power-in-suspend;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 064d84f..ce54a70 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -282,6 +282,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <0>;
@@ -318,6 +319,7 @@
 				device_type = "pci";
 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
 					  0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+				bus-range = <0x00 0xff>;
 				#interrupt-cells = <1>;
 				num-lanes = <1>;
 				linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 08cce17..b4575bb 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -192,7 +192,7 @@
 	interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins &mmc1_cd>;
-	cd-gpios = <&gpio4 31 IRQ_TYPE_LEVEL_LOW>;		/* gpio127 */
+	cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>;		/* gpio127 */
 	vmmc-supply = <&vmmc1>;
 	bus-width = <4>;
 	cap-power-off-card;
@@ -249,9 +249,9 @@
 			OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0)   /* cam_xclka.cam_xclka */
 			OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0)   /* cam_pclk.cam_pclk */
 
-			OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0)   /* cam_d0.cam_d0 */
-			OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0)   /* cam_d1.cam_d1 */
-			OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0)   /* cam_d2.cam_d2 */
+			OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0)   /* cam_d0.cam_d0 */
+			OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0)   /* cam_d1.cam_d1 */
+			OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0)   /* cam_d2.cam_d2 */
 			OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0)   /* cam_d3.cam_d3 */
 			OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0)   /* cam_d4.cam_d4 */
 			OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0)   /* cam_d5.cam_d5 */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
index f90bd7f..e51d54b 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
@@ -17,7 +17,6 @@
 
 &snd_934x {
 	qcom,audio-routing =
-		"AIF4 VI", "MCLK",
 		"RX_BIAS", "MCLK",
 		"MADINPUT", "MCLK",
 		"AMIC2", "MIC BIAS2",
@@ -28,8 +27,6 @@
 		"MIC BIAS2", "ANCLeft Headset Mic",
 		"AMIC5", "MIC BIAS3",
 		"MIC BIAS3", "Handset Mic",
-		"DMIC0", "MIC BIAS1",
-		"MIC BIAS1", "Digital Mic0",
 		"DMIC1", "MIC BIAS1",
 		"MIC BIAS1", "Digital Mic1",
 		"DMIC2", "MIC BIAS3",
@@ -40,14 +37,13 @@
 		"MIC BIAS4", "Digital Mic4",
 		"DMIC5", "MIC BIAS4",
 		"MIC BIAS4", "Digital Mic5",
-		"SpkrLeft IN", "SPK1 OUT",
 		"SpkrRight IN", "SPK2 OUT";
 
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <1>;
 	qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
 	qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
-	qcom,tavil-mclk-clk-freq = <12288000>;
+	qcom,tavil-mclk-clk-freq = <9600000>;
 
 	asoc-codec = <&stub_codec>;
 	asoc-codec-names = "msm-stub-codec.1";
@@ -66,23 +62,24 @@
 		interrupt-controller;
 		#interrupt-cells = <1>;
 		interrupt-parent = <&tlmm>;
-		qcom,gpio-connect = <&tlmm 71 0>;
+		qcom,gpio-connect = <&tlmm 90 0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&wcd_intr_default>;
 	};
 
 	clock_audio_up: audio_ext_clk_up {
 		compatible = "qcom,audio-ref-clk";
-		qcom,codec-mclk-clk-freq = <12288000>;
+		qcom,audio-ref-clk-gpio = <&tlmm 62 0>;
+		qcom,codec-mclk-clk-freq = <9600000>;
 		pinctrl-names = "sleep", "active";
 		pinctrl-0 = <&i2s_mclk_sleep>;
 		pinctrl-1 = <&i2s_mclk_active>;
 		#clock-cells = <1>;
 	};
 
-	wcd_rst_gpio: msm_cdc_pinctrl@77 {
+	wcd_rst_gpio: msm_cdc_pinctrl@86 {
 		compatible = "qcom,msm-cdc-pinctrl";
-		qcom,cdc-rst-n-gpio = <&tlmm 77 0>;
+		qcom,cdc-rst-n-gpio = <&tlmm 86 0>;
 		pinctrl-names = "aud_active", "aud_sleep";
 		pinctrl-0 = <&cdc_reset_active>;
 		pinctrl-1 = <&cdc_reset_sleep>;
@@ -91,8 +88,8 @@
 
 &i2c_3 {
 	wcd934x_cdc: tavil_codec {
-		compatible = "qcom,tavil-i2c-pgd";
-		elemental-addr = [00 01 50 02 17 02];
+		compatible = "qcom,tavil-i2c";
+		reg = <0x0d>;
 
 		interrupt-parent = <&wcd9xxx_intc>;
 		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
@@ -135,7 +132,7 @@
 		qcom,cdc-micbias3-mv = <1800>;
 		qcom,cdc-micbias4-mv = <1800>;
 
-		qcom,cdc-mclk-clk-rate = <12288000>;
+		qcom,cdc-mclk-clk-rate = <9600000>;
 		qcom,cdc-dmic-sample-rate = <4800000>;
 
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
index 4fe2d1e..13e1fc3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
@@ -103,7 +103,6 @@
 		pinctrl-names = "i2c_active", "i2c_sleep";
 		pinctrl-0 = <&i2c_3_active>;
 		pinctrl-1 = <&i2c_3_sleep>;
-		status = "disabled";
 	};
 
 	i2c_4: i2c@838000 { /* BLSP1 QUP4: GPIO: 76,77 */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 89945e3..261829f 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -15,6 +15,7 @@
 
 #include "sdxpoorwills.dtsi"
 #include "sdxpoorwills-pinctrl.dtsi"
+#include "sdxpoorwills-cdp-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDXPOORWILLS CDP";
@@ -33,6 +34,26 @@
 	status = "ok";
 };
 
+&sdhc_1 {
+	vdd-supply = <&vreg_sd_mmc>;
+
+	vdd-io-supply = <&pmxpoorwills_l7>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 10000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_cd_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+							200000000>;
+	qcom,devfreq,freq-table = <50000000 200000000>;
+
+	cd-gpios = <&tlmm 93 0x1>;
+
+	status = "ok";
+};
+
 &pmxpoorwills_vadc {
 	chan@83 {
 		label = "vph_pwr";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
index afc8896..a09b149 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
@@ -20,5 +20,11 @@
 			reg = <25>;
 			qcom,ion-heap-type = "SYSTEM";
 		};
+
+		qcom,ion-heap@28 { /* AUDIO HEAP */
+			reg = <28>;
+			memory-region = <&audio_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
index f580901..575febe 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dts
@@ -33,6 +33,26 @@
 	status = "ok";
 };
 
+&sdhc_1 {
+	vdd-supply = <&vreg_sd_mmc>;
+
+	vdd-io-supply = <&pmxpoorwills_l7>;
+	qcom,vdd-io-voltage-level = <1800000 2950000>;
+	qcom,vdd-io-current-level = <200 10000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_cd_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_cd_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
+							200000000>;
+	qcom,devfreq,freq-table = <50000000 200000000>;
+
+	cd-gpios = <&tlmm 93 0x1>;
+
+	status = "ok";
+};
+
 &pmxpoorwills_vadc {
 	chan@83 {
 		label = "vph_pwr";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 9b8e751..fa9c4f8 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -947,12 +947,12 @@
 		wcd9xxx_intr {
 			wcd_intr_default: wcd_intr_default{
 				mux {
-					pins = "gpio71";
+					pins = "gpio90";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio71";
+					pins = "gpio90";
 					drive-strength = <2>; /* 2 mA */
 					bias-pull-down; /* pull down */
 					input-enable;
@@ -963,11 +963,11 @@
 		cdc_reset_ctrl {
 			cdc_reset_sleep: cdc_reset_sleep {
 				mux {
-					pins = "gpio77";
+					pins = "gpio86";
 					function = "gpio";
 				};
 				config {
-					pins = "gpio77";
+					pins = "gpio86";
 					drive-strength = <2>;
 					bias-disable;
 					output-low;
@@ -976,11 +976,11 @@
 
 			cdc_reset_active:cdc_reset_active {
 				mux {
-					pins = "gpio77";
+					pins = "gpio86";
 					function = "gpio";
 				};
 				config {
-					pins = "gpio77";
+					pins = "gpio86";
 					drive-strength = <8>;
 					bias-pull-down;
 					output-high;
@@ -1063,7 +1063,7 @@
 			pri_ws_active_master: pri_ws_active_master {
 				mux {
 					pins = "gpio12";
-					function = "pri_mi2s_ws_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1077,7 +1077,7 @@
 			pri_sck_active_master: pri_sck_active_master {
 				mux {
 					pins = "gpio15";
-					function = "pri_mi2s_sck_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1091,7 +1091,7 @@
 			pri_ws_active_slave: pri_ws_active_slave {
 				mux {
 					pins = "gpio12";
-					function = "pri_mi2s_ws_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1104,7 +1104,7 @@
 			pri_sck_active_slave: pri_sck_active_slave {
 				mux {
 					pins = "gpio15";
-					function = "pri_mi2s_sck_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1117,7 +1117,7 @@
 			pri_dout_active: pri_dout_active {
 				mux {
 					pins = "gpio14";
-					function = "pri_mi2s_data1_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1147,7 +1147,7 @@
 			pri_din_active: pri_din_active {
 				mux {
 					pins = "gpio13";
-					function = "pri_mi2s_data0_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1299,6 +1299,82 @@
 			};
 		};
 
+		/* SDC pin type */
+		sdc1_clk_on: sdc1_clk_on {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc1_clk_off: sdc1_clk_off {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cmd_on: sdc1_cmd_on {
+			config {
+				pins = "sdc1_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_cmd_off: sdc1_cmd_off {
+			config {
+				pins = "sdc1_cmd";
+				num-grp-pins = <1>;
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_data_on: sdc1_data_on {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_data_off: sdc1_data_off {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cd_on: cd_on {
+			mux {
+				pins = "gpio93";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio93";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		sdc1_cd_off: cd_off {
+			mux {
+				pins = "gpio93";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio93";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
 		smb_int_default: smb_int_default {
 			mux {
 				pins = "gpio42";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
index e62c4a3..053348c 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-regulator.dtsi
@@ -11,6 +11,7 @@
  */
 
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/gpio/gpio.h>
 
 &soc {
 	/* RPMh regulators */
@@ -77,9 +78,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l1: regualtor-pmxpoorwills-11 {
 			regulator-name = "pmxpoorwills_l1";
@@ -87,7 +89,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -95,9 +97,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l2: regualtor-pmxpoorwills-12 {
 			 regulator-name = "pmxpoorwills_l2";
@@ -105,7 +108,7 @@
 			 regulator-min-microvolt = <1128000>;
 			 regulator-max-microvolt = <1128000>;
 			 qcom,init-voltage = <1128000>;
-			 qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			 qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		 };
 	};
 
@@ -113,9 +116,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l3: regualtor-pmxpoorwills-l3 {
 			regulator-name = "pmxpoorwills_l3";
@@ -123,7 +127,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -131,9 +135,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa4";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l4: regualtor-pmxpoorwills-l4 {
 			regulator-name = "pmxpoorwills_l4";
@@ -141,7 +146,7 @@
 			regulator-min-microvolt = <872000>;
 			regulator-max-microvolt = <872000>;
 			qcom,init-voltage = <872000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -149,9 +154,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l5: regualtor-pmxpoorwills-l5 {
 			regulator-name = "pmxpoorwills_l5";
@@ -159,7 +165,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <1704000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -167,9 +173,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l7: regualtor-pmxpoorwills-l7 {
 			regulator-name = "pmxpoorwills_l7";
@@ -177,7 +184,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2952000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -185,9 +192,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l8: regualtor-pmxpoorwills-l8 {
 			regulator-name = "pmxpoorwills_l8";
@@ -195,7 +203,7 @@
 			regulator-min-microvolt = <480000>;
 			regulator-max-microvolt = <900000>;
 			qcom,init-voltage = <480000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -231,9 +239,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l10: regualtor-pmxpoorwills-l10 {
 			regulator-name = "pmxpoorwills_l10";
@@ -241,7 +250,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -249,9 +258,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l11: regualtor-pmxpoorwills-l11 {
 			  regulator-name = "pmxpoorwills_l11";
@@ -259,7 +269,7 @@
 			  regulator-min-microvolt = <1704000>;
 			  regulator-max-microvolt = <3000000>;
 			  qcom,init-voltage = <1704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -267,9 +277,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l12: regualtor-pmxpoorwills-l12 {
 			  regulator-name = "pmxpoorwills_l12";
@@ -277,7 +288,7 @@
 			  regulator-min-microvolt = <2704000>;
 			  regulator-max-microvolt = <2704000>;
 			  qcom,init-voltage = <2704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -285,9 +296,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l13: regualtor-pmxpoorwills-l13 {
 			  regulator-name = "pmxpoorwills_l13";
@@ -295,7 +307,7 @@
 			  regulator-min-microvolt = <1704000>;
 			  regulator-max-microvolt = <3000000>;
 			  qcom,init-voltage = <1704000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -303,9 +315,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l14: regualtor-pmxpoorwills-l14 {
 			  regulator-name = "pmxpoorwills_l14";
@@ -313,7 +326,7 @@
 			  regulator-min-microvolt = <600000>;
 			  regulator-max-microvolt = <800000>;
 			  qcom,init-voltage = <600000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -321,9 +334,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic5-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pmxpoorwills_l16: regualtor-pmxpoorwills-l16 {
 			  regulator-name = "pmxpoorwills_l16";
@@ -331,7 +345,7 @@
 			  regulator-min-microvolt = <304000>;
 			  regulator-max-microvolt = <880000>;
 			  qcom,init-voltage = <304000>;
-			  qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			  qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		  };
 	};
 
@@ -359,4 +373,12 @@
 		regulator-max-microvolt = <1800000>;
 		regulator-always-on;
 	};
+
+	vreg_sd_mmc: vreg_sd_mmc {
+		compatible = "regulator-fixed";
+		regulator-name = "vreg_sd_mmc";
+		startup-delay-us = <4000>;
+		enable-active-high;
+		gpio = <&tlmm 92 GPIO_ACTIVE_HIGH>;
+	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index f5351de..322e2cc 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -76,6 +76,7 @@
 
 	aliases {
 		qpic_nand1 = &qnand_1;
+		sdhc1 = &sdhc_1; /* SDC1 eMMC/SD/SDIO slot */
 	};
 
 	soc: soc { };
@@ -288,6 +289,45 @@
 		status = "disabled";
 	};
 
+	sdhc_1: sdhci@8804000 {
+		compatible = "qcom,sdhci-msm-v5";
+		reg = <0x8804000 0x1000>;
+		reg-names = "hc_mem";
+
+		interrupts = <0 210 0>, <0 227 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <4>;
+
+		qcom,msm-bus,name = "sdhc1";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =  <78 512 0 0>, /* No vote */
+				<78 512 1600 3200>,    /* 400 KB/s*/
+				<78 512 80000 160000>, /* 20 MB/s */
+				<78 512 100000 200000>, /* 25 MB/s */
+				<78 512 200000 400000>, /* 50 MB/s */
+				<78 512 400000 800000>, /* 100 MB/s */
+				<78 512 400000 800000>, /* 200 MB/s */
+				<78 512 2048000 4096000>; /* Max. bandwidth */
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+						100000000 200000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-cpu-groups = <0x0>;
+		qcom,pm-qos-cmdq-latency-us = <70>;
+		qcom,pm-qos-legacy-latency-us = <70>;
+		qcom,pm-qos-irq-type = "affine_cores";
+		qcom,pm-qos-irq-cpu = <0>;
+		qcom,pm-qos-irq-latency = <70>;
+
+		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+			<&clock_gcc GCC_SDCC1_APPS_CLK>;
+		clock-names = "iface_clk", "core_clk";
+
+		status = "disabled";
+	};
+
 	qcom,msm-imem@1468B000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x1468B000 0x1000>; /* Address and size of IMEM */
@@ -345,6 +385,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
+		qcom,pil-force-shutdown;
 	};
 
 	spmi_bus: qcom,spmi@c440000 {
@@ -698,6 +739,20 @@
 			"tx-ch3-intr", "tx-ch4-intr",
 			"rx-ch0-intr", "rx-ch1-intr",
 			"rx-ch2-intr", "rx-ch3-intr";
+		qcom,msm-bus,name = "emac";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			<98 512 1250 0>, <1 781 0 40000>,  /* 10Mbps vote */
+			<98 512 12500 0>, <1 781 0 40000>,  /* 100Mbps vote */
+			<98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */
+		qcom,bus-vector-names = "10", "100", "1000";
+		clocks = <&clock_gcc GCC_ETH_AXI_CLK>,
+			<&clock_gcc GCC_ETH_PTP_CLK>,
+			<&clock_gcc GCC_ETH_RGMII_CLK>,
+			<&clock_gcc GCC_ETH_SLAVE_AHB_CLK>;
+		clock-names = "eth_axi_clk", "eth_ptp_clk",
+			"eth_rgmii_clk", "eth_slave_ahb_clk";
 		io-macro-info {
 			io-macro-bypass-mode = <0>;
 			io-interface = "rgmii";
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
index fd1cac3..9d60c9e 100644
--- a/arch/arm/configs/msm8953-perf_defconfig
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -73,6 +73,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -280,6 +281,7 @@
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -300,9 +302,14 @@
 CONFIG_PINCTRL_MSM8953=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
@@ -310,6 +317,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -363,7 +371,10 @@
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
index c126ccd..36c8651 100644
--- a/arch/arm/configs/msm8953_defconfig
+++ b/arch/arm/configs/msm8953_defconfig
@@ -79,6 +79,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -290,6 +291,7 @@
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
 CONFIG_INPUT_UINPUT=y
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
@@ -310,9 +312,14 @@
 CONFIG_PINCTRL_MSM8953=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_QPNP_PIN=y
+CONFIG_GPIO_QPNP_PIN_DEBUG=y
 CONFIG_POWER_SUPPLY=y
+CONFIG_QPNP_FG=y
 CONFIG_SMB135X_CHARGER=y
 CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_TYPEC=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
@@ -320,6 +327,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
@@ -375,7 +383,10 @@
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index d3c8152..834dfb8 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -254,7 +254,6 @@
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
 CONFIG_USB_ACM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_STORAGE_DEBUG=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index a2846ca..760d4d4 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -290,6 +290,7 @@
 CONFIG_MMC_TEST=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
@@ -337,9 +338,7 @@
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PM=y
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_EXTCON=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_PWM_QPNP=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 68b06f9..12f99fd 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -516,4 +516,22 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1:	.inst	0xde02
+#else
+1:	.inst	0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	.pushsection .rodata.str, "aMS", %progbits, 1
+2:	.asciz	"\msg"
+	.popsection
+	.pushsection __bug_table, "aw"
+	.align	2
+	.word	1b, 2b
+	.hword	\line
+	.popsection
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f13ae15..d2315ff 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,8 +112,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the base location for PIE (ET_DYN with INTERP) loads. */
-#define ELF_ET_DYN_BASE		0x400000UL
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089f..98d6de1 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
@@ -209,6 +208,7 @@
 #define HSR_EC_IABT_HYP	(0x21)
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)
+#define HSR_EC_MAX	(0x3f)
 
 #define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)
 
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a58bbaa..d10e362 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -223,6 +223,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 1f59ea05..b7e0125 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -478,11 +478,10 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned int __ua_flags;
 
-	check_object_size(to, n, false);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_from_user(to, from, n);
 	uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags;
-
-	check_object_size(from, n, true);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
 #else
-	check_object_size(from, n, true);
 	return arm_copy_to_user(to, from, n);
 #endif
 }
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
 }
 
 #else
-#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
+#define __arch_copy_from_user(to, from, n)	\
+					(memcpy(to, (void __force *)from, n), 0)
+#define __arch_copy_to_user(to, from, n)	\
+					(memcpy((void __force *)to, from, n), 0)
 #define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
+
+	check_object_size(to, n, false);
+
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_from_user(to, from, n);
+		res = __arch_copy_from_user(to, from, n);
 	if (unlikely(res))
 		memset(to + (n - res), 0, res);
 	return res;
 }
 
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
+
+	return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
 	return n;
 }
 
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 6391728..e056c9a 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -299,6 +299,8 @@
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -313,6 +315,7 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + PT_REGS_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #elif defined(CONFIG_CPU_V7M)
 	@ V7M restore.
 	@ Note that we don't need to do clrex here as clearing the local
@@ -328,6 +331,8 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]	@ get pc
 	add	sp, sp, #\offset + S_SP
+	tst	r1, #PSR_I_BIT | 0x0f
+	bne	1f
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -340,6 +345,7 @@
 	.endif
 	add	sp, sp, #PT_REGS_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
 #endif	/* !CONFIG_THUMB2_KERNEL */
 	.endm
 
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 066b6d4..42f5daf 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+		      hsr);
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589..c821c1d 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE			(0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT	17
 
 static u32 enable_1510_mode;
 
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
 		goto exit_iounmap;
 	}
 
-	d->lch_count		= OMAP1_LOGICAL_DMA_CH_COUNT;
-
 	/* Valid attributes for omap1 plus processors */
 	if (cpu_is_omap15xx())
 		d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
 	d->dev_caps		|= CLEAR_CSR_ON_READ;
 	d->dev_caps		|= IS_WORD_16;
 
-	if (cpu_is_omap15xx())
-		d->chan_count = 9;
-	else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-		if (!(d->dev_caps & ENABLE_1510_MODE))
-			d->chan_count = 16;
+	/* available logical channels */
+	if (cpu_is_omap15xx()) {
+		d->lch_count = 9;
+	} else {
+		if (d->dev_caps & ENABLE_1510_MODE)
+			d->lch_count = 9;
 		else
-			d->chan_count = 9;
+			d->lch_count = 16;
 	}
 
 	p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c70..2944af8 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 							ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1cc4a6f..bca5415 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3828,16 +3828,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
-						       const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+							const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3906,15 +3910,20 @@ int __init omap3xxx_hwmod_init(void)
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 770216b..88676fe 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -147,7 +147,7 @@ static struct ti_st_plat_data wilink_pdata = {
 	.nshutdown_gpio = 137,
 	.dev_name = "/dev/ttyO1",
 	.flow_cntrl = 1,
-	.baud_rate = 300000,
+	.baud_rate = 3000000,
 };
 
 static struct platform_device wl18xx_device = {
@@ -162,7 +162,7 @@ static struct ti_st_plat_data wilink7_pdata = {
 	.nshutdown_gpio = 162,
 	.dev_name = "/dev/ttyO1",
 	.flow_cntrl = 1,
-	.baud_rate = 300000,
+	.baud_rate = 3000000,
 };
 
 static struct platform_device wl128x_device = {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bfff16a..0a05c0a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1045,13 +1045,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;
 
+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 9fe8e24..e1f6f0d 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
 		.val	= PMD_SECT_USER,
 		.set	= "USR",
 	}, {
-		.mask	= L_PMD_SECT_RDONLY,
-		.val	= L_PMD_SECT_RDONLY,
+		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 		.set	= "ro",
 		.clear	= "RW",
 #elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b46d914..cae69148a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -678,8 +678,8 @@ static struct section_perm ro_perms[] = {
 		.start  = (unsigned long)_stext,
 		.end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-		.mask   = ~L_PMD_SECT_RDONLY,
-		.prot   = L_PMD_SECT_RDONLY,
+		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 #else
 		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
 		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index a4ec240..3eb018f 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -455,15 +456,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
-		if (ri->rp && ri->rp->handler) {
-			__this_cpu_write(current_kprobe, &ri->rp->kp);
-			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
-			ri->rp->handler(ri, regs);
-			__this_cpu_write(current_kprobe, NULL);
-		}
-
 		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -475,6 +468,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	}
 
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (ri->rp && ri->rp->handler) {
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
+			ri->rp->handler(ri, regs);
+			__this_cpu_write(current_kprobe, NULL);
+		}
+
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
 	kretprobe_hash_unlock(current, &flags);
 
 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 9775de2..a48354d 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -976,7 +976,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-		"stmdb	sp!, {r4-r11}				\n\t"
+		"mov	r2, sp					\n\t"
+		"bic	r3, r2, #7				\n\t"
+		"mov	sp, r3					\n\t"
+		"stmdb	sp!, {r2-r11}				\n\t"
 		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 		"bic	r0, lr, #1  @ r0 = inline data		\n\t"
 		"mov	r1, sp					\n\t"
@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
 		"movne	pc, r0					\n\t"
 		"mov	r0, r4					\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}				\n\t"
+		"ldmia	sp!, {r2-r11}				\n\t"
+		"mov	sp, r2					\n\t"
 		"mov	pc, r0					\n\t"
 	);
 }
@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
 		"bxne	r0					\n\t"
 		"mov	r0, r4					\n\t"
 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-		"ldmia	sp!, {r4-r11}				\n\t"
+		"ldmia	sp!, {r2-r11}				\n\t"
+		"mov	sp, r2					\n\t"
 		"bx	r0					\n\t"
 	);
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0848993..32a80d6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -781,6 +781,34 @@
 	  However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
 	  4M allocations matching the default size used by generic code.
 
+config UNMAP_KERNEL_AT_EL0
+	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  be used to bypass MMU permission checks and leak kernel data to
+	  userspace. This can be defended against by unmapping the kernel
+	  when running in userspace, mapping it back in on exception entry
+	  via a trampoline page in the vector table.
+
+	  If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+	bool "Harden the branch predictor against aliasing attacks" if EXPERT
+	help
+	  Speculation attacks against some high-performance processors rely on
+	  being able to manipulate the branch predictor for a victim context by
+	  executing aliasing branches in the attacker context.  Such attacks
+	  can be partially mitigated against by clearing internal branch
+	  predictor state and limiting the prediction logic in some situations.
+
+	  This config option will take CPU-specific actions to harden the
+	  branch predictor against aliasing attacks and may rely on specific
+	  instruction sequences or control bits being set by the system
+	  firmware.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d5a7418..d35cecb 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -121,6 +121,7 @@
 	select PM_OPP
 	select MFD_CORE
 	select SND_SOC_COMPRESS
+	select SND_HWDEP
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1570602..b5e154e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -14,8 +14,12 @@
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic
+ifeq ($(CONFIG_RELOCATABLE), y)
+# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+# for relative relocs, since this leads to better Image compression
+# with the relocation offsets always being zero.
+LDFLAGS_vmlinux		+= -pie -shared -Bsymbolic \
+			$(call ld-option, --no-apply-dynamic-relocs)
 endif
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5a7e17e..b2adfb4 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -126,6 +126,7 @@
 		sda670-pm660a-mtp-overlay.dtbo \
 		qcs605-cdp-overlay.dtbo \
 		qcs605-mtp-overlay.dtbo \
+		qcs605-360camera-overlay.dtbo \
 		qcs605-external-codec-mtp-overlay.dtbo \
 		qcs605-lc-mtp-overlay.dtbo
 
@@ -156,6 +157,7 @@
 qcs605-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-lc-mtp-overlay.dtbo-base := qcs605.dtb
+qcs605-360camera-overlay.dtbo-base := qcs605.dtb
 
 else
 dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
@@ -219,7 +221,10 @@
 	sdm450-qrd.dtb \
 	sdm450-pmi8940-mtp.dtb \
 	sdm450-pmi8937-mtp.dtb \
-	sdm450-iot-mtp.dtb
+	sdm450-iot-mtp.dtb \
+	sdm450-qrd-sku4.dtb \
+	sdm450-pmi632-cdp-s2.dtb \
+	sdm450-pmi632-mtp-s3.dtb
 endif
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi
new file mode 100644
index 0000000..3af01c1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi
@@ -0,0 +1,114 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_hx8399_truly_cmd: qcom,mdss_dsi_hx8399_truly_cmd {
+		qcom,mdss-dsi-panel-name =
+			"hx8399 video mode dsi truly panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-t-clk-pre = <0x30>;
+		qcom,mdss-dsi-t-clk-post = <0x0e>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <24>;
+				qcom,mdss-dsi-h-back-porch = <24>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <40>;
+				qcom,mdss-dsi-v-front-porch = <36>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 04 B9 FF 83 99
+					39 01 00 00 00 00 02 D2 88
+					39 01 00 00 00 00 10 B1 02 04 74 94 01
+					   32 33 11 11 E6 5D 56 73 02 02
+					39 01 00 00 00 00 10 B2 00 80 80 CC 05
+					   07 5A 11 10 10 00 1E 70 03 D4
+					39 01 00 00 00 00 2D B4 00 FF 59 59 0C
+					   AC 00 00 0C 00 07 0A 00 28 07 08 0C
+					   21 03 00 00 00 AE 87 59 59 0C AC 00
+					   00 0C 00 07 0A 00 28 07 08 0C 01 00
+					   00 AE 01
+					39 01 00 00 05 00 22 D3 00 00 01 01 00
+					   00 10 10 00 00 03 00 03 00 08 78 08
+					   78 00 00 00 00 00 24 02 05 05 03 00
+					   00 00 05 40
+					39 01 00 00 05 00 21 D5 20 20 19 19 18
+					   18 02 03 00 01 24 24 18 18 18 18 24
+					   24 00 00 00 00 00 00 00 00 2F 2F 30
+					   30 31 31
+					39 01 00 00 05 00 21 D6 24 24 18 18 19
+					   19 01 00 03 02 24 24 18 18 18 18 20
+					   20 40 40 40 40 40 40 40 40 2F 2F 30
+					   30 31 31
+					39 01 00 00 00 00 02 BD 00
+					39 01 00 00 00 00 11 D8 AA AA AA AA AA
+					   AA AA AA AA BA AA AA AA BA AA AA
+					39 01 00 00 00 00 02 BD 01
+					39 01 00 00 00 00 11 D8 82 EA AA AA 82
+					   EA AA AA 82 EA AA AA 82 EA AA AA
+					39 01 00 00 00 00 02 BD 02
+					39 01 00 00 00 00 09 D8 FF FF C0 3F FF
+					   FF C0 3F
+					39 01 00 00 00 00 02 BD 00
+					39 01 00 00 05 00 37 E0 08 2A 39 35 74
+					   7C 87 7F 84 8A 8E 91 93 96 9B 9C 9E
+					   A5 A6 AE A1 AF B2 5C 58 63 74 08 2A
+					   39 35 74 7C 87 7F 84 8A 8E 91 93 96
+					   9B 9C 9E A5 A6 AE A1 AF B2 5C 58 63
+					   74
+					39 01 00 00 00 00 03 B6 7E 7E
+					39 01 00 00 00 00 02 CC 08
+					39 01 00 00 00 00 06 C7 00 08 00 01 08
+					39 01 00 00 00 00 03 C0 25 5A
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 14 00 02 29 00];
+				qcom,mdss-dsi-off-command = [05 01 00 00 14 00
+				  02 28 00 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index 5529ed1..32892a7 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -181,7 +181,7 @@
 					15 01 00 00 00 00 02 ec 00
 					15 01 00 00 00 00 02 ff 10
 					15 01 00 00 00 00 02 bb 10
-					15 01 00 00 00 00 02 35 02
+					15 01 00 00 00 00 02 35 00
 					05 01 00 00 78 00 02 11 00
 					05 01 00 00 78 00 02 29 00];
 				qcom,mdss-dsi-off-command = [05 01 00 00 14
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
new file mode 100644
index 0000000..49e840be
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8953.h>
+
+&soc {
+	kgsl_smmu: arm,smmu-kgsl@1c40000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		qcom,tz-device-id = "GPU";
+		reg = <0x1c40000 0x10000>;
+		#iommu-cells = <1>;
+		#global-interrupts = <0>;
+		interrupts =  <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,dynamic;
+		qcom,use-3-lvl-tables;
+		qcom,enable-static-cb;
+		qcom,enable-smmu-halt;
+		qcom,skip-init;
+		vdd-supply = <&gdsc_oxili_cx>;
+		qcom,regulator-names = "vdd";
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gfx_clk>;
+		clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+	};
+
+	/* A test device to test the SMMU operation */
+	kgsl_iommu_test_device0 {
+		status = "disabled";
+		compatible = "iommu-debug-test";
+		/* The SID should be a valid one to get the proper
+		 * SMR, S2CR indices.
+		 */
+		iommus = <&kgsl_smmu 0x0>;
+	};
+
+	apps_iommu: qcom,iommu@1e00000 {
+		status = "okay";
+		compatible = "qcom,qsmmu-v500";
+		reg = <0x1e00000 0x40000>,
+			<0x1ee2000 0x20>;
+		reg-names = "base", "tcu-base";
+		#iommu-cells = <2>;
+		qcom,tz-device-id = "APPS";
+		qcom,skip-init;
+		qcom,enable-static-cb;
+		qcom,use-3-lvl-tables;
+		qcom,disable-atos;
+		#global-interrupts = <0>;
+		#size-cells = <1>;
+		#address-cells = <1>;
+		ranges;
+		interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clock_gcc clk_gcc_smmu_cfg_clk>,
+			  <&clock_gcc clk_gcc_apss_tcu_async_clk>;
+		clock-names = "iface_clk", "core_clk";
+	};
+};
+
+#include "msm-arm-smmu-impl-defs-8953.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
new file mode 100644
index 0000000..2122db9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&kgsl_smmu {
+	attach-impl-defs = <0x6000 0x270>,
+		<0x6060 0x1055>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x204>,
+		<0x6928 0x10800>,
+		<0x6930 0x400>,
+		<0x6960 0xffffffff>,
+		<0x6b64 0xa0000>,
+		<0x6b68 0xaaab92a>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
new file mode 100644
index 0000000..57272a4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8937-regulator.dtsi
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	rpm-regulator-smpa1 {
+		status = "okay";
+		pm8937_s1: regulator-s1 {
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1225000>;
+			qcom,init-voltage = <1000000>;
+			status = "okay";
+		};
+	};
+
+	/* VDD_CX supply */
+	rpm-regulator-smpa2 {
+		status = "okay";
+		pm8937_s2_level: regulator-s2-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-level;
+		};
+
+		pm8937_s2_floor_level: regulator-s2-floor-level {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_floor_level";
+			qcom,set = <3>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-floor-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8937_s2_level_ao: regulator-s2-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_BINNING>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		status = "okay";
+		pm8937_s3: regulator-s3 {
+			regulator-min-microvolt = <1300000>;
+			regulator-max-microvolt = <1300000>;
+			qcom,init-voltage = <1300000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		status = "okay";
+		pm8937_s4: regulator-s4 {
+			regulator-min-microvolt = <2050000>;
+			regulator-max-microvolt = <2050000>;
+			qcom,init-voltage = <2050000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		status = "okay";
+		pm8937_l2: regulator-l2 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1200000>;
+			status = "okay";
+		};
+	};
+
+	/* VDD_MX supply */
+	rpm-regulator-ldoa3 {
+		status = "okay";
+		pm8937_l3_level_ao: regulator-l3-level-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3_level_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,use-voltage-level;
+			qcom,always-send-voltage;
+		};
+
+		pm8937_l3_level_so: regulator-l3-level-so {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3_level_so";
+			qcom,set = <2>;
+			regulator-min-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt =
+				<RPM_SMD_REGULATOR_LEVEL_TURBO>;
+			qcom,init-voltage-level =
+				<RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			qcom,use-voltage-level;
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		status = "okay";
+		pm8937_l5: regulator-l5 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		status = "okay";
+		pm8937_l6: regulator-l6 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		status = "okay";
+		pm8937_l7: regulator-l7 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+
+		pm8937_l7_ao: regulator-l7-ao {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l7_ao";
+			qcom,set = <1>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		status = "okay";
+		pm8937_l8: regulator-l8 {
+			regulator-min-microvolt = <2850000>;
+			regulator-max-microvolt = <2900000>;
+			qcom,init-voltage = <2900000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		status = "okay";
+		pm8937_l9: regulator-l9 {
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <3000000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		status = "okay";
+		pm8937_l10: regulator-l10 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <3000000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		status = "okay";
+		pm8937_l11: regulator-l11 {
+			regulator-min-microvolt = <2950000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,init-voltage = <2950000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		status = "okay";
+		pm8937_l12: regulator-l12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <2950000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		status = "okay";
+		pm8937_l13: regulator-l13 {
+			regulator-min-microvolt = <3075000>;
+			regulator-max-microvolt = <3075000>;
+			qcom,init-voltage = <3075000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		status = "okay";
+		pm8937_l14: regulator-l14 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		status = "okay";
+		pm8937_l15: regulator-l15 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		status = "okay";
+		pm8937_l16: regulator-l16 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			qcom,init-voltage = <1800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		status = "okay";
+		pm8937_l17: regulator-l17 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2900000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		status = "okay";
+		pm8937_l19: regulator-l19 {
+			regulator-min-microvolt = <1225000>;
+			regulator-max-microvolt = <1350000>;
+			qcom,init-voltage = <1225000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		status = "okay";
+		pm8937_l22: regulator-l22 {
+			regulator-min-microvolt = <2800000>;
+			regulator-max-microvolt = <2800000>;
+			qcom,init-voltage = <2800000>;
+			status = "okay";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		status = "okay";
+		pm8937_l23: regulator-l23 {
+			regulator-min-microvolt = <1200000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1200000>;
+			status = "okay";
+		};
+	};
+};
+
+/* SPM controlled regulators */
+&spmi_bus {
+	qcom,pm8937@1 {
+		/* PM8937 S5 + S6 = VDD_APC supply */
+		pm8937_s5: spm-regulator@2000 {
+			compatible = "qcom,spm-regulator";
+			reg = <0x2000 0x100>;
+			regulator-name = "pm8937_s5";
+			regulator-min-microvolt = <1050000>;
+			regulator-max-microvolt = <1350000>;
+		};
+	};
+};
+
+&soc {
+	mem_acc_vreg_corner: regulator@01946004 {
+		compatible = "qcom,mem-acc-regulator";
+		regulator-name = "mem_acc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <3>;
+
+		qcom,acc-reg-addr-list =
+			<0x01942138 0x01942130 0x01942120
+			 0x01942124 0x01946000 0x01946004>;
+
+		qcom,acc-init-reg-config = <1 0xff>, <2 0x5555>, <6 0x55>;
+
+		qcom,num-acc-corners = <3>;
+		qcom,boot-acc-corner = <2>;
+		qcom,corner1-reg-config =
+			/* SVS+ => SVS+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* SVS+ => NOM */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* SVS+ => TURBO/NOM+ */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<  3 0x0>,       <  4  0x0>,    <  5  0x0>;
+
+		qcom,corner2-reg-config =
+			/* NOM => SVS+ */
+			<  3 0x30c30c3>, <  4  0x30c3>, <  5  0x6060606>,
+			/* NOM => NOM */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* NOM => TURBO/NOM+ */
+			<  3 0x0>,       <  4  0x0>,    <  5  0x0>;
+
+		qcom,corner3-reg-config =
+			/* TURBO/NOM+ => SVS+ */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<  3 0x30c30c3>, <  4  0x30c3>, <  5  0x6060606>,
+			/* TURBO/NOM+ => NOM */
+			<  3 0x1041041>, <  4  0x1041>, <  5  0x2020202>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			/* TURBO/NOM+ => TURBO/NOM+ */
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>,
+			<(-1) (-1)>,     <(-1) (-1)>,   <(-1) (-1)>;
+	};
+
+	apc_vreg_corner: regulator@b018000 {
+		compatible = "qcom,cpr-regulator";
+		reg = <0xb018000 0x1000>, <0xb011064 4>, <0xa4000 0x1000>;
+		reg-names = "rbcpr", "rbcpr_clk", "efuse_addr";
+		interrupts = <0 15 0>;
+		regulator-name = "apc_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <7>;
+
+		qcom,cpr-fuse-corners = <3>;
+		qcom,cpr-voltage-ceiling = <1155000 1225000 1350000>;
+		qcom,cpr-voltage-floor =   <1050000 1050000 1090000>;
+		vdd-apc-supply = <&pm8937_s5>;
+
+		mem-acc-supply = <&mem_acc_vreg_corner>;
+
+		qcom,cpr-ref-clk = <19200>;
+		qcom,cpr-timer-delay = <5000>;
+		qcom,cpr-timer-cons-up = <0>;
+		qcom,cpr-timer-cons-down = <2>;
+		qcom,cpr-irq-line = <0>;
+		qcom,cpr-step-quotient = <10>;
+		qcom,cpr-up-threshold = <2>;
+		qcom,cpr-down-threshold = <4>;
+		qcom,cpr-idle-clocks = <15>;
+		qcom,cpr-gcnt-time = <1>;
+		qcom,vdd-apc-step-up-limit = <1>;
+		qcom,vdd-apc-step-down-limit = <1>;
+		qcom,cpr-apc-volt-step = <5000>;
+
+		qcom,cpr-fuse-row = <67 0>;
+		qcom,cpr-fuse-target-quot = <42 24 6>;
+		qcom,cpr-fuse-ro-sel = <60 57 54>;
+		qcom,cpr-init-voltage-ref = <1155000 1225000 1350000>;
+		qcom,cpr-fuse-init-voltage =
+					<67 36 6 0>,
+					<67 18 6 0>,
+					<67  0 6 0>;
+		qcom,cpr-fuse-quot-offset =
+					<71 26 6 0>,
+					<71 20 6 0>,
+					<70 54 7 0>;
+		qcom,cpr-fuse-quot-offset-scale = <5 5 5>;
+		qcom,cpr-init-voltage-step = <10000>;
+		qcom,cpr-corner-map = <1 2 3 3 3 3 3>;
+		qcom,cpr-corner-frequency-map =
+				<1 960000000>,
+				<2 1094400000>,
+				<3 1209600000>,
+				<4 1248000000>,
+				<5 1344000000>,
+				<6 1401000000>,
+				<7 1497600000>;
+		qcom,speed-bin-fuse-sel = <37 34 3 0>;
+		qcom,cpr-speed-bin-max-corners =
+					<0 0 1 2 6>,
+					<1 0 1 2 7>,
+					<2 0 1 2 3>;
+		qcom,cpr-fuse-revision = <69 39 3 0>;
+		qcom,cpr-quot-adjust-scaling-factor-max = <0 1400 1400>;
+		qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>;
+		qcom,cpr-scaled-init-voltage-as-ceiling;
+		qcom,cpr-fuse-version-map =
+			<0	(-1)	1	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)	2	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)	3	(-1)	(-1)	(-1)>,
+			<(-1)	(-1)  (-1)	(-1)	(-1)	(-1)>;
+		qcom,cpr-quotient-adjustment =
+				<(-20)	(-40)	(-20)>,
+				<0	(-40)	 (20)>,
+				<0	  0	 (20)>,
+				<0	  0	    0>;
+		qcom,cpr-init-voltage-adjustment =
+				<0		0	      0>,
+				<(10000)     (15000)	(20000)>,
+				<0		0	      0>,
+				<0		0	      0>;
+		qcom,cpr-enable;
+	};
+
+	eldo2_pm8937: eldo2 {
+		compatible = "regulator-fixed";
+		regulator-name = "eldo2_pm8937";
+		startup-delay-us = <0>;
+		enable-active-high;
+		gpio = <&pm8937_gpios 7 0>;
+		regulator-always-on;
+	};
+
+	adv_vreg: adv_vreg {
+		compatible = "regulator-fixed";
+		regulator-name = "adv_vreg";
+		startup-delay-us = <400>;
+		enable-active-high;
+		gpio = <&pm8937_gpios 8 0>;
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
index 8d80a40..d202d99 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-cpu.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-level = <2>;
@@ -84,6 +85,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_1: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -102,6 +104,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_2: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -120,6 +123,7 @@
 			efficiency = <1024>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
 			next-level-cache = <&L2_0>;
+			#cooling-cells = <2>;
 			L1_I_3: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -138,6 +142,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L2_1: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-level = <2>;
@@ -162,6 +167,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_101: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -180,6 +186,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_102: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
@@ -198,6 +205,7 @@
 			efficiency = <1126>;
 			sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_1>;
 			next-level-cache = <&L2_1>;
+			#cooling-cells = <2>;
 			L1_I_103: l1-icache {
 			      compatible = "arm,arch-cache";
 			      qcom,dump-size = <0x8800>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
new file mode 100644
index 0000000..96e8591
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-gpu.dtsi
@@ -0,0 +1,269 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	pil_gpu: qcom,kgsl-hyp {
+		compatible = "qcom,pil-tz-generic";
+		qcom,pas-id = <13>;
+		qcom,firmware-name = "a506_zap";
+		memory-region = <&gpu_mem>;
+		clocks = <&clock_gcc clk_gcc_crypto_clk>,
+		<&clock_gcc clk_gcc_crypto_ahb_clk>,
+		<&clock_gcc clk_gcc_crypto_axi_clk>,
+		<&clock_gcc clk_crypto_clk_src>;
+		clock-names = "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,proxy-clock-names = "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <80000000>;
+	};
+
+	msm_bus: qcom,kgsl-busmon {
+		label = "kgsl-busmon";
+		compatible = "qcom,kgsl-busmon";
+	};
+
+	gpubw: qcom,gpubw {
+		compatible = "qcom,devbw";
+		governor = "bw_vbif";
+		qcom,src-dst-ports = <26 512>;
+		/*
+		 * The active-only flag is used while registering the bus
+		 * governor. It helps release the bus vote when the CPU
+		 * subsystem is inactive.
+		 */
+		qcom,active-only;
+		qcom,bw-tbl =
+			< 0    >, /*  off */
+			< 1611 >, /* 1. DDR:211.20 MHz BIMC: 105.60 MHz */
+			< 2124 >, /* 2. DDR:278.40 MHz BIMC: 139.20 MHz */
+			< 2929 >, /* 3. DDR:384.00 MHz BIMC: 192.00 MHz */
+			< 3222 >, /* 4. DDR:422.40 MHz BIMC: 211.20 MHz */
+			< 4248 >, /* 5. DDR:556.80 MHz BIMC: 278.40 MHz */
+			< 5126 >, /* 6. DDR:672.00 MHz BIMC: 336.00 MHz */
+			< 5859 >, /* 7. DDR:768.00 MHz BIMC: 384.00 MHz */
+			< 6152 >, /* 8. DDR:806.40 MHz BIMC: 403.20 MHz */
+			< 6445 >, /* 9. DDR:844.80 MHz BIMC: 422.40 MHz */
+			< 7104 >; /*10. DDR:931.20 MHz BIMC: 465.60 MHz */
+	};
+
+	msm_gpu: qcom,kgsl-3d0@1c00000 {
+		label = "kgsl-3d0";
+		compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
+		status = "ok";
+		reg = <0x1c00000 0x40000>;
+		reg-names = "kgsl_3d0_reg_memory";
+		interrupts = <0 33 0>;
+		interrupt-names = "kgsl_3d0_irq";
+		qcom,id = <0>;
+		qcom,chipid = <0x05000600>;
+
+		qcom,initial-pwrlevel = <4>;
+
+		qcom,idle-timeout = <80>; //msecs
+		qcom,deep-nap-timeout = <100>; //msecs
+		qcom,strtstp-sleepwake;
+
+		qcom,highest-bank-bit = <14>;
+
+		qcom,snapshot-size = <1048576>; //bytes
+
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_gfx3d_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gfx_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gpu_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_timer_clk>,
+			<&clock_gcc_gfx clk_gcc_oxili_aon_clk>;
+
+		clock-names = "core_clk", "iface_clk",
+			      "mem_iface_clk", "alt_mem_iface_clk",
+			      "rbbmtimer_clk", "alwayson_clk";
+
+		/* Bus Scale Settings */
+		qcom,gpubw-dev = <&gpubw>;
+		qcom,bus-control;
+		qcom,bus-width = <16>;
+		qcom,msm-bus,name = "grp3d";
+		qcom,msm-bus,num-cases = <11>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<26 512 0 0>,	     /*  off          */
+				<26 512 0 1689600>, /* 1. 211.20 MHz */
+				<26 512 0 2227200>, /* 2. 278.40 MHz */
+				<26 512 0 3072000>, /* 3. 384.00 MHz */
+				<26 512 0 3379200>, /* 4.  422.40 MHz */
+				<26 512 0 4454400>, /* 5. 556.80 MHz */
+				<26 512 0 5376000>, /* 6. 672.00 MHz */
+				<26 512 0 6144000>, /* 7. 768.00 MHz */
+				<26 512 0 6451200>, /* 8. 806.40 MHz */
+				<26 512 0 6758400>, /* 9. 844.80 MHz */
+				<26 512 0 7449600>; /*10. 931.20 MHz */
+
+		/* GDSC regulator names */
+		regulator-names = "vddcx", "vdd";
+		/* GDSC oxili regulators */
+		vddcx-supply = <&gdsc_oxili_cx>;
+		vdd-supply = <&gdsc_oxili_gx>;
+
+		/* CPU latency parameter */
+		qcom,pm-qos-active-latency = <213>;
+		qcom,pm-qos-wakeup-latency = <213>;
+
+		/* Quirks */
+		qcom,gpu-quirk-two-pass-use-wfi;
+		qcom,gpu-quirk-dp2clockgating-disable;
+		qcom,gpu-quirk-lmloadkill-disable;
+
+		/* Trace bus */
+		coresight-id = <67>;
+		coresight-name = "coresight-gfx";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_mm>;
+		coresight-child-ports = <6>;
+
+		/* Enable context aware freq. scaling */
+		qcom,enable-ca-jump;
+
+		/* Context aware jump busy penalty in us */
+		qcom,ca-busy-penalty = <12000>;
+
+		/* Context aware jump target power level */
+		qcom,ca-target-pwrlevel = <3>;
+
+		/* GPU Mempools */
+		qcom,gpu-mempools {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "qcom,gpu-mempools";
+
+			qcom,mempool-max-pages = <32768>;
+
+			/* 4K Page Pool configuration */
+			qcom,gpu-mempool@0 {
+				reg = <0>;
+				qcom,mempool-page-size = <4096>;
+			};
+			/* 64K Page Pool configuration */
+			qcom,gpu-mempool@1 {
+				reg = <1>;
+				qcom,mempool-page-size  = <65536>;
+			};
+		};
+
+		/* Power levels */
+		qcom,gpu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			compatible = "qcom,gpu-pwrlevels";
+
+			/* TURBO */
+			qcom,gpu-pwrlevel@0 {
+				reg = <0>;
+				qcom,gpu-freq = <650000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <10>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM+ */
+			qcom,gpu-pwrlevel@1 {
+				reg = <1>;
+				qcom,gpu-freq = <560000000>;
+				qcom,bus-freq = <10>;
+				qcom,bus-min = <8>;
+				qcom,bus-max = <10>;
+			};
+
+			/* NOM */
+			qcom,gpu-pwrlevel@2 {
+				reg = <2>;
+				qcom,gpu-freq = <510000000>;
+				qcom,bus-freq = <9>;
+				qcom,bus-min = <6>;
+				qcom,bus-max = <10>;
+			};
+
+			/* SVS+ */
+			qcom,gpu-pwrlevel@3 {
+				reg = <3>;
+				qcom,gpu-freq = <400000000>;
+				qcom,bus-freq = <7>;
+				qcom,bus-min = <5>;
+				qcom,bus-max = <8>;
+			};
+
+			/* SVS */
+			qcom,gpu-pwrlevel@4 {
+				reg = <4>;
+				qcom,gpu-freq = <320000000>;
+				qcom,bus-freq = <4>;
+				qcom,bus-min = <2>;
+				qcom,bus-max = <6>;
+			};
+
+		       /* Low SVS */
+			qcom,gpu-pwrlevel@5 {
+				reg = <5>;
+				qcom,gpu-freq = <216000000>;
+				qcom,bus-freq = <1>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+
+		       /* Min SVS */
+			qcom,gpu-pwrlevel@6 {
+				reg = <6>;
+				qcom,gpu-freq = <133300000>;
+				qcom,bus-freq = <1>;
+				qcom,bus-min = <1>;
+				qcom,bus-max = <4>;
+			};
+			/* XO */
+			qcom,gpu-pwrlevel@7 {
+				reg = <7>;
+				qcom,gpu-freq = <19200000>;
+				qcom,bus-freq = <0>;
+				qcom,bus-min = <0>;
+				qcom,bus-max = <0>;
+			};
+		};
+	};
+
+	kgsl_msm_iommu: qcom,kgsl-iommu@1c40000 {
+		compatible = "qcom,kgsl-smmu-v2";
+
+		reg = <0x1c40000 0x10000>;
+		qcom,protect = <0x40000 0x10000>;
+		qcom,micro-mmu-control = <0x6000>;
+
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			 <&clock_gcc_gfx clk_gcc_bimc_gfx_clk>;
+
+		clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+
+		qcom,secure_align_mask = <0xfff>;
+		qcom,retention;
+		gfx3d_user: gfx3d_user {
+			compatible = "qcom,smmu-kgsl-cb";
+			label = "gfx3d_user";
+			iommus = <&kgsl_smmu 0>;
+			qcom,gpu-offset = <0x48000>;
+		};
+		gfx3d_secure: gfx3d_secure {
+			compatible = "qcom,smmu-kgsl-cb";
+			iommus = <&kgsl_smmu 2>;
+			memory-region = <&secure_mem>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
new file mode 100644
index 0000000..34004b0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@8 { /* CP_MM HEAP */
+			reg = <8>;
+			memory-region = <&secure_mem>;
+			qcom,ion-heap-type = "SECURE_DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
index 0cbb0f2..da4f4df 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
@@ -264,4 +264,19 @@
 			};
 		};
 	};
+
+	qcom,rpm-stats@200000 {
+		compatible = "qcom,rpm-stats";
+		reg = <0x200000 0x1000>, <0x290014 0x4>, <0x29001c 0x4>;
+		reg-names = "phys_addr_base", "offset_addr";
+	};
+
+	qcom,rpm-master-stats@60150 {
+		compatible = "qcom,rpm-master-stats";
+		reg = <0x60150 0x5000>;
+		qcom,masters = "APSS", "MPSS", "PRONTO", "TZ", "LPASS";
+		qcom,master-stats-version = <2>;
+		qcom,master-offset = <4096>;
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
index e4634c4..9468181 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-regulator.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,14 @@
 					<RPM_SMD_REGULATOR_LEVEL_TURBO>;
 			qcom,use-voltage-level;
 		};
+
+		cx_cdev: regulator-cx-cdev {
+			compatible = "qcom,regulator-cooling-device";
+			regulator-cdev-supply = <&pm8953_s2_floor_level>;
+			regulator-levels = <RPM_SMD_REGULATOR_LEVEL_NOM
+					RPM_SMD_REGULATOR_LEVEL_RETENTION>;
+			#cooling-cells = <2>;
+		};
 	};
 
 	rpm-regulator-smpa3 {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
new file mode 100644
index 0000000..208ef41
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-thermal.dtsi
@@ -0,0 +1,1022 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+&soc {
+	qmi-tmd-devices {
+		compatible = "qcom,qmi_cooling_devices";
+
+		modem {
+			qcom,instance-id = <0x0>;
+
+			modem_pa: modem_pa {
+				qcom,qmi-dev-name = "pa";
+				#cooling-cells = <2>;
+			};
+
+			modem_proc: modem_proc {
+				qcom,qmi-dev-name = "modem";
+				#cooling-cells = <2>;
+			};
+
+			modem_current: modem_current {
+				qcom,qmi-dev-name = "modem_current";
+				#cooling-cells = <2>;
+			};
+
+			modem_vdd: modem_vdd {
+				qcom,qmi-dev-name = "cpuv_restriction_cold";
+				#cooling-cells = <2>;
+			};
+		};
+	};
+};
+
+&thermal_zones {
+	mdm-core-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 1>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	qdsp-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&tsens0 3>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 5>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 6>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-cpu3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc1-l2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 8>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-cpu3-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	apc0-l2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 13>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu0-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 14>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 15>;
+		thermal-governor = "user_space";
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	gpu1-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 15>;
+		thermal-governor = "step_wise";
+		trips {
+			gpu_trip0: gpu-trip0 {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+
+	deca-cpu-max-step {
+		polling-delay-passive = <50>;
+		polling-delay = <100>;
+		thermal-governor = "step_wise";
+		trips {
+			cpu_trip: cpu-trip {
+				temperature = <95000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu1_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu2_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu3_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu4_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu5_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu6_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			cpu7_cdev {
+				trip = <&cpu_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	pop-mem-step {
+		polling-delay-passive = <250>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 2>;
+		thermal-governor = "step_wise";
+		trips {
+			pop_trip: pop-trip {
+				temperature = <70000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			pop_cdev0 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev1 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev2 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev3 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev4 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev5 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev6 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+			pop_cdev7 {
+				trip = <&pop_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-1)>;
+			};
+		};
+	};
+
+	apc1-cpu0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 4>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu0_trip: apc1-cpu0-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu4_cdev {
+				trip = <&apc1_cpu0_trip>;
+				cooling-device =
+					<&CPU4 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 5>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu1_trip: apc1-cpu1-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu5_cdev {
+				trip = <&apc1_cpu1_trip>;
+				cooling-device =
+					<&CPU5 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 6>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu2_trip: apc1-cpu2-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu6_cdev {
+				trip = <&apc1_cpu2_trip>;
+				cooling-device =
+					<&CPU6 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc1-cpu3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 7>;
+		thermal-governor = "step_wise";
+		trips {
+			apc1_cpu3_trip: apc1-cpu3-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu7_cdev {
+				trip = <&apc1_cpu3_trip>;
+				cooling-device =
+					<&CPU7 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu0-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 9>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu0_trip: apc0-cpu0-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_cdev {
+				trip = <&apc0_cpu0_trip>;
+				cooling-device =
+					<&CPU0 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu1-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 10>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu1_trip: apc0-cpu1-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu1_cdev {
+				trip = <&apc0_cpu1_trip>;
+				cooling-device =
+					<&CPU1 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu2-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 11>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu2_trip: apc0-cpu2-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu2_cdev {
+				trip = <&apc0_cpu2_trip>;
+				cooling-device =
+					<&CPU2 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	apc0-cpu3-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&tsens0 12>;
+		thermal-governor = "step_wise";
+		trips {
+			apc0_cpu3_trip: apc0-cpu3-trip {
+				temperature = <105000>;
+				hysteresis = <15000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu3_cdev {
+				trip = <&apc0_cpu3_trip>;
+				cooling-device =
+					<&CPU3 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
+			};
+		};
+	};
+
+	mdm-core-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 1>;
+		tracks-low;
+		trips {
+			mdm_core_trip: mdm-core-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&mdm_core_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	qdsp-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 2>;
+		tracks-low;
+		trips {
+			qdsp_trip: qdsp-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&qdsp_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	camera-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 3>;
+		tracks-low;
+		trips {
+			camera_trip: camera-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&camera_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 4>;
+		tracks-low;
+		trips {
+			cpu4_trip: apc1-cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu4_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 5>;
+		tracks-low;
+		trips {
+			cpu5_trip: apc1-cpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu5_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 6>;
+		tracks-low;
+		trips {
+			cpu6_trip: apc1-cpu2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu6_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-cpu3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 7>;
+		tracks-low;
+		trips {
+			cpu7_trip: apc1-cpu3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu7_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc1-l2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 8>;
+		tracks-low;
+		trips {
+			apc1_l2_trip: apc1-l2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&apc1_l2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 9>;
+		tracks-low;
+		trips {
+			cpu0_trip: apc0-cpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 10>;
+		tracks-low;
+		trips {
+			cpu1_trip: apc0-cpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 11>;
+		tracks-low;
+		trips {
+			cpu2_trip: apc0-cpu2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-cpu3-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 12>;
+		tracks-low;
+		trips {
+			cpu3_trip: apc0-cpu3-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&cpu3_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	apc0-l2-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 13>;
+		tracks-low;
+		trips {
+			apc0_l2_trip: apc0-l2-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&apc0_l2_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu0-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 14>;
+		tracks-low;
+		trips {
+			gpu0_trip: gpu0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+
+	gpu1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 15>;
+		tracks-low;
+		trips {
+			gpu1_trip: gpu1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&CPU0 (THERMAL_MAX_LIMIT - 4)
+						(THERMAL_MAX_LIMIT - 4)>;
+			};
+			cx_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&gpu1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 428e125..0d7932b 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,32 @@
 		bootargs = "sched_enable_hmp=1 sched_enable_power_aware=1";
 	};
 
+	firmware: firmware {
+		android {
+			compatible = "android,firmware";
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait";
+					status = "ok";
+				};
+				system {
+					compatible = "android,system";
+					dev = "/dev/block/platform/soc/7824900.sdhci/by-name/system";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait";
+					status = "ok";
+				};
+
+			};
+		};
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -134,6 +160,9 @@
 #include "msm8953-pm.dtsi"
 #include "msm8953-bus.dtsi"
 #include "msm8953-coresight.dtsi"
+#include "msm8953-ion.dtsi"
+#include "msm-arm-smmu-8953.dtsi"
+#include "msm8953-gpu.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -274,217 +303,7 @@
 		qcom,pipe-attr-ee;
 	};
 
-	thermal_zones: thermal-zones {
-		mdm-core-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 1>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		qdsp-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 2>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		camera-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-governor = "user_space";
-			thermal-sensors = <&tsens0 3>;
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 4>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 5>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 6>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_cpu3-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 7>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc1_l2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 8>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 9>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 10>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 11>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_cpu3-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 12>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		apc0_l2-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 13>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		gpu0-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 14>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-
-		gpu1-usr {
-			polling-delay-passive = <0>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 15>;
-			thermal-governor = "user_space";
-			trips {
-				active-config0 {
-					temperature = <125000>;
-					hysteresis = <1000>;
-					type = "passive";
-				};
-			};
-		};
-	};
+	thermal_zones: thermal-zones {};
 
 	tsens0: tsens@4a8000 {
 		compatible = "qcom,msm8953-tsens";
@@ -522,13 +341,13 @@
 		clock-names = "core_clk_src", "core_clk",
 				"iface_clk", "bus_clk";
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_tzlog: tz-log@08600720 {
 		compatible = "qcom,tz-log";
 		reg = <0x08600720 0x2000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_rng: qrng@e3000 {
@@ -544,7 +363,7 @@
 			<1 618 0 800>;          /* 100 MB/s */
 		clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
 		clock-names = "iface_clk";
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_crypto: qcrypto@720000 {
@@ -577,7 +396,7 @@
 		qcom,use-sw-hmac-algo;
 		qcom,use-sw-aead-algo;
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_cedev: qcedev@720000 {
@@ -603,7 +422,7 @@
 		clock-names = "core_clk_src", "core_clk",
 				"iface_clk", "bus_clk";
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	blsp1_uart0: serial@78af000 {
@@ -1786,6 +1605,47 @@
 		qcom,reset-ep-after-lpm-resume;
 	};
 
+	qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x04080000 0x100>,
+		      <0x0194f000 0x010>,
+		      <0x01950000 0x008>,
+		      <0x01951000 0x008>,
+		      <0x04020000 0x040>,
+		      <0x01871000 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem", "halt_nc",
+				 "rmb_base", "restart_reg";
+
+		interrupts = <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>;
+		vdd_mss-supply = <&pm8953_s1>;
+		vdd_cx-supply = <&pm8953_s2_level>;
+		vdd_cx-voltage = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_mx-supply = <&pm8953_s7_level_ao>;
+		vdd_mx-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+		vdd_pll-supply = <&pm8953_l7>;
+		qcom,vdd_pll = <1800000>;
+		vdd_mss-uV = <RPM_SMD_REGULATOR_LEVEL_TURBO>;
+
+		clocks = <&clock_gcc clk_xo_pil_mss_clk>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk", "mem_clk";
+		qcom,proxy-clock-names = "xo";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+
+		qcom,pas-id = <5>;
+		qcom,pil-mss-memsetup;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,qdsp6v56-1-10;
+		qcom,reset-clk;
+
+		memory-region = <&modem_mem>;
+	};
+
 	qcom,lpass@c200000 {
 		compatible = "qcom,pil-tz-generic";
 		reg = <0xc200000 0x00100>;
@@ -1794,6 +1654,7 @@
 		vdd_cx-supply = <&pm8953_s2_level>;
 		qcom,proxy-reg-names = "vdd_cx";
 		qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+		qcom,mas-crypto = <&mas_crypto>;
 
 		clocks = <&clock_gcc clk_xo_pil_lpass_clk>,
 			 <&clock_gcc clk_gcc_crypto_clk>,
@@ -1825,6 +1686,8 @@
 		vdd_pronto_pll-supply = <&pm8953_l7>;
 		proxy-reg-names = "vdd_pronto_pll";
 		vdd_pronto_pll-uV-uA = <1800000 18000>;
+		qcom,mas-crypto = <&mas_crypto>;
+
 		clocks = <&clock_gcc clk_xo_pil_pronto_clk>,
 			 <&clock_gcc clk_gcc_crypto_clk>,
 			 <&clock_gcc clk_gcc_crypto_ahb_clk>,
@@ -1847,12 +1710,49 @@
 		memory-region = <&wcnss_fw_mem>;
 	};
 
+	qcom,venus@1de0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x1de0000 0x4000>;
+
+		vdd-supply = <&gdsc_venus>;
+		qcom,proxy-reg-names = "vdd";
+
+		clocks = <&clock_gcc clk_gcc_venus0_vcodec0_clk>,
+			<&clock_gcc clk_gcc_venus0_ahb_clk>,
+			<&clock_gcc clk_gcc_venus0_axi_clk>,
+			<&clock_gcc clk_gcc_crypto_clk>,
+			<&clock_gcc clk_gcc_crypto_ahb_clk>,
+			<&clock_gcc clk_gcc_crypto_axi_clk>,
+			<&clock_gcc clk_crypto_clk_src>;
+
+		clock-names = "core_clk", "iface_clk", "bus_clk",
+				"scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+
+		qcom,proxy-clock-names = "core_clk", "iface_clk",
+					"bus_clk", "scm_core_clk",
+					"scm_iface_clk", "scm_bus_clk",
+					"scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <80000000>;
+
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<63 512 0 0>,
+				<63 512 0 304000>;
+		qcom,pas-id = <9>;
+		qcom,proxy-timeout-ms = <100>;
+		qcom,firmware-name = "venus";
+		memory-region = <&venus_mem>;
+	};
 };
 
 #include "pm8953-rpm-regulator.dtsi"
 #include "pm8953.dtsi"
 #include "msm8953-regulator.dtsi"
 #include "msm-gdsc-8916.dtsi"
+#include "msm8953-thermal.dtsi"
 
 &gdsc_venus {
 	clock-names = "bus_clk", "core_clk";
diff --git a/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi
new file mode 100644
index 0000000..33a5e16
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8937-rpm-regulator.dtsi
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+	rpm-regulator-smpa1 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <1>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s1 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s1";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-smpa4 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "smpa";
+		qcom,resource-id = <4>;
+		qcom,regulator-type = <1>;
+		qcom,hpm-min-load = <100000>;
+		status = "disabled";
+
+		regulator-s4 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_s4";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa2 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <2>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l2 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l2";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa3 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <3>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l3 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l3";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa5 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <5>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l5 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l5";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa6 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <6>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l6 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l6";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa7 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <7>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l7 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l7";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa8 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <8>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l8 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l8";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa9 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <9>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l9 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l9";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa10 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <10>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l10 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l10";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa11 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <11>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l11 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l11";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa12 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <12>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l12 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l12";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa13 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <13>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l13 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l13";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa14 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <14>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l14 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l14";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa15 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <15>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l15 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l15";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa16 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <16>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <5000>;
+		status = "disabled";
+
+		regulator-l16 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l16";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa17 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <17>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l17 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l17";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa19 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <19>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l19 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l19";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa22 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <22>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l22 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l22";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+
+	rpm-regulator-ldoa23 {
+		compatible = "qcom,rpm-smd-regulator-resource";
+		qcom,resource-name = "ldoa";
+		qcom,resource-id = <23>;
+		qcom,regulator-type = <0>;
+		qcom,hpm-min-load = <10000>;
+		status = "disabled";
+
+		regulator-l23 {
+			compatible = "qcom,rpm-smd-regulator";
+			regulator-name = "pm8937_l23";
+			qcom,set = <3>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8937.dtsi b/arch/arm64/boot/dts/qcom/pm8937.dtsi
new file mode 100644
index 0000000..086a929
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8937.dtsi
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+
+	qcom,pm8937@0 {
+		spmi-slave-container;
+		reg = <0x0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pm8937_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0>,
+				<0x0 0x8 0x1>,
+				<0x0 0x8 0x4>,
+				<0x0 0x8 0x5>;
+			interrupt-names = "kpdpwr", "resin",
+				"resin-bark", "kpdpwr-resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,system-reset;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,pull-up = <1>;
+				linux,code = <114>;
+			};
+		};
+
+		pm8937_temp_alarm: qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x0 0x24 0x0>;
+			label = "pm8937_tz";
+			qcom,channel-num = <8>;
+			qcom,threshold-set = <0>;
+			qcom,temp_alarm-vadc = <&pm8937_vadc>;
+		};
+
+		pm8937_coincell: qcom,coincell@2800 {
+			compatible = "qcom,qpnp-coincell";
+			reg = <0x2800 0x100>;
+		};
+
+		pm8937_rtc: qcom,pm8937_rtc {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-rtc";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-rtc-write = <0>;
+			qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+			qcom,pm8937_rtc_rw@6000 {
+				reg = <0x6000 0x100>;
+			};
+
+			qcom,pm8937_rtc_alarm@6100 {
+				reg = <0x6100 0x100>;
+				interrupts = <0x0 0x61 0x1>;
+			};
+		};
+
+		pm8937_mpps: mpps {
+			compatible = "qcom,qpnp-pin";
+			spmi-dev-container;
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8937-mpp";
+
+			mpp@a000 {
+				reg = <0xa000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			mpp@a100 {
+				/* MPP2 - PA_THERM config */
+				reg = <0xa100 0x100>;
+				qcom,pin-num = <2>;
+				qcom,mode = <4>; /* AIN input */
+				qcom,invert = <1>; /* Enable MPP */
+				qcom,ain-route = <1>; /* AMUX 6 */
+				qcom,master-en = <1>;
+				qcom,src-sel = <0>; /* Function constant */
+			};
+
+			mpp@a200 {
+				reg = <0xa200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			mpp@a300 {
+				/* MPP4 - CASE_THERM config */
+				reg = <0xa300 0x100>;
+				qcom,pin-num = <4>;
+				qcom,mode = <4>; /* AIN input */
+				qcom,invert = <1>; /* Enable MPP */
+				qcom,ain-route = <3>; /* AMUX 8 */
+				qcom,master-en = <1>;
+				qcom,src-sel = <0>; /* Function constant */
+			};
+		};
+
+		pm8937_gpios: gpios {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm8937-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+		};
+
+		pm8937_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@5 {
+				label = "vcoin";
+				reg = <5>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@7 {
+				label = "vph_pwr";
+				reg = <7>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@8 {
+				label = "die_temp";
+				reg = <8>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@c {
+				label = "ref_buf_625mv";
+				reg = <0xc>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@36 {
+				label = "pa_therm0";
+				reg = <0x36>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@11 {
+				label = "pa_therm1";
+				reg = <0x11>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@32 {
+				label = "xo_therm";
+				reg = <0x32>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@3c {
+				label = "xo_therm_buf";
+				reg = <0x3c>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@13 {
+				label = "case_therm";
+				reg = <0x13>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+		};
+
+		pm8937_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts =	<0x0 0x34 0x0>,
+					<0x0 0x34 0x3>,
+					<0x0 0x34 0x4>;
+			interrupt-names =	"eoc-int-en-set",
+						"high-thr-en-set",
+						"low-thr-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,adc_tm-vadc = <&pm8937_vadc>;
+
+			chan@36 {
+				label = "pa_therm0";
+				reg = <0x36>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x48>;
+				qcom,thermal-node;
+			};
+
+			chan@7 {
+				label = "vph_pwr";
+				reg = <0x7>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,btm-channel-number = <0x68>;
+			};
+		};
+
+	};
+
+	pm8937_1: qcom,pm8937@1 {
+		spmi-slave-container;
+		reg = <0x1>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pm8937_pwm: pwm@bc00 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xbc00 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8953.dtsi b/arch/arm64/boot/dts/qcom/pm8953.dtsi
index 0ddb9f5..d77de72 100644
--- a/arch/arm64/boot/dts/qcom/pm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8953.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,7 +49,7 @@
 			};
 		};
 
-		pm8953_temp_alarm: qcom,temp-alarm@2400 {
+		pm8953_tz: qcom,temp-alarm@2400 {
 			compatible = "qcom,qpnp-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
@@ -57,6 +57,7 @@
 			qcom,channel-num = <8>;
 			qcom,threshold-set = <0>;
 			qcom,temp_alarm-vadc = <&pm8953_vadc>;
+			#thermal-sensor-cells = <0>;
 		};
 
 		pm8953_coincell: qcom,coincell@2800 {
@@ -65,105 +66,34 @@
 		};
 
 		pm8953_mpps: mpps {
-			compatible = "qcom,qpnp-pin";
-			spmi-dev-container;
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+
+			interrupts = <0x0 0xa0 0 IRQ_TYPE_NONE>,
+				<0x0 0xa1 0 IRQ_TYPE_NONE>,
+				<0x0 0xa2 0 IRQ_TYPE_NONE>,
+				<0x0 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8953_mpp1", "pm8953_mpp2",
+					"pm8953_mpp3", "pm8953_mpp4";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8953-mpp";
-
-			mpp@a000 {
-				reg = <0xa000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			mpp@a100 {
-				reg = <0xa100 0x100>;
-				qcom,pin-num = <2>;
-				/* MPP2 - PA_THERM config */
-				qcom,mode = <4>; /* AIN input */
-				qcom,invert = <1>; /* Enable MPP */
-				qcom,ain-route = <1>; /* AMUX 6 */
-				qcom,master-en = <1>;
-				qcom,src-sel = <0>; /* Function constant */
-			};
-
-			mpp@a200 {
-				reg = <0xa200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			mpp@a300 {
-				reg = <0xa300 0x100>;
-				qcom,pin-num = <4>;
-				/* MPP4 - CASE_THERM config */
-				qcom,mode = <4>; /* AIN input */
-				qcom,invert = <1>; /* Enable MPP */
-				qcom,ain-route = <3>; /* AMUX 8 */
-				qcom,master-en = <1>;
-				qcom,src-sel = <0>; /* Function constant */
-			};
 		};
 
 		pm8953_gpios: gpios {
-			spmi-dev-container;
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x800>;
+
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+				<0x0 0xc3 0 IRQ_TYPE_NONE>,
+				<0x0 0xc6 0 IRQ_TYPE_NONE>,
+				<0x0 0xc7 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8953_gpio1", "pm8953_gpio4",
+					"pm8953_gpio7", "pm8953_gpio8";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8953-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <2 3 5 6>;
 		};
 
 		pm8953_vadc: vadc@3100 {
@@ -373,3 +303,29 @@
 		};
 	};
 };
+
+&thermal_zones {
+	pm8953_tz {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8953_tz>;
+
+		trips {
+			pm8953_trip0: pm8953-trip0 {
+				temperature = <105000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8953_trip1: pm8953-trip1 {
+				temperature = <125000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			pm8953_trip2: pm8953-trip2 {
+				temperature = <145000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
new file mode 100644
index 0000000..fe844bf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -0,0 +1,126 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+	qcom,pmi632@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi632_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		pmi632_pon: qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+		};
+
+		pmi632_tz: qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pmi632_tz";
+			#thermal-sensor-cells = <0>;
+		};
+
+		pmi632_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x800>;
+			interrupts = <0x2 0xc1 0 IRQ_TYPE_NONE>,
+					<0x2 0xc2 0 IRQ_TYPE_NONE>,
+					<0x2 0xc3 0 IRQ_TYPE_NONE>,
+					<0x2 0xc4 0 IRQ_TYPE_NONE>,
+					<0x2 0xc5 0 IRQ_TYPE_NONE>,
+					<0x2 0xc6 0 IRQ_TYPE_NONE>,
+					<0x2 0xc7 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi632_gpio2", "pmi632_gpio3",
+					"pmi632_gpio4", "pmi632_gpio5",
+					"pmi632_gpio6", "pmi632_gpio7",
+					"pmi632_gpio8";
+			gpio-controller;
+			#gpio-cells = <2>;
+			qcom,gpios-disallowed = <1>;
+		};
+	};
+
+	pmi632_3: qcom,pmi632@3 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pmi632_vib: qcom,vibrator@5700 {
+			compatible = "qcom,qpnp-vibrator-ldo";
+			reg = <0x5700 0x100>;
+			qcom,vib-ldo-volt-uv = <1504000>;
+			qcom,vib-overdrive-volt-uv = <3544000>;
+		};
+
+		pmi632_pwm_1: pwm@b300 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb300 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <1>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi632_pwm_2: pwm@b400 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb400 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <2>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi632_pwm_3: pwm@b500 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb500 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <3>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi632_pwm_4: pwm@b600 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb600 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <4>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		pmi632_pwm_5: pwm@b700 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb700 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <5>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8937.dtsi b/arch/arm64/boot/dts/qcom/pmi8937.dtsi
new file mode 100644
index 0000000..d5b9945
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi8937.dtsi
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+
+	qcom,pmi8937@2 {
+		spmi-slave-container;
+		reg = <0x2>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8937_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+			qcom,hard-reset-poweroff-type =
+				<PON_POWER_OFF_SHUTDOWN>;
+
+			pon_perph_reg: qcom,pon_perph_reg {
+				regulator-name = "pon_spare_reg";
+				qcom,pon-spare-reg-addr = <0x8c>;
+				qcom,pon-spare-reg-bit = <1>;
+			};
+		};
+
+		pmi8937_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x2 0x31 0x0>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@0 {
+				label = "usbin";
+				reg = <0>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@1 {
+				label = "dcin";
+				reg = <1>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@3 {
+				label = "vchg_sns";
+				reg = <3>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@d {
+				label = "chg_temp";
+				reg = <0xd>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <16>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@43 {
+				label = "usb_dp";
+				reg = <0x43>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@44 {
+				label = "usb_dm";
+				reg = <0x44>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+		};
+
+		pmi8937_mpps: mpps {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pmi8937-mpp";
+
+			mpp@a000 {
+				reg = <0xa000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			mpp@a100 {
+				reg = <0xa100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			mpp@a300 {
+				reg = <0xa300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+		};
+
+		pmi8937_charger: qcom,qpnp-smbcharger {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-smbcharger";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,iterm-ma = <100>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,resume-delta-mv = <200>;
+			qcom,chg-inhibit-fg;
+			qcom,rparasitic-uohm = <100000>;
+			qcom,bms-psy-name = "bms";
+			qcom,thermal-mitigation = <1500 700 600 0>;
+			qcom,parallel-usb-min-current-ma = <1400>;
+			qcom,parallel-usb-9v-min-current-ma = <900>;
+			qcom,parallel-allowed-lowering-ma = <500>;
+			qcom,pmic-revid = <&pmi8937_revid>;
+			qcom,force-aicl-rerun;
+			qcom,aicl-rerun-period-s = <180>;
+			qcom,autoadjust-vfloat;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =	<0x2 0x10 0x0>,
+						<0x2 0x10 0x1>,
+						<0x2 0x10 0x2>,
+						<0x2 0x10 0x3>,
+						<0x2 0x10 0x4>,
+						<0x2 0x10 0x5>,
+						<0x2 0x10 0x6>,
+						<0x2 0x10 0x7>;
+
+				interrupt-names =	"chg-error",
+							"chg-inhibit",
+							"chg-prechg-sft",
+							"chg-complete-chg-sft",
+							"chg-p2f-thr",
+							"chg-rechg-thr",
+							"chg-taper-thr",
+							"chg-tcc-thr";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts =	<0x2 0x11 0x0>,
+						<0x2 0x11 0x1>,
+						<0x2 0x11 0x3>;
+				interrupt-names =	"otg-fail",
+							"otg-oc",
+							"usbid-change";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =	<0x2 0x12 0x0>,
+						<0x2 0x12 0x1>,
+						<0x2 0x12 0x2>,
+						<0x2 0x12 0x3>,
+						<0x2 0x12 0x4>,
+						<0x2 0x12 0x5>,
+						<0x2 0x12 0x6>,
+						<0x2 0x12 0x7>;
+
+				interrupt-names =	"batt-hot",
+							"batt-warm",
+							"batt-cold",
+							"batt-cool",
+							"batt-ov",
+							"batt-low",
+							"batt-missing",
+							"batt-term-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =	<0x2 0x13 0x0>,
+						<0x2 0x13 0x1>,
+						<0x2 0x13 0x2>,
+						<0x2 0x13 0x5>;
+
+				interrupt-names =	"usbin-uv",
+							"usbin-ov",
+							"usbin-src-det",
+							"aicl-done";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =	<0x2 0x14 0x0>,
+						<0x2 0x14 0x1>;
+				interrupt-names =	"dcin-uv",
+							"dcin-ov";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =	<0x2 0x16 0x0>,
+						<0x2 0x16 0x1>,
+						<0x2 0x16 0x2>,
+						<0x2 0x16 0x3>,
+						<0x2 0x16 0x4>,
+						<0x2 0x16 0x5>;
+
+				interrupt-names =	"power-ok",
+							"temp-shutdown",
+							"wdog-timeout",
+							"flash-fail",
+							"otst2",
+							"otst3";
+			};
+
+			smbcharger_charger_otg: qcom,smbcharger-boost-otg {
+				regulator-name = "smbcharger_charger_otg";
+			};
+		};
+
+		pmi8937_fg: qcom,fg {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-fg";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,resume-soc = <95>;
+			status = "okay";
+			qcom,bcl-lm-threshold-ma = <127>;
+			qcom,bcl-mh-threshold-ma = <405>;
+			qcom,fg-iterm-ma = <150>;
+			qcom,fg-chg-iterm-ma = <100>;
+			qcom,pmic-revid = <&pmi8937_revid>;
+			qcom,fg-cutoff-voltage-mv = <3500>;
+			qcom,cycle-counter-en;
+			qcom,capacity-learning-on;
+
+			qcom,fg-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts =	<0x2 0x40 0x0>,
+						<0x2 0x40 0x1>,
+						<0x2 0x40 0x2>,
+						<0x2 0x40 0x3>,
+						<0x2 0x40 0x4>,
+						<0x2 0x40 0x5>,
+						<0x2 0x40 0x6>;
+
+				interrupt-names =	"high-soc",
+							"low-soc",
+							"full-soc",
+							"empty-soc",
+							"delta-soc",
+							"first-est-done",
+							"update-soc";
+			};
+
+			qcom,fg-batt@4100 {
+				reg = <0x4100 0x100>;
+				interrupts =	<0x2 0x41 0x0>,
+						<0x2 0x41 0x1>,
+						<0x2 0x41 0x2>,
+						<0x2 0x41 0x3>,
+						<0x2 0x41 0x4>,
+						<0x2 0x41 0x5>,
+						<0x2 0x41 0x6>,
+						<0x2 0x41 0x7>;
+
+				interrupt-names =	"soft-cold",
+							"soft-hot",
+							"vbatt-low",
+							"batt-ided",
+							"batt-id-req",
+							"batt-unknown",
+							"batt-missing",
+							"batt-match";
+			};
+
+			qcom,revid-tp-rev@1f1 {
+				reg = <0x1f1 0x1>;
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts =	<0x2 0x44 0x0>,
+						<0x2 0x44 0x2>;
+
+				interrupt-names =	"mem-avail",
+							"data-rcvry-sug";
+			};
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl";
+			reg = <0x4200 0xff>;
+			reg-names = "fg_user_adc";
+			interrupts = <0x2 0x42 0x0>,
+					<0x2 0x42 0x1>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-scaling-factor = <39000>;
+			qcom,vbat-gain-numerator = <1>;
+			qcom,vbat-gain-denominator = <128>;
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-scaling-factor = <39000>;
+			qcom,ibat-gain-numerator = <1>;
+			qcom,ibat-gain-denominator = <128>;
+			qcom,ibat-offset-numerator = <1200>;
+			qcom,ibat-offset-denominator = <1>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		qcom,leds@a100 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xa100 0x100>;
+			label = "mpp";
+		};
+	};
+
+	qcom,pmi8937@3 {
+		spmi-slave-container;
+		reg = <0x3>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8937_pwm: pwm@b000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb000 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+
+		wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x2>;
+			interrupt-names = "sc-irq";
+			status = "okay";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-mv = <350>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29500>;
+			qcom,ilim-ma = <980>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,dim-method = "linear";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <20000>;
+			qcom,led-strings-list = [00 01];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,cons-sync-write-delay-us = <1000>;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			qcom,headroom = <500>;
+			qcom,startup-dly = <128>;
+			qcom,clamp-curr = <200>;
+			qcom,pmic-charger-support;
+			qcom,self-check-enabled;
+			qcom,thermal-derate-enabled;
+			qcom,thermal-derate-threshold = <100>;
+			qcom,thermal-derate-rate = "5_PERCENT";
+			qcom,current-ramp-enabled;
+			qcom,ramp_up_step = "6P7_US";
+			qcom,ramp_dn_step = "6P7_US";
+			qcom,vph-pwr-droop-enabled;
+			qcom,vph-pwr-droop-threshold = <3000>;
+			qcom,vph-pwr-droop-debounce-time = <10>;
+			qcom,headroom-sense-ch0-enabled;
+			qcom,headroom-sense-ch1-enabled;
+			qcom,pmic-revid = <&pmi8937_revid>;
+
+			pmi8937_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,default-led-trigger =
+						"flash0_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <0>;
+				qcom,current = <625>;
+			};
+
+			pmi8937_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,default-led-trigger =
+						"flash1_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <1>;
+				qcom,current = <625>;
+			};
+
+			pmi8937_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,default-led-trigger =
+						"torch0_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <0>;
+				qcom,current = <120>;
+			};
+
+			pmi8937_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,default-led-trigger =
+						"torch1_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <1>;
+				qcom,current = <120>;
+			};
+
+			pmi8937_switch: qcom,switch {
+				label = "switch";
+				qcom,led-name = "led:switch";
+				qcom,default-led-trigger =
+						"switch_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <2>;
+				qcom,current = <625>;
+				reg0 {
+					regulator-name = "pon_spare_reg";
+				};
+			};
+		};
+
+		pmi_haptic: qcom,haptic@c000 {
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0>,
+				<0x3 0xc0 0x1>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8937_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,play-mode = "direct";
+			qcom,wave-play-rate-us = <5263>;
+			qcom,actuator-type = "lra";
+			qcom,wave-shape = "square";
+			qcom,vmax-mv = <2000>;
+			qcom,ilim-ma = <800>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,use-play-irq;
+			qcom,use-sc-irq;
+			qcom,wave-samples = [3e 3e 3e 3e 3e 3e 3e 3e];
+			qcom,wave-rep-cnt = <1>;
+			qcom,wave-samp-rep-cnt = <1>;
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-high-z = "opt1";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+			qcom,misc-trim-error-rc19p2-clk-reg-present;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8940.dtsi b/arch/arm64/boot/dts/qcom/pmi8940.dtsi
new file mode 100644
index 0000000..59001ba
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmi8940.dtsi
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+
+	qcom,pmi8940@2 {
+		spmi-slave-container;
+		reg = <0x2>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8940_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+			qcom,hard-reset-poweroff-type =
+				<PON_POWER_OFF_SHUTDOWN>;
+
+			pon_perph_reg: qcom,pon_perph_reg {
+				regulator-name = "pon_spare_reg";
+				qcom,pon-spare-reg-addr = <0x8c>;
+				qcom,pon-spare-reg-bit = <1>;
+			};
+		};
+
+		pmi8940_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x2 0x31 0x0>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1800>;
+			qcom,vadc-poll-eoc;
+
+			chan@0 {
+				label = "usbin";
+				reg = <0>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@1 {
+				label = "dcin";
+				reg = <1>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <4>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@3 {
+				label = "vchg_sns";
+				reg = <3>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@9 {
+				label = "ref_625mv";
+				reg = <9>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@a {
+				label = "ref_1250v";
+				reg = <0xa>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@d {
+				label = "chg_temp";
+				reg = <0xd>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <16>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@43 {
+				label = "usb_dp";
+				reg = <0x43>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@44 {
+				label = "usb_dm";
+				reg = <0x44>;
+				qcom,decimation = <0>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+		};
+
+		pmi8940_mpps: mpps {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pmi8940-mpp";
+
+			mpp@a000 {
+				reg = <0xa000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			mpp@a100 {
+				reg = <0xa100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			mpp@a300 {
+				reg = <0xa300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+		};
+
+		pmi8940_charger: qcom,qpnp-smbcharger {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-smbcharger";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,iterm-ma = <100>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,resume-delta-mv = <200>;
+			qcom,chg-inhibit-fg;
+			qcom,rparasitic-uohm = <100000>;
+			qcom,bms-psy-name = "bms";
+			qcom,thermal-mitigation = <1500 700 600 0>;
+			qcom,pmic-revid = <&pmi8940_revid>;
+			qcom,force-aicl-rerun;
+			qcom,aicl-rerun-period-s = <180>;
+			qcom,autoadjust-vfloat;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =	<0x2 0x10 0x0>,
+						<0x2 0x10 0x1>,
+						<0x2 0x10 0x2>,
+						<0x2 0x10 0x3>,
+						<0x2 0x10 0x4>,
+						<0x2 0x10 0x5>,
+						<0x2 0x10 0x6>,
+						<0x2 0x10 0x7>;
+
+				interrupt-names =	"chg-error",
+							"chg-inhibit",
+							"chg-prechg-sft",
+							"chg-complete-chg-sft",
+							"chg-p2f-thr",
+							"chg-rechg-thr",
+							"chg-taper-thr",
+							"chg-tcc-thr";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts =	<0x2 0x11 0x0>,
+						<0x2 0x11 0x1>,
+						<0x2 0x11 0x3>;
+				interrupt-names =	"otg-fail",
+							"otg-oc",
+							"usbid-change";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =	<0x2 0x12 0x0>,
+						<0x2 0x12 0x1>,
+						<0x2 0x12 0x2>,
+						<0x2 0x12 0x3>,
+						<0x2 0x12 0x4>,
+						<0x2 0x12 0x5>,
+						<0x2 0x12 0x6>,
+						<0x2 0x12 0x7>;
+
+				interrupt-names =	"batt-hot",
+							"batt-warm",
+							"batt-cold",
+							"batt-cool",
+							"batt-ov",
+							"batt-low",
+							"batt-missing",
+							"batt-term-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =	<0x2 0x13 0x0>,
+						<0x2 0x13 0x1>,
+						<0x2 0x13 0x2>,
+						<0x2 0x13 0x5>;
+
+				interrupt-names =	"usbin-uv",
+							"usbin-ov",
+							"usbin-src-det",
+							"aicl-done";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =	<0x2 0x14 0x0>,
+						<0x2 0x14 0x1>;
+				interrupt-names =	"dcin-uv",
+							"dcin-ov";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =	<0x2 0x16 0x0>,
+						<0x2 0x16 0x1>,
+						<0x2 0x16 0x2>,
+						<0x2 0x16 0x3>,
+						<0x2 0x16 0x4>,
+						<0x2 0x16 0x5>;
+
+				interrupt-names =	"power-ok",
+							"temp-shutdown",
+							"wdog-timeout",
+							"flash-fail",
+							"otst2",
+							"otst3";
+			};
+
+			smbcharger_charger_otg: qcom,smbcharger-boost-otg {
+				regulator-name = "smbcharger_charger_otg";
+			};
+		};
+
+		pmi8940_fg: qcom,fg {
+			spmi-dev-container;
+			compatible = "qcom,qpnp-fg";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,resume-soc = <95>;
+			status = "okay";
+			qcom,bcl-lm-threshold-ma = <127>;
+			qcom,bcl-mh-threshold-ma = <405>;
+			qcom,fg-iterm-ma = <150>;
+			qcom,fg-chg-iterm-ma = <100>;
+			qcom,pmic-revid = <&pmi8940_revid>;
+			qcom,fg-cutoff-voltage-mv = <3500>;
+			qcom,cycle-counter-en;
+			qcom,capacity-learning-on;
+
+			qcom,fg-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts =	<0x2 0x40 0x0>,
+						<0x2 0x40 0x1>,
+						<0x2 0x40 0x2>,
+						<0x2 0x40 0x3>,
+						<0x2 0x40 0x4>,
+						<0x2 0x40 0x5>,
+						<0x2 0x40 0x6>;
+
+				interrupt-names =	"high-soc",
+							"low-soc",
+							"full-soc",
+							"empty-soc",
+							"delta-soc",
+							"first-est-done",
+							"update-soc";
+			};
+
+			qcom,fg-batt@4100 {
+				reg = <0x4100 0x100>;
+				interrupts =	<0x2 0x41 0x0>,
+						<0x2 0x41 0x1>,
+						<0x2 0x41 0x2>,
+						<0x2 0x41 0x3>,
+						<0x2 0x41 0x4>,
+						<0x2 0x41 0x5>,
+						<0x2 0x41 0x6>,
+						<0x2 0x41 0x7>;
+
+				interrupt-names =	"soft-cold",
+							"soft-hot",
+							"vbatt-low",
+							"batt-ided",
+							"batt-id-req",
+							"batt-unknown",
+							"batt-missing",
+							"batt-match";
+			};
+
+			qcom,revid-tp-rev@1f1 {
+				reg = <0x1f1 0x1>;
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts =	<0x2 0x44 0x0>,
+						<0x2 0x44 0x2>;
+
+				interrupt-names =	"mem-avail",
+							"data-rcvry-sug";
+			};
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl";
+			reg = <0x4200 0xff>;
+			reg-names = "fg_user_adc";
+			interrupts = <0x2 0x42 0x0>,
+					<0x2 0x42 0x1>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-scaling-factor = <39000>;
+			qcom,vbat-gain-numerator = <1>;
+			qcom,vbat-gain-denominator = <128>;
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-scaling-factor = <39000>;
+			qcom,ibat-gain-numerator = <1>;
+			qcom,ibat-gain-denominator = <128>;
+			qcom,ibat-offset-numerator = <1200>;
+			qcom,ibat-offset-denominator = <1>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+
+		qcom,leds@a100 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xa100 0x100>;
+			label = "mpp";
+		};
+	};
+
+	qcom,pmi8940@3 {
+		spmi-slave-container;
+		reg = <0x3>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		pmi8940_pwm: pwm@b000 {
+			status = "disabled";
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb000 0x100>;
+			reg-names = "qpnp-lpg-channel-base";
+			qcom,channel-id = <0>;
+			qcom,supported-sizes = <6>, <9>;
+			#pwm-cells = <2>;
+		};
+
+		labibb: qpnp-labibb-regulator {
+			status = "disabled";
+			spmi-dev-container;
+			compatible = "qcom,qpnp-labibb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8940_revid>;
+
+			ibb_regulator: qcom,ibb@dc00 {
+				reg = <0xdc00 0x100>;
+				reg-names = "ibb_reg";
+				regulator-name = "ibb_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				qcom,qpnp-ibb-min-voltage = <1400000>;
+				qcom,qpnp-ibb-step-size = <100000>;
+				qcom,qpnp-ibb-slew-rate = <2000000>;
+				qcom,qpnp-ibb-use-default-voltage;
+				qcom,qpnp-ibb-init-voltage = <5500000>;
+				qcom,qpnp-ibb-init-amoled-voltage = <4000000>;
+				qcom,qpnp-ibb-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-ibb-soft-start = <1000>;
+
+				qcom,qpnp-ibb-discharge-resistor = <32>;
+				qcom,qpnp-ibb-lab-pwrup-delay = <8000>;
+				qcom,qpnp-ibb-lab-pwrdn-delay = <8000>;
+				qcom,qpnp-ibb-en-discharge;
+
+				qcom,qpnp-ibb-full-pull-down;
+				qcom,qpnp-ibb-pull-down-enable;
+				qcom,qpnp-ibb-switching-clock-frequency =
+									<1480>;
+				qcom,qpnp-ibb-limit-maximum-current = <1550>;
+				qcom,qpnp-ibb-debounce-cycle = <16>;
+				qcom,qpnp-ibb-limit-max-current-enable;
+				qcom,qpnp-ibb-ps-enable;
+			};
+
+			lab_regulator: qcom,lab@de00 {
+				reg = <0xde00 0x100>;
+				reg-names = "lab";
+				regulator-name = "lab_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6000000>;
+
+				qcom,qpnp-lab-min-voltage = <4600000>;
+				qcom,qpnp-lab-step-size = <100000>;
+				qcom,qpnp-lab-slew-rate = <5000>;
+				qcom,qpnp-lab-use-default-voltage;
+				qcom,qpnp-lab-init-voltage = <5500000>;
+				qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+				qcom,qpnp-lab-init-lcd-voltage = <5500000>;
+
+				qcom,qpnp-lab-soft-start = <800>;
+
+				qcom,qpnp-lab-full-pull-down;
+				qcom,qpnp-lab-pull-down-enable;
+				qcom,qpnp-lab-switching-clock-frequency =
+									<1600>;
+				qcom,qpnp-lab-limit-maximum-current = <800>;
+				qcom,qpnp-lab-limit-max-current-enable;
+				qcom,qpnp-lab-ps-threshold = <40>;
+				qcom,qpnp-lab-ps-enable;
+				qcom,qpnp-lab-nfet-size = <100>;
+				qcom,qpnp-lab-pfet-size = <100>;
+				qcom,qpnp-lab-max-precharge-time = <500>;
+			};
+		};
+
+		wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x2>;
+			interrupt-names = "sc-irq";
+			status = "okay";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-mv = <350>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29500>;
+			qcom,ilim-ma = <980>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,dim-method = "linear";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <20000>;
+			qcom,en-phase-stag;
+			qcom,led-strings-list = [00 01];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,cons-sync-write-delay-us = <1000>;
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led";
+			status = "okay";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			qcom,headroom = <500>;
+			qcom,startup-dly = <128>;
+			qcom,clamp-curr = <200>;
+			qcom,pmic-charger-support;
+			qcom,self-check-enabled;
+			qcom,thermal-derate-enabled;
+			qcom,thermal-derate-threshold = <100>;
+			qcom,thermal-derate-rate = "5_PERCENT";
+			qcom,current-ramp-enabled;
+			qcom,ramp_up_step = "6P7_US";
+			qcom,ramp_dn_step = "6P7_US";
+			qcom,vph-pwr-droop-enabled;
+			qcom,vph-pwr-droop-threshold = <3000>;
+			qcom,vph-pwr-droop-debounce-time = <10>;
+			qcom,headroom-sense-ch0-enabled;
+			qcom,headroom-sense-ch1-enabled;
+			qcom,pmic-revid = <&pmi8940_revid>;
+
+			pmi8940_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,default-led-trigger =
+						"flash0_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <0>;
+				qcom,current = <625>;
+			};
+
+			pmi8940_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,default-led-trigger =
+						"flash1_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <1>;
+				qcom,current = <625>;
+			};
+
+			pmi8940_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,default-led-trigger =
+						"torch0_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <0>;
+				qcom,current = <120>;
+			};
+
+			pmi8940_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,default-led-trigger =
+						"torch1_trigger";
+				qcom,max-current = <200>;
+				qcom,id = <1>;
+				qcom,current = <120>;
+			};
+
+			pmi8940_switch: qcom,switch {
+				label = "switch";
+				qcom,led-name = "led:switch";
+				qcom,default-led-trigger =
+						"switch_trigger";
+				qcom,max-current = <1000>;
+				qcom,duration = <1280>;
+				qcom,id = <2>;
+				qcom,current = <625>;
+				reg0 {
+					regulator-name = "pon_spare_reg";
+				};
+			};
+		};
+
+		pmi_haptic: qcom,haptic@c000 {
+			compatible = "qcom,qpnp-haptic";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0>,
+				<0x3 0xc0 0x1>;
+			interrupt-names = "sc-irq", "play-irq";
+			qcom,pmic-revid = <&pmi8940_revid>;
+			vcc_pon-supply = <&pon_perph_reg>;
+			qcom,play-mode = "direct";
+			qcom,wave-play-rate-us = <5263>;
+			qcom,actuator-type = "lra";
+			qcom,wave-shape = "square";
+			qcom,vmax-mv = <2000>;
+			qcom,ilim-ma = <800>;
+			qcom,sc-deb-cycles = <8>;
+			qcom,int-pwm-freq-khz = <505>;
+			qcom,en-brake;
+			qcom,brake-pattern = [03 03 00 00];
+			qcom,use-play-irq;
+			qcom,use-sc-irq;
+			qcom,wave-samples = [3e 3e 3e 3e 3e 3e 3e 3e];
+			qcom,wave-rep-cnt = <1>;
+			qcom,wave-samp-rep-cnt = <1>;
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-high-z = "opt1";
+			qcom,lra-res-cal-period = <4>;
+			qcom,correct-lra-drive-freq;
+			qcom,misc-trim-error-rc19p2-clk-reg-present;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index e731f5b..97be32d 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -140,57 +140,30 @@
 		};
 
 		pmi8950_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x200>;
+
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+				<0x2 0xc1 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8950_gpio1", "pmi8950_gpio2";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8950-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
 		};
 
 		pmi8950_mpps: mpps {
-			compatible = "qcom,qpnp-pin";
+			compatible = "qcom,spmi-mpp";
+			reg = <0xa000 0x400>;
+
+			interrupts = <0x2 0xa0 0 IRQ_TYPE_NONE>,
+				<0x2 0xa1 0 IRQ_TYPE_NONE>,
+				<0x2 0xa2 0 IRQ_TYPE_NONE>,
+				<0x2 0xa3 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8950_mpp1", "pmi8950_mpp2",
+					  "pmi8950_mpp3", "pmi8950_mpp4";
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8950-mpp";
-
-			mpp@a000 {
-				reg = <0xa000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			mpp@a100 {
-				reg = <0xa100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			mpp@a200 {
-				reg = <0xa200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			mpp@a300 {
-				reg = <0xa300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
 		};
 
 		pmi8950_charger: qcom,qpnp-smbcharger {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
new file mode 100644
index 0000000..820f877
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "qcs605-360camera.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660+PM660L VRcamera";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <8 5>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
index 8caad4b..c62f39d 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,7 +20,7 @@
 / {
 	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L 360camera";
 	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
-	qcom,board-id = <0x0000000b 1>;
+	qcom,board-id = <8 5>;
 	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
index 6670edd..0983acf 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,14 @@
 	status = "disabled";
 };
 
+&dsi_dual_nt36850_truly_cmd_display {
+	status = "disabled";
+};
+
+&dsi_dual_nt35597_truly_video {
+	status = "disabled";
+};
+
 &int_codec {
 	qcom,model = "sdm670-360cam-snd-card";
 	qcom,audio-routing =
diff --git a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
index a881ec4..382ba65 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi
@@ -157,9 +157,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l1: regulator-pm660-l1 {
 			regulator-name = "pm660_l1";
@@ -167,7 +168,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -175,9 +176,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l2: regulator-pm660-l2 {
 			regulator-name = "pm660_l2";
@@ -185,7 +187,7 @@
 			regulator-min-microvolt = <1144000>;
 			regulator-max-microvolt = <1256000>;
 			qcom,init-voltage = <1144000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -193,9 +195,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l3: regulator-pm660-l3 {
 			regulator-name = "pm660_l3";
@@ -203,7 +206,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1352000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -211,9 +214,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l5: regulator-pm660-l5 {
 			regulator-name = "pm660_l5";
@@ -221,7 +225,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -229,9 +233,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l6: regulator-pm660-l6 {
 			regulator-name = "pm660_l6";
@@ -239,7 +244,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -260,9 +265,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l8: regulator-pm660-l8 {
 			regulator-name = "pm660_l8";
@@ -270,7 +276,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1952000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -278,9 +284,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l9: regulator-pm660-l9 {
 			regulator-name = "pm660_l9";
@@ -288,7 +295,7 @@
 			regulator-min-microvolt = <1616000>;
 			regulator-max-microvolt = <1984000>;
 			qcom,init-voltage = <1616000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -296,9 +303,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l10: regulator-pm660-l10 {
 			regulator-name = "pm660_l10";
@@ -306,7 +314,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1952000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -314,9 +322,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l11: regulator-pm660-l11 {
 			regulator-name = "pm660_l11";
@@ -324,7 +333,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -332,9 +341,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l12: regulator-pm660-l12 {
 			regulator-name = "pm660_l12";
@@ -342,7 +352,7 @@
 			regulator-min-microvolt = <1616000>;
 			regulator-max-microvolt = <1984000>;
 			qcom,init-voltage = <1616000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -350,9 +360,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l13: regulator-pm660-l13 {
 			regulator-name = "pm660_l13";
@@ -360,7 +371,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -368,9 +379,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l14: regulator-pm660-l14 {
 			regulator-name = "pm660_l14";
@@ -378,7 +390,7 @@
 			regulator-min-microvolt = <1696000>;
 			regulator-max-microvolt = <1904000>;
 			qcom,init-voltage = <1696000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -386,9 +398,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l15: regulator-pm660-l15 {
 			regulator-name = "pm660_l15";
@@ -396,7 +409,7 @@
 			regulator-min-microvolt = <2896000>;
 			regulator-max-microvolt = <3000000>;
 			qcom,init-voltage = <2896000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -404,9 +417,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l16: regulator-pm660-l16 {
 		regulator-name = "pm660_l16";
@@ -414,7 +428,7 @@
 			regulator-min-microvolt = <2896000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <2896000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -422,9 +436,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l17: regulator-pm660-l17 {
 			regulator-name = "pm660_l17";
@@ -432,7 +447,7 @@
 			regulator-min-microvolt = <2920000>;
 			regulator-max-microvolt = <3232000>;
 			qcom,init-voltage = <2920000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -440,9 +455,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l18: regulator-pm660-l18 {
 			regulator-name = "pm660_l18";
@@ -450,7 +466,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <3000000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -458,9 +474,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l19: regulator-pm660-l19 {
 			regulator-name = "pm660_l19";
@@ -468,7 +485,7 @@
 			regulator-min-microvolt = <2944000>;
 			regulator-max-microvolt = <3304000>;
 			qcom,init-voltage = <2944000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts
new file mode 100644
index 0000000..616b8e5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-pmi632-cdp-s2.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 CDP S2";
+	compatible = "qcom,sdm450-cdp", "qcom,sdm450", "qcom,cdp";
+	qcom,board-id = <1 2>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi
new file mode 100644
index 0000000..220ec20
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-cdp-s2.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-cdp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
new file mode 100644
index 0000000..b52bccf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-pmi632-mtp-s3.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 MTP S3";
+	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
+	qcom,board-id = <8 3>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
new file mode 100644
index 0000000..adb7f47
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632-mtp-s3.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-mtp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts
new file mode 100644
index 0000000..3770ebe
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdm450.dtsi"
+#include "sdm450-qrd-sku4.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 QRD SKU4";
+	compatible = "qcom,sdm450-qrd", "qcom,sdm450", "qcom,qrd";
+	qcom,board-id = <0xb 1>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
new file mode 100644
index 0000000..0a98528
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm450-qrd-sku4.dtsi
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8953-qrd.dtsi"
+
+&i2c_3 {
+	status = "disabled";
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm450.dtsi b/arch/arm64/boot/dts/qcom/sdm450.dtsi
index b080ff7..2f3e8c4 100644
--- a/arch/arm64/boot/dts/qcom/sdm450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm450.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,3 +34,16 @@
 &CPU7 {
 	efficiency = <1024>;
 };
+
+&clock_gcc_gfx {
+	compatible = "qcom,gcc-gfx-sdm450";
+	qcom,gfxfreq-corner =
+		<         0   0 >,
+		< 133330000   1 >,  /* Min SVS   */
+		< 216000000   2 >,  /* Low SVS   */
+		< 320000000   3 >,  /* SVS       */
+		< 400000000   4 >,  /* SVS Plus  */
+		< 510000000   5 >,  /* NOM       */
+		< 560000000   6 >,  /* Nom Plus  */
+		< 600000000   7 >;  /* Turbo     */
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
index 6510fa2..5dd5c0d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -121,13 +121,6 @@
 		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
 	};
 
-	wcd_gnd_mic_swap_gpio: msm_cdc_pinctrl_gnd_mic_swap {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&wcd_gnd_mic_swap_active>;
-		pinctrl-1 = <&wcd_gnd_mic_swap_idle>;
-	};
-
 	cdc_pdm_gpios: cdc_pdm_pinctrl {
 		compatible = "qcom,msm-cdc-pinctrl";
 		pinctrl-names = "aud_active", "aud_sleep";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
index 18b0cd8..c40fff6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
@@ -11,247 +11,245 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	led_flash_rear: qcom,camera-flash@0 {
-		cell-index = <0>;
-		reg = <0x00 0x00>;
-		compatible = "qcom,camera-flash";
-		flash-source = <&pm660l_flash0 &pm660l_flash1>;
-		torch-source = <&pm660l_torch0 &pm660l_torch1>;
-		switch-source = <&pm660l_switch0>;
-		status = "ok";
-	};
+&led_flash_rear {
+	cell-index = <0>;
+	reg = <0x00 0x00>;
+	compatible = "qcom,camera-flash";
+	flash-source = <&pm660l_flash0 &pm660l_flash1>;
+	torch-source = <&pm660l_torch0 &pm660l_torch1>;
+	switch-source = <&pm660l_switch0>;
+	status = "ok";
+};
 
-	led_flash_front: qcom,camera-flash@1 {
-		cell-index = <1>;
-		reg = <0x01 0x00>;
-		compatible = "qcom,camera-flash";
-		flash-source = <&pm660l_flash2>;
-		torch-source = <&pm660l_torch2>;
-		switch-source = <&pm660l_switch1>;
-		status = "ok";
-	};
+&led_flash_front {
+	cell-index = <1>;
+	reg = <0x01 0x00>;
+	compatible = "qcom,camera-flash";
+	flash-source = <&pm660l_flash2>;
+	torch-source = <&pm660l_torch2>;
+	switch-source = <&pm660l_switch1>;
+	status = "ok";
+};
 
-	actuator_regulator: gpio-regulator@0 {
-		compatible = "regulator-fixed";
-		reg = <0x00 0x00>;
-		regulator-name = "actuator_regulator";
-		regulator-min-microvolt = <2800000>;
-		regulator-max-microvolt = <2800000>;
-		regulator-enable-ramp-delay = <100>;
-		enable-active-high;
-		gpio = <&tlmm 27 0>;
-	};
+&actuator_regulator {
+	compatible = "regulator-fixed";
+	reg = <0x00 0x00>;
+	regulator-name = "actuator_regulator";
+	regulator-min-microvolt = <2800000>;
+	regulator-max-microvolt = <2800000>;
+	regulator-enable-ramp-delay = <100>;
+	enable-active-high;
+	gpio = <&tlmm 27 0>;
+};
 
-	camera_ldo: gpio-regulator@2 {
-		compatible = "regulator-fixed";
-		reg = <0x02 0x00>;
-		regulator-name = "camera_ldo";
-		regulator-min-microvolt = <1352000>;
-		regulator-max-microvolt = <1352000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&pm660l_gpios 4 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&camera_dvdd_en_default>;
-		vin-supply = <&pm660_s6>;
-	};
+&camera_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x02 0x00>;
+	regulator-name = "camera_ldo";
+	regulator-min-microvolt = <1352000>;
+	regulator-max-microvolt = <1352000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&pm660l_gpios 4 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&camera_dvdd_en_default>;
+	vin-supply = <&pm660_s6>;
+};
 
-	camera_rear_ldo: gpio-regulator@1 {
-		compatible = "regulator-fixed";
-		reg = <0x01 0x00>;
-		regulator-name = "camera_rear_ldo";
-		regulator-min-microvolt = <1352000>;
-		regulator-max-microvolt = <1352000>;
-		regulator-enable-ramp-delay = <135>;
-		enable-active-high;
-		gpio = <&pm660l_gpios 4 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&camera_rear_dvdd_en_default>;
-		vin-supply = <&pm660_s6>;
-	};
+&camera_rear_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x01 0x00>;
+	regulator-name = "camera_rear_ldo";
+	regulator-min-microvolt = <1352000>;
+	regulator-max-microvolt = <1352000>;
+	regulator-enable-ramp-delay = <135>;
+	enable-active-high;
+	gpio = <&pm660l_gpios 4 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&camera_rear_dvdd_en_default>;
+	vin-supply = <&pm660_s6>;
+};
 
-	camera_vio_ldo: gpio-regulator@3 {
-		compatible = "regulator-fixed";
-		reg = <0x03 0x00>;
-		regulator-name = "camera_vio_ldo";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&tlmm 29 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&cam_sensor_rear_vio>;
-		vin-supply = <&pm660_s4>;
-	};
+&camera_vio_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x03 0x00>;
+	regulator-name = "camera_vio_ldo";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&tlmm 29 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&cam_sensor_rear_vio>;
+	vin-supply = <&pm660_s4>;
+};
 
-	camera_vana_ldo: gpio-regulator@4 {
-		compatible = "regulator-fixed";
-		reg = <0x04 0x00>;
-		regulator-name = "camera_vana_ldo";
-		regulator-min-microvolt = <2850000>;
-		regulator-max-microvolt = <2850000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&tlmm 8 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&cam_sensor_rear_vana>;
-		vin-supply = <&pm660l_bob>;
-	};
+&camera_vana_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x04 0x00>;
+	regulator-name = "camera_vana_ldo";
+	regulator-min-microvolt = <2850000>;
+	regulator-max-microvolt = <2850000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&tlmm 8 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&cam_sensor_rear_vana>;
+	vin-supply = <&pm660l_bob>;
+};
+
+&actuator_rear {
+	cell-index = <0>;
+	reg = <0x0>;
+	compatible = "qcom,actuator";
+	cci-master = <0>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+};
+
+&actuator_front {
+	cell-index = <1>;
+	reg = <0x1>;
+	compatible = "qcom,actuator";
+	cci-master = <1>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+};
+
+&ois_rear {
+	cell-index = <0>;
+	reg = <0x0>;
+	compatible = "qcom,ois";
+	cci-master = <0>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+	status = "disabled";
+};
+
+&eeprom_rear {
+	cell-index = <0>;
+	reg = <0>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_rear_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-load-current = <0 80000 105000 0 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk0_active
+			&cam_sensor_rear_active>;
+	pinctrl-1 = <&cam_sensor_mclk0_suspend
+			&cam_sensor_rear_suspend>;
+	gpios = <&tlmm 13 0>,
+		<&tlmm 30 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK0",
+				"CAM_RESET0";
+	sensor-mode = <0>;
+	cci-master = <0>;
+	status = "ok";
+	clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
+};
+
+&eeprom_rear_aux {
+	cell-index = <1>;
+	reg = <0x1>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+	rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+	rgltr-load-current = <105000 0 80000 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk1_active
+			&cam_sensor_rear2_active>;
+	pinctrl-1 = <&cam_sensor_mclk1_suspend
+			&cam_sensor_rear2_suspend>;
+	gpios = <&tlmm 14 0>,
+		<&tlmm 28 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK1",
+				"CAM_RESET1";
+	sensor-position = <0>;
+	sensor-mode = <0>;
+	cci-master = <1>;
+	status = "ok";
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
+};
+
+&eeprom_front {
+	cell-index = <2>;
+	reg = <0x2>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-load-current = <0 80000 105000 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk2_active
+			 &cam_sensor_front_active>;
+	pinctrl-1 = <&cam_sensor_mclk2_suspend
+			 &cam_sensor_front_suspend>;
+	gpios = <&tlmm 15 0>,
+		<&tlmm 9 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK2",
+				"CAM_RESET2";
+	sensor-mode = <0>;
+	cci-master = <1>;
+	status = "ok";
+	clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
 };
 
 &cam_cci {
-	actuator_rear: qcom,actuator@0 {
-		cell-index = <0>;
-		reg = <0x0>;
-		compatible = "qcom,actuator";
-		cci-master = <0>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-	};
-
-	actuator_front: qcom,actuator@1 {
-		cell-index = <1>;
-		reg = <0x1>;
-		compatible = "qcom,actuator";
-		cci-master = <1>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-	};
-
-	ois_rear: qcom,ois@0 {
-		cell-index = <0>;
-		reg = <0x0>;
-		compatible = "qcom,ois";
-		cci-master = <0>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-		status = "disabled";
-	};
-
-	eeprom_rear: qcom,eeprom@0 {
-		cell-index = <0>;
-		reg = <0>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_rear_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-load-current = <0 80000 105000 0 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk0_active
-				&cam_sensor_rear_active>;
-		pinctrl-1 = <&cam_sensor_mclk0_suspend
-				&cam_sensor_rear_suspend>;
-		gpios = <&tlmm 13 0>,
-			<&tlmm 30 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK0",
-					"CAM_RESET0";
-		sensor-mode = <0>;
-		cci-master = <0>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
-	eeprom_rear_aux: qcom,eeprom@1 {
-		cell-index = <1>;
-		reg = <0x1>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
-		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
-		rgltr-load-current = <105000 0 80000 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk1_active
-				&cam_sensor_rear2_active>;
-		pinctrl-1 = <&cam_sensor_mclk1_suspend
-				&cam_sensor_rear2_suspend>;
-		gpios = <&tlmm 14 0>,
-			<&tlmm 28 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK1",
-					"CAM_RESET1";
-		sensor-position = <0>;
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
-	eeprom_front: qcom,eeprom@2 {
-		cell-index = <2>;
-		reg = <0x2>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-load-current = <0 80000 105000 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_front_active>;
-		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_front_suspend>;
-		gpios = <&tlmm 15 0>,
-			<&tlmm 9 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK2",
-					"CAM_RESET2";
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
 	qcom,cam-sensor@0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 7928ab5..108eda5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -562,6 +562,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		status = "disabled";
+
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 9e75ee0..f287b21 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -58,6 +58,7 @@
 		qcom,initial-pwrlevel = <3>;
 
 		qcom,gpu-quirk-hfi-use-reg;
+		qcom,gpu-quirk-limit-uche-gbif-rw;
 
 		/* <HZ/12> */
 		qcom,idle-timeout = <80>;
@@ -117,7 +118,7 @@
 		cache-slices = <&llcc 12>, <&llcc 11>;
 
 		/* CPU latency parameter */
-		qcom,pm-qos-active-latency = <914>;
+		qcom,pm-qos-active-latency = <899>;
 		qcom,pm-qos-wakeup-latency = <899>;
 
 		/* Enable context aware freq. scaling */
@@ -134,6 +135,8 @@
 			#size-cells = <0>;
 			compatible = "qcom,gpu-coresight";
 
+			status = "disabled";
+
 			qcom,gpu-coresight@0 {
 				reg = <0>;
 				coresight-name = "coresight-gfx";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
index cb0a386..22e9a7a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
@@ -14,5 +14,8 @@
 &int_codec {
 	qcom,msm-mbhc-usbc-audio-supported = <1>;
 	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
-	qcom,us-euro-gpios = <&wcd_gnd_mic_swap_gpio>;
+	qcom,usbc-analog-en2-gpio = <&tlmm 40 0>;
+	pinctrl-names = "aud_active", "aud_sleep";
+	pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+	pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
index 61ef7ff..3fd1229 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,18 +21,18 @@
 			qcom,ion-heap-type = "SYSTEM";
 		};
 
-		qcom,ion-heap@22 { /* ADSP HEAP */
-			reg = <22>;
-			memory-region = <&adsp_mem>;
-			qcom,ion-heap-type = "DMA";
-		};
-
 		qcom,ion-heap@27 { /* QSEECOM HEAP */
 			reg = <27>;
 			memory-region = <&qseecom_mem>;
 			qcom,ion-heap-type = "DMA";
 		};
 
+		qcom,ion-heap@19 { /* QSEECOM TA HEAP */
+			reg = <19>;
+			memory-region = <&qseecom_ta_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
 		qcom,ion-heap@13 { /* SPSS HEAP */
 			reg = <13>;
 			memory-region = <&sp_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index ffed74c..a85060e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1521,8 +1521,8 @@
 			};
 		};
 
-		wcd_gnd_mic_swap {
-			wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
+		wcd_usbc_analog_en2 {
+			wcd_usbc_analog_en2_idle: wcd_usbc_ana_en2_idle {
 				mux {
 					pins = "gpio40";
 					function = "gpio";
@@ -1536,7 +1536,7 @@
 				};
 			};
 
-			wcd_gnd_mic_swap_active: wcd_gnd_mic_swap_active {
+			wcd_usbc_analog_en2_active: wcd_usbc_ana_en2_active {
 				mux {
 					pins = "gpio40";
 					function = "gpio";
@@ -1979,6 +1979,19 @@
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
+
+		nx30p6093_intr_default: nx30p6093_intr_default {
+			mux {
+				pins = "gpio5";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio5";
+				bias-disable;
+				input-enable;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index b330cf5..5bf8df7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -185,4 +185,8 @@
 		reg = <0xc300000 0x1000>, <0xc3f0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	qcom,rpmh-master-stats {
+		compatible = "qcom,rpmh-master-stats";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
index 37eb4cd..73d1909 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2-overlay.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,3 +30,22 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
index dada4c6..680bc17 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd-sku2.dts
@@ -24,3 +24,22 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&dsi_dual_nt36850_truly_cmd_display {
+	/delete-property/ qcom,dsi-display-active;
+};
+
+&dsi_hx8399_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_hx8399_truly_cmd_display {
+	qcom,dsi-display-active;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index cc4645f..d2a6640 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -160,9 +160,9 @@
 		    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
 		    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
 		    0x21 0x214 /* PWR_CTRL2 */
-		    0x07 0x220 /* IMP_CTRL1 */
-		    0x58 0x224 /* IMP_CTRL2 */
-		    0x77 0x240 /* TUNE1 */
+		    0x00 0x220 /* IMP_CTRL1 */
+		    0x1a 0x224 /* IMP_CTRL2 */
+		    0x47 0x240 /* TUNE1 */
 		    0x29 0x244 /* TUNE2 */
 		    0xca 0x248 /* TUNE3 */
 		    0x04 0x24c /* TUNE4 */
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
index 225a6e6..6c143e4 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,7 @@
 	 * instances only.
 	 */
 	qupv3_se6_4uart: qcom,qup_uart@0x898000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x898000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -52,7 +52,7 @@
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x89c000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -425,7 +425,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform */
 	qupv3_se9_2uart: qcom,qup_uart@0xa84000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa84000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -442,7 +442,7 @@
 
 	/* Debug UART Instance for RUMI platform */
 	qupv3_se10_2uart: qcom,qup_uart@0xa88000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa88000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -459,7 +459,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform on SDM670 */
 	qupv3_se12_2uart: qcom,qup_uart@0xa90000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa90000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 9898ada..62db873 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -158,9 +158,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660_l1>;
 		pm660_l1: regulator-pm660-l1 {
@@ -171,7 +172,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <43600>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -179,9 +180,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l2: regulator-pm660-l2 {
 			regulator-name = "pm660_l2";
@@ -189,7 +191,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -197,9 +199,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l3: regulator-pm660-l3 {
 			regulator-name = "pm660_l3";
@@ -207,7 +210,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -215,9 +218,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l5: regulator-pm660-l5 {
 			regulator-name = "pm660_l5";
@@ -225,7 +229,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -233,9 +237,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l6: regulator-pm660-l6 {
 			regulator-name = "pm660_l6";
@@ -243,7 +248,7 @@
 			regulator-min-microvolt = <1248000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1248000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -251,9 +256,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l7: regulator-pm660-l7 {
 			regulator-name = "pm660_l7";
@@ -261,7 +267,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -269,9 +275,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l8: regulator-pm660-l8 {
 			regulator-name = "pm660_l8";
@@ -279,7 +286,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -287,9 +294,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l9: regulator-pm660-l9 {
 			regulator-name = "pm660_l9";
@@ -297,7 +305,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -305,9 +313,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l10: regulator-pm660-l10 {
 			regulator-name = "pm660_l10";
@@ -315,7 +324,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -323,9 +332,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660_l11>;
 		pm660_l11: regulator-pm660-l11 {
@@ -336,7 +346,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <115000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -344,9 +354,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l12: regulator-pm660-l12 {
 			regulator-name = "pm660_l12";
@@ -354,7 +365,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -362,9 +373,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l13: regulator-pm660-l13 {
 			regulator-name = "pm660_l13";
@@ -372,7 +384,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -380,9 +392,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l14: regulator-pm660-l14 {
 			regulator-name = "pm660_l14";
@@ -390,7 +403,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -398,9 +411,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l15: regulator-pm660-l15 {
 			regulator-name = "pm660_l15";
@@ -408,7 +422,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2950000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -416,9 +430,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l16: regulator-pm660-l16 {
 		regulator-name = "pm660_l16";
@@ -426,7 +441,7 @@
 			regulator-min-microvolt = <2700000>;
 			regulator-max-microvolt = <2700000>;
 			qcom,init-voltage = <2700000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -434,9 +449,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l17: regulator-pm660-l17 {
 			regulator-name = "pm660_l17";
@@ -444,7 +460,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2950000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -452,9 +468,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660_l19: regulator-pm660-l19 {
 			regulator-name = "pm660_l19";
@@ -462,7 +479,7 @@
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -470,9 +487,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm660l_l1>;
 		pm660l_l1: regulator-pm660l-l1 {
@@ -483,7 +501,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <72000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -491,9 +509,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l2: regulator-pm660l-l2 {
 			regulator-name = "pm660l_l2";
@@ -501,7 +520,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -509,9 +528,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l3: regulator-pm660l-l3 {
 			regulator-name = "pm660l_l3";
@@ -519,7 +539,7 @@
 			regulator-min-microvolt = <2850000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <2850000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -527,9 +547,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob4";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l4: regulator-pm660l-l4 {
 			regulator-name = "pm660l_l4";
@@ -537,7 +558,7 @@
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -545,9 +566,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l5: regulator-pm660l-l5 {
 			regulator-name = "pm660l_l5";
@@ -555,7 +577,7 @@
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -563,9 +585,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l6: regulator-pm660l-l6 {
 			regulator-name = "pm660l_l6";
@@ -573,7 +596,7 @@
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3300000>;
 			qcom,init-voltage = <3008000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -581,9 +604,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l7: regulator-pm660l-l7 {
 			regulator-name = "pm660l_l7";
@@ -591,7 +615,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3100000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -599,9 +623,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldob8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm660l_l8: regulator-pm660l-l8 {
 			regulator-name = "pm660l_l8";
@@ -609,7 +634,7 @@
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3300000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 36eb7ee..ce88d14 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -26,6 +26,7 @@
 #include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
 #include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
 #include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
+#include "dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi"
 #include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 &soc {
@@ -465,6 +466,29 @@
 		ibb-supply = <&lcdb_ncp_vreg>;
 	};
 
+	dsi_hx8399_truly_cmd_display: qcom,dsi-display@16 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_hx8399_truly_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+
+		qcom,dsi-panel = <&dsi_hx8399_truly_cmd>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -625,15 +649,35 @@
 };
 
 &dsi_sim_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x0d>;
-	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x29>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
-				07 05 03 04 00];
 			qcom,display-topology = <1 0 1>,
-						<2 0 1>;
-			qcom,default-topology-index = <0>;
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@1{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@2{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
 		};
 	};
 };
@@ -709,8 +753,8 @@
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
 	qcom,mdss-dsi-display-timings {
 		 timing@0 {
-			qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
-				05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
 			qcom,display-topology = <2 0 2>,
 						<1 0 2>;
 			qcom,default-topology-index = <0>;
@@ -724,8 +768,8 @@
 	qcom,ulps-enabled;
 	qcom,mdss-dsi-display-timings {
 		 timing@0 {
-			qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
-				05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
 			qcom,display-topology = <2 0 2>,
 						<1 0 2>;
 			qcom,default-topology-index = <0>;
@@ -767,14 +811,14 @@
 };
 
 &dsi_nt35695b_truly_fhd_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x07>;
-	qcom,mdss-dsi-t-clk-pre = <0x1c>;
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
 	qcom,ulps-enabled;
 	qcom,mdss-mdp-transfer-time-us = <14500>;
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
-				05 07 05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22
+				07 07 05 03 04 00];
 			qcom,display-topology = <1 0 1>;
 			qcom,default-topology-index = <0>;
 		};
@@ -794,3 +838,16 @@
 		};
 	};
 };
+
+&dsi_hx8399_truly_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0E>;
+	qcom,mdss-dsi-t-clk-pre = <0x30>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1f 08 08 24 22 08
+				08 05 03 04 00];
+			qcom,display-topology = <1 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index a918687..7c4e682 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -397,6 +397,9 @@
 		qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
 		qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
 
+		qcom,mdss-rot-qos-cpu-mask = <0xf>;
+		qcom,mdss-rot-qos-cpu-dma-latency = <75>;
+
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
index 84c7459..2ce829d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
@@ -29,6 +29,7 @@
 &usb0 {
 	/delete-property/ iommus;
 	/delete-property/ qcom,smmu-s1-bypass;
+	qcom,pm-qos-latency = <601>; /* CPU-CLUSTER-WFI-LVL latency +1 */
 	extcon = <0>, <0>, <&eud>, <0>, <0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index e321329..dc03f88 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -545,6 +545,14 @@
 			size = <0 0x1400000>;
 		};
 
+		qseecom_ta_mem: qseecom_ta_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1000000>;
+		};
+
 		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
@@ -745,6 +753,7 @@
 			compatible = "qcom,memshare-peripheral";
 			qcom,peripheral-size = <0x500000>;
 			qcom,client-id = <1>;
+			qcom,allocate-boot-time;
 			label = "modem";
 		};
 	};
@@ -2386,7 +2395,7 @@
 
 	qcom,msm_fastrpc {
 		compatible = "qcom,msm-fastrpc-compute";
-		qcom,adsp-remoteheap-vmid = <37>;
+		qcom,adsp-remoteheap-vmid = <22 37>;
 
 		qcom,msm_fastrpc_compute_cb1 {
 			compatible = "qcom,msm-fastrpc-compute-cb";
@@ -2537,8 +2546,6 @@
 		qcom,count-unit = <0x10000>;
 		qcom,hw-timer-hz = <19200000>;
 		qcom,target-dev = <&cpubw>;
-		qcom,byte-mid-mask = <0xe000>;
-		qcom,byte-mid-match = <0xe000>;
 	};
 
 	memlat_cpu0: qcom,memlat-cpu0 {
@@ -2637,10 +2644,9 @@
 		qcom,cachemiss-ev = <0x17>;
 		qcom,core-dev-table =
 			<  576000  300000000 >,
-			<  748800  556800000 >,
-			<  998400  806400000 >,
-			< 1209660  940800000 >,
-			< 1516800 1190400000 >,
+			<  998400  556800000 >,
+			< 1209660  844800000 >,
+			< 1516800  940800000 >,
 			< 1612800 1382400000 >,
 			< 1708000 1440000000 >;
 	};
@@ -2932,3 +2938,17 @@
 &pm660_div_clk {
 	status = "ok";
 };
+
+&qupv3_se10_i2c {
+	nx30p6093: nx30p6093@36 {
+		status = "disabled";
+		compatible = "nxp,nx30p6093";
+		reg = <0x36>;
+		interrupt-parent = <&tlmm>;
+		interrupts = <5 IRQ_TYPE_NONE>;
+		nxp,long-wakeup-sec = <28800>; /* 8 hours */
+		nxp,short-wakeup-ms = <180000>; /* 3 mins */
+		pinctrl-names = "default";
+		pinctrl-0 = <&nx30p6093_intr_default>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index dffb5e0..5a88dc2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -488,8 +488,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -503,8 +503,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -518,8 +518,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -533,8 +533,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index fcfab09..a7cf880 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -548,6 +548,8 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		status = "disabled";
+
 		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index ee0ad1f..33bcaa6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -127,6 +127,8 @@
 			#size-cells = <0>;
 			compatible = "qcom,gpu-coresight";
 
+			status = "disabled";
+
 			qcom,gpu-coresight@0 {
 				reg = <0>;
 				coresight-name = "coresight-gfx";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 825f121..fc4b674 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -490,8 +490,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -505,8 +505,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -520,8 +520,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
@@ -535,8 +535,8 @@
 
 		trips {
 			active-config0 {
-				temperature = <65000>;
-				hysteresis = <1000>;
+				temperature = <125000>;
+				hysteresis = <10000>;
 				type = "passive";
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index b24ef1d..ee10cfc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -139,4 +139,8 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	qcom,rpmh-master-stats {
+		compatible = "qcom,rpmh-master-stats";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 810afde..5fce5ff 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,7 @@
 	 * instances only.
 	 */
 	qupv3_se6_4uart: qcom,qup_uart@0x898000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x898000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -50,7 +50,7 @@
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
-		compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-serial-hs";
 		reg = <0x89c000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -423,7 +423,7 @@
 
 	/* Debug UART Instance for CDP/MTP platform */
 	qupv3_se9_2uart: qcom,qup_uart@0xa84000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa84000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
@@ -440,7 +440,7 @@
 
 	/* Debug UART Instance for RUMI platform */
 	qupv3_se10_2uart: qcom,qup_uart@0xa88000 {
-		compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart";
+		compatible = "qcom,msm-geni-console";
 		reg = <0xa88000 0x4000>;
 		reg-names = "se_phys";
 		clock-names = "se-clk", "m-ahb", "s-ahb";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 00f0650..1825cd0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,30 @@
 	qcom,sw-jeita-enable;
 };
 
+&qupv3_se3_i2c {
+	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active
+			     &nfc_enable_active
+			     &nfc_clk_default>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
+};
+
 &qupv3_se10_i2c {
 	status = "ok";
 };
@@ -106,6 +130,29 @@
 	};
 };
 
+&qusb_phy0 {
+		qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x00 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x27 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+};
+
 &pmi8998_haptics {
 	qcom,vmax-mv = <1800>;
 	qcom,wave-play-rate-us = <4255>;
@@ -132,6 +179,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm8998_l20>;
+	vcc-voltage-level = <2950000 2960000>;
 	vccq2-supply = <&pm8998_s4>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 9672b94..9d7c519 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -167,9 +167,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm8998_l1>;
 		pm8998_l1: regulator-l1 {
@@ -180,7 +181,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <72000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 
 		pm8998_l1_ao: regulator-l1-ao {
@@ -189,7 +190,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 
 		regulator-l1-so {
@@ -198,7 +199,7 @@
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 			qcom,init-enable = <0>;
 		};
 	};
@@ -207,9 +208,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 30000>;
 		pm8998_l2: regulator-l2 {
 			regulator-name = "pm8998_l2";
@@ -217,7 +219,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 			regulator-always-on;
 		};
 	};
@@ -226,9 +228,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l3: regulator-l3 {
 			regulator-name = "pm8998_l3";
@@ -236,7 +239,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -257,9 +260,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l5: regulator-l5 {
 			regulator-name = "pm8998_l5";
@@ -267,7 +271,7 @@
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -275,9 +279,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l6: regulator-l6 {
 			regulator-name = "pm8998_l6";
@@ -285,7 +290,7 @@
 			regulator-min-microvolt = <1856000>;
 			regulator-max-microvolt = <1856000>;
 			qcom,init-voltage = <1856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -293,9 +298,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l7: regulator-l7 {
 			regulator-name = "pm8998_l7";
@@ -303,7 +309,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -311,9 +317,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l8: regulator-l8 {
 			regulator-name = "pm8998_l8";
@@ -321,7 +328,7 @@
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1248000>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -329,9 +336,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l9: regulator-l9 {
 			regulator-name = "pm8998_l9";
@@ -339,7 +347,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <2928000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -347,9 +355,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l10: regulator-l10 {
 			regulator-name = "pm8998_l10";
@@ -357,7 +366,7 @@
 			regulator-min-microvolt = <1704000>;
 			regulator-max-microvolt = <2928000>;
 			qcom,init-voltage = <1704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -365,9 +374,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l11: regulator-l11 {
 			regulator-name = "pm8998_l11";
@@ -375,7 +385,7 @@
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1048000>;
 			qcom,init-voltage = <1000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -383,9 +393,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l12: regulator-l12 {
 			regulator-name = "pm8998_l12";
@@ -393,7 +404,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -401,9 +412,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l13: regulator-l13 {
 			regulator-name = "pm8998_l13";
@@ -411,7 +423,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -419,9 +431,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		proxy-supply = <&pm8998_l14>;
 		pm8998_l14: regulator-l14 {
@@ -432,7 +445,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1880000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -440,9 +453,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l15: regulator-l15 {
 			regulator-name = "pm8998_l15";
@@ -450,7 +464,7 @@
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -458,9 +472,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l16: regulator-l16 {
 			regulator-name = "pm8998_l16";
@@ -468,7 +483,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -476,9 +491,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l17: regulator-l17 {
 			regulator-name = "pm8998_l17";
@@ -486,7 +502,7 @@
 			regulator-min-microvolt = <1304000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1304000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -494,9 +510,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l18: regulator-l18 {
 			regulator-name = "pm8998_l18";
@@ -504,7 +521,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -512,9 +529,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l19: regulator-l19 {
 			regulator-name = "pm8998_l19";
@@ -522,7 +540,7 @@
 			regulator-min-microvolt = <2856000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <2856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -530,9 +548,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa20";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l20: regulator-l20 {
 			regulator-name = "pm8998_l20";
@@ -540,7 +559,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -548,9 +567,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa21";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l21: regulator-l21 {
 			regulator-name = "pm8998_l21";
@@ -558,7 +578,7 @@
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -566,9 +586,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa22";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l22: regulator-l22 {
 			regulator-name = "pm8998_l22";
@@ -576,7 +597,7 @@
 			regulator-min-microvolt = <2864000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <2864000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -584,9 +605,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa23";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
@@ -594,7 +616,7 @@
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -602,9 +624,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa24";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l24-parent-supply = <&pm8998_l12>;
 		pm8998_l24: regulator-l24 {
@@ -613,7 +636,7 @@
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -621,9 +644,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa25";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
@@ -631,7 +655,7 @@
 			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3000000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -639,9 +663,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa26";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		proxy-supply = <&pm8998_l26>;
 		pm8998_l26: regulator-l26 {
@@ -652,7 +677,7 @@
 			qcom,proxy-consumer-enable;
 			qcom,proxy-consumer-current = <43600>;
 			qcom,init-voltage = <1200000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -673,9 +698,10 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa28";
+		qcom,regulator-type = "pmic4-ldo";
 		qcom,supported-modes =
-			<RPMH_REGULATOR_MODE_LDO_LPM
-			 RPMH_REGULATOR_MODE_LDO_HPM>;
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
 		qcom,mode-threshold-currents = <0 1>;
 		pm8998_l28: regulator-l28 {
 			regulator-name = "pm8998_l28";
@@ -683,7 +709,7 @@
 			regulator-min-microvolt = <2856000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <2856000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
 
@@ -715,6 +741,7 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "bobb1";
+		qcom,regulator-type = "pmic4-bob";
 		qcom,send-defaults;
 
 		pmi8998_bob: regulator-bob {
@@ -723,7 +750,7 @@
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3600000>;
 			qcom,init-voltage = <3312000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_BOB_PASS>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_PASS>;
 		};
 
 		pmi8998_bob_ao: regulator-bob-ao {
@@ -732,7 +759,7 @@
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3600000>;
 			qcom,init-voltage = <3312000>;
-			qcom,init-mode = <RPMH_REGULATOR_MODE_BOB_AUTO>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 91bab54..213dfdb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2800,6 +2800,7 @@
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
 		qcom,pil-force-shutdown;
+		memory-region = <&pil_ipa_fw_mem>;
 	};
 
 	qcom,chd_sliver {
@@ -3774,7 +3775,7 @@
 		};
 
 		fcm_dump {
-			qcom,dump-size = <0x400>;
+			qcom,dump-size = <0x8400>;
 			qcom,dump-id = <0xee>;
 		};
 
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index f862f576..4a732e3 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -83,6 +83,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -320,11 +321,17 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR4_APSS=y
@@ -400,9 +407,20 @@
 CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_PARANOID_SD_INIT=y
@@ -421,6 +439,7 @@
 CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -446,6 +465,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_MSM_SPM=y
 CONFIG_MSM_L2_SPM=y
 CONFIG_MSM_BOOT_STATS=y
@@ -459,6 +479,7 @@
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index ef6e00f..ec7aad8 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -90,6 +90,7 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -330,11 +331,17 @@
 CONFIG_MSM_APM=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_CPR4_APSS=y
@@ -411,9 +418,20 @@
 CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
 CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
+CONFIG_USB_CONFIGFS_RMNET_BAM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
 CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_HID=y
 CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
@@ -433,6 +451,7 @@
 CONFIG_LEDS_QPNP_FLASH=y
 CONFIG_LEDS_QPNP_WLED=y
 CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
@@ -458,6 +477,7 @@
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
 CONFIG_IOMMU_TESTS=y
@@ -476,6 +496,7 @@
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index ee6135a..1904209 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -21,8 +21,6 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 995e658..670627d 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -22,8 +22,6 @@
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_DEBUG_BLK_CGROUP=y
 CONFIG_RT_GROUP_SCHED=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index f34f983..012b342 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -64,6 +64,7 @@
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -437,7 +438,7 @@
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 3e1d5ad4..09f7b3e 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -68,6 +68,7 @@
 CONFIG_ZSMALLOC=y
 CONFIG_BALANCE_ANON_FILE_RECLAIM=y
 CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_SWP_EMULATION=y
 CONFIG_CP15_BARRIER_EMULATION=y
@@ -440,7 +441,7 @@
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
-CONFIG_MMC_TEST=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ef5970e..46d0448 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -452,17 +452,4 @@ alternative_endif
 	mrs	\rd, sp_el0
 	.endm
 
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
-	.macro	post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
-#endif
-	.endm
-
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b4465..d64bf94 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -35,6 +35,10 @@
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
 
-#define ARM64_NCAPS				16
+#define ARM64_UNMAP_KERNEL_AT_EL0		16
+
+#define ARM64_HARDEN_BRANCH_PREDICTOR		17
+
+#define ARM64_NCAPS				18
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index f8682a3..ddbf3b1 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -63,7 +63,6 @@
 ({									\
 	u32 _model = (midr) & MIDR_CPU_MODEL_MASK;			\
 	u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);	\
-									\
 	_model == (model) && rv >= (rv_min) && rv <= (rv_max);		\
  })
 
@@ -76,7 +75,11 @@
 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
 #define ARM_CPU_PART_CORTEX_A57		0xD07
+#define ARM_CPU_PART_CORTEX_A72		0xD08
 #define ARM_CPU_PART_CORTEX_A53		0xD03
+#define ARM_CPU_PART_CORTEX_A73		0xD09
+#define ARM_CPU_PART_CORTEX_A75		0xD0A
+#define ARM_CPU_PART_KRYO3G		0x802
 
 #define APM_CPU_PART_POTENZA		0x000
 
@@ -87,6 +90,10 @@
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_KRYO3G	MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO3G)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 1fb0230..40a8a94 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -169,7 +169,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #ifdef CONFIG_COMPAT
 
 /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL
+#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d14c478..85997c0 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -175,6 +175,12 @@
 #define ESR_ELx_SYS64_ISS_SYS_CTR_READ	(ESR_ELx_SYS64_ISS_SYS_CTR | \
 					 ESR_ELx_SYS64_ISS_DIR_READ)
 
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCT	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be..d8e5805 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,12 @@ enum fixed_addresses {
 
 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	FIX_ENTRY_TRAMP_DATA,
+	FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS		(__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 	__end_of_permanent_fixed_addresses,
 
 	/*
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7803343..77a27af 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -78,8 +78,16 @@
 /*
  * Initial memory map attributes.
  */
-#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS	(_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS	(_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS	_SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS	_SWAPPER_PMD_FLAGS
+#endif
 
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 2a2752b..0dbc1c6 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X				(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ef305f8..35ea9c1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -313,5 +313,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+	if (data->fn) {
+		vect = __bp_harden_hyp_vecs_start +
+		       data->hyp_vectors_slot * SZ_2K;
+
+		if (!has_vhe())
+			vect = lm_alias(vect);
+	}
+
+	return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+				   PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+	return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce0..f543df3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,13 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#define USER_ASID_FLAG	(UL(1) << 48)
+#define TTBR_ASID_MASK	(UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/percpu.h>
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
@@ -28,6 +35,49 @@ typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+	       cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+	int			hyp_vectors_slot;
+	bp_hardening_cb_t	fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return raw_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+	struct bp_hardening_data *d;
+
+	if (!cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+		return;
+
+	d = arm64_get_bp_hardening_data();
+	if (d->fn)
+		d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+	return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void)	{ }
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -37,4 +87,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool allow_block_mappings);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
+#endif	/* !__ASSEMBLY__ */
 #endif
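
The asm/mmu.h hunk above adds the per-CPU branch-predictor hardening hook: bp_hardening_data pairs a hyp vector slot with a callback, and arm64_apply_bp_hardening() stays a no-op until cpu_errata.c (further down) installs a function on CPUs with the ARM64_HARDEN_BRANCH_PREDICTOR capability. The entry.S hunk below routes EL0 instruction aborts to do_el0_ia_bp_hardening(), whose fault.c definition is not part of this excerpt; the fragment here is only a hedged sketch of what such a caller looks like, with the function name and the address check chosen for illustration.

	#include <asm/memory.h>		/* TASK_SIZE */
	#include <asm/mmu.h>		/* arm64_apply_bp_hardening() */

	/* Sketch only: fire the per-CPU hardening callback when EL0 faults on
	 * a kernel address, then fall through to normal abort handling. */
	static void sketch_el0_ia_bp_hardening(unsigned long addr)
	{
		if (addr > TASK_SIZE)
			arm64_apply_bp_hardening();

		/* ... normal do_mem_abort() style handling would follow ... */
	}
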
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 8f8dde1..af0215a 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -57,6 +57,13 @@ static inline void cpu_set_reserved_ttbr0(void)
 	isb();
 }
 
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+	BUG_ON(pgd == swapper_pg_dir);
+	cpu_set_reserved_ttbr0();
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);
+}
+
 /*
  * TCR.T0SZ value to use when the ID map is active. Usually equals
  * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd..8df4cb6 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -272,6 +272,7 @@
 #define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
 #define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)
 
+#define TCR_A1			(UL(1) << 22)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2142c77..84b5283 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,8 +34,16 @@
 
 #include <asm/pgtable-types.h>
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT		(_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT		_PROT_DEFAULT
+#define PROT_SECT_DEFAULT	_PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -48,6 +56,7 @@
 #define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
 
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT	(_PAGE_DEFAULT & ~PTE_NG)
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -55,15 +64,15 @@
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
-#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
-#define PAGE_HYP_EXEC		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
-#define PAGE_HYP_RO		__pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+#define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c05ee84..9f1bba6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -92,6 +92,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
 /*
  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -101,6 +103,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_accessible(mm, pte)	\
 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
 
+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only which do not have the
+ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+	(pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+	(pte_access_permitted(pud_pte(pud), (write)))
+
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
 	pte_val(pte) &= ~pgprot_val(prot);
@@ -707,6 +721,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 
 /*
  * Encode and decode a swap entry:
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b7..9da52c2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,12 +39,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm)				\
-do {							\
-	BUG_ON(pgd == swapper_pg_dir);			\
-	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
-} while (0)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7393cc7..88bbe36 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -117,6 +117,9 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT		60
+#define ID_AA64PFR0_CSV2_SHIFT		56
+#define ID_AA64PFR0_SVE_SHIFT		32
 #define ID_AA64PFR0_GIC_SHIFT		24
 #define ID_AA64PFR0_ASIMD_SHIFT		20
 #define ID_AA64PFR0_FP_SHIFT		16
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index deab523..ad6bd8b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
 
 #include <linux/sched.h>
 #include <asm/cputype.h>
+#include <asm/mmu.h>
 
 /*
  * Raw TLBI operations.
@@ -42,6 +43,11 @@
 
 #define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
 
+#define __tlbi_user(op, arg) do {						\
+	if (arm64_kernel_unmapped_at_el0())					\
+		__tlbi(op, (arg) | USER_ASID_FLAG);				\
+} while (0)
+
 /*
  *	TLB Management
  *	==============
@@ -103,6 +109,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 
 	dsb(ishst);
 	__tlbi(aside1is, asid);
+	__tlbi_user(aside1is, asid);
 	dsb(ish);
 }
 
@@ -113,6 +120,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
 	dsb(ishst);
 	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 	dsb(ish);
 }
 
@@ -139,10 +147,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level)
+		if (last_level) {
 			__tlbi(vale1is, addr);
-		else
+			__tlbi_user(vale1is, addr);
+		} else {
 			__tlbi(vae1is, addr);
+			__tlbi_user(vae1is, addr);
+		}
 	}
 	dsb(ish);
 }
@@ -182,6 +193,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
 	__tlbi(vae1is, addr);
+	__tlbi_user(vae1is, addr);
 	dsb(ish);
 }
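
With kernel page-table isolation each mm effectively owns two hardware ASIDs: the kernel runs on the even value and EL0 runs with bit 48 (USER_ASID_FLAG, the low bit of the ASID field) set, so the __tlbi_user() wrapper above repeats each invalidation against the user ASID whenever the feature is active. A small stand-alone sketch of that operand encoding, assuming the usual 16-bit ASID in bits 63:48 of the TLBI argument:

	#include <stdio.h>

	#define USER_ASID_FLAG	(1UL << 48)	/* low bit of the ASID field */

	int main(void)
	{
		unsigned long page = 0x0000ffffabcd0000UL >> 12;	/* VA page number */
		unsigned long kern = page | (0x42UL << 48);		/* even ASID: kernel */
		unsigned long user = kern | USER_ASID_FLAG;		/* odd ASID: EL0 */

		printf("kernel TLBI operand: %#lx\n", kern);
		printf("user   TLBI operand: %#lx\n", user);
		return 0;
	}
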
 
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 4d9222a..8b38b0d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -20,6 +20,7 @@
 
 #include <asm/alternative.h>
 #include <asm/kernel-pgtable.h>
+#include <asm/mmu.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -133,15 +134,19 @@ static inline void __uaccess_ttbr0_disable(void)
 {
 	unsigned long ttbr;
 
+	ttbr = read_sysreg(ttbr1_el1);
 	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
-	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
-	write_sysreg(ttbr, ttbr0_el1);
+	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
+	isb();
+	/* Set reserved ASID */
+	ttbr &= ~TTBR_ASID_MASK;
+	write_sysreg(ttbr, ttbr1_el1);
 	isb();
 }
 
 static inline void __uaccess_ttbr0_enable(void)
 {
-	unsigned long flags;
+	unsigned long flags, ttbr0, ttbr1;
 
 	/*
 	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -149,7 +154,16 @@ static inline void __uaccess_ttbr0_enable(void)
 	 * roll-over and an update of 'ttbr0'.
 	 */
 	local_irq_save(flags);
-	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	ttbr0 = current_thread_info()->ttbr0;
+
+	/* Restore active ASID */
+	ttbr1 = read_sysreg(ttbr1_el1);
+	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+	write_sysreg(ttbr1, ttbr1_el1);
+	isb();
+
+	/* Restore user page table */
+	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
 	local_irq_restore(flags);
 }
@@ -439,11 +453,20 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
 	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
 	isb
+	sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
+	bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+	msr     ttbr1_el1, \tmp1                // set reserved ASID
+	isb
 	.endm
 
-	.macro	__uaccess_ttbr0_enable, tmp1
+	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
 	get_thread_info \tmp1
 	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	mrs     \tmp2, ttbr1_el1
+	extr    \tmp2, \tmp2, \tmp1, #48
+	ror     \tmp2, \tmp2, #16
+	msr     ttbr1_el1, \tmp2                // set the active ASID
+	isb
 	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
 	isb
 	.endm
@@ -454,18 +477,18 @@ alternative_if_not ARM64_HAS_PAN
 alternative_else_nop_endif
 	.endm
 
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
 alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp2		// avoid preemption
-	__uaccess_ttbr0_enable \tmp1
-	restore_irq \tmp2
+	save_and_disable_irq \tmp3		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1, \tmp2
+	restore_irq \tmp3
 alternative_else_nop_endif
 	.endm
 #else
 	.macro	uaccess_ttbr0_disable, tmp1
 	.endm
 
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
 	.endm
 #endif
 
@@ -479,8 +502,8 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
 alternative_else_nop_endif
 	.endm
 
-	.macro	uaccess_enable_not_uao, tmp1, tmp2
-	uaccess_ttbr0_enable \tmp1, \tmp2
+	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
+	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(0)
 alternative_else_nop_endif
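
For the software-PAN path, __uaccess_ttbr0_disable() now parks both TTBR0_EL1 (on the zeroed reserved table) and the TTBR1_EL1 ASID field (on the reserved ASID 0), and __uaccess_ttbr0_enable() puts the user ASID back into TTBR1_EL1 before restoring the user TTBR0. Because the disable path already cleared the ASID bits, the enable path can use a plain OR; the general form of that update, modelled on plain integers with TTBR_ASID_MASK as defined in the asm/mmu.h hunk earlier, is sketched below.

	#define TTBR_ASID_MASK	(0xffffUL << 48)

	/* Sketch: copy the active ASID from the saved user TTBR0 value into
	 * TTBR1 (the real code skips masking ttbr1 because the disable path
	 * has already zeroed its ASID field). */
	static unsigned long restore_user_asid(unsigned long ttbr1, unsigned long ttbr0)
	{
		return (ttbr1 & ~TTBR_ASID_MASK) | (ttbr0 & TTBR_ASID_MASK);
	}
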
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 2c03b01..446eabd 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,10 @@
 arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
 
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
+endif
+
 obj-y					+= $(arm64-obj-y) vdso/ probes/
 obj-m					+= $(arm64-obj-m)
 head-y					:= head.o
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b3bb7ef..5d2d356 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,7 @@
 #include <linux/kvm_host.h>
 #include <linux/suspend.h>
 #include <asm/cpufeature.h>
+#include <asm/fixmap.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
@@ -147,11 +148,14 @@ int main(void)
   DEFINE(ARM_SMCCC_RES_X2_OFFS,		offsetof(struct arm_smccc_res, a2));
   DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
   DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));
-
   BLANK();
   DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
   DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
   DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
   DEFINE(ARM64_FTR_SYSVAL,	offsetof(struct arm64_ftr_reg, sys_val));
+  BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
+#endif
   return 0;
 }
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 0000000..dec95bd
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,79 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+	.rept 31
+	nop
+	.endr
+	b	\target
+.endm
+
+.macro vectors target
+	ventry \target + 0x000
+	ventry \target + 0x080
+	ventry \target + 0x100
+	ventry \target + 0x180
+
+	ventry \target + 0x200
+	ventry \target + 0x280
+	ventry \target + 0x300
+	ventry \target + 0x380
+
+	ventry \target + 0x400
+	ventry \target + 0x480
+	ventry \target + 0x500
+	ventry \target + 0x580
+
+	ventry \target + 0x600
+	ventry \target + 0x680
+	ventry \target + 0x700
+	ventry \target + 0x780
+.endm
+
+	.align	11
+ENTRY(__bp_harden_hyp_vecs_start)
+	.rept 4
+	vectors __kvm_hyp_vector
+	.endr
+ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+	sub	sp, sp, #(8 * 18)
+	stp	x16, x17, [sp, #(16 * 0)]
+	stp	x14, x15, [sp, #(16 * 1)]
+	stp	x12, x13, [sp, #(16 * 2)]
+	stp	x10, x11, [sp, #(16 * 3)]
+	stp	x8, x9, [sp, #(16 * 4)]
+	stp	x6, x7, [sp, #(16 * 5)]
+	stp	x4, x5, [sp, #(16 * 6)]
+	stp	x2, x3, [sp, #(16 * 7)]
+	stp	x0, x1, [sp, #(16 * 8)]
+	mov	x0, #0x84000000
+	smc	#0
+	ldp	x16, x17, [sp, #(16 * 0)]
+	ldp	x14, x15, [sp, #(16 * 1)]
+	ldp	x12, x13, [sp, #(16 * 2)]
+	ldp	x10, x11, [sp, #(16 * 3)]
+	ldp	x8, x9, [sp, #(16 * 4)]
+	ldp	x6, x7, [sp, #(16 * 5)]
+	ldp	x4, x5, [sp, #(16 * 6)]
+	ldp	x2, x3, [sp, #(16 * 7)]
+	ldp	x0, x1, [sp, #(16 * 8)]
+	add	sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
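
The __psci_hyp_bp_inval_start/__psci_hyp_bp_inval_end sequence above spills x0-x17 (18 registers, hence the 8 * 18 = 144-byte frame), issues SMC 0x84000000 (PSCI_VERSION) solely for the firmware side effect of invalidating the branch predictor, and restores the registers; cpu_errata.c below copies it into the per-CPU vector slots. For reference, a rough C-level equivalent using the kernel's SMCCC helper is sketched here, on the assumption that <linux/arm-smccc.h> is available in this tree (the asm-offsets.c hunk suggests it is):

	#include <linux/arm-smccc.h>

	/* Sketch only: the C shape of the SMC issued above. The returned PSCI
	 * version is ignored; the call exists purely for its side effect. */
	static void sketch_psci_bp_inval(void)
	{
		struct arm_smccc_res res;

		arm_smccc_smc(0x84000000 /* PSCI_VERSION */, 0, 0, 0, 0, 0, 0, 0, &res);
	}
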
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b75e917..653359b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -46,6 +46,100 @@ static int cpu_enable_trap_ctr_access(void *__unused)
 	return 0;
 }
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+				const char *hyp_vecs_end)
+{
+	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	int i;
+
+	for (i = 0; i < SZ_2K; i += 0x80)
+		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	static int last_slot = -1;
+	static DEFINE_SPINLOCK(bp_lock);
+	int cpu, slot = -1;
+
+	spin_lock(&bp_lock);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+			break;
+		}
+	}
+
+	if (slot == -1) {
+		last_slot++;
+		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+			/ SZ_2K) <= last_slot);
+		slot = last_slot;
+		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+	}
+
+	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+	__this_cpu_write(bp_hardening_data.fn, fn);
+	spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start	NULL
+#define __psci_hyp_bp_inval_end		NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+				      const char *hyp_vecs_start,
+				      const char *hyp_vecs_end)
+{
+	__this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif	/* CONFIG_KVM */
+
+static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+				     bp_hardening_cb_t fn,
+				     const char *hyp_vecs_start,
+				     const char *hyp_vecs_end)
+{
+	u64 pfr0;
+
+	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+		return;
+
+	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+		return;
+
+	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+	const struct arm64_cpu_capabilities *entry = data;
+
+	if (psci_ops.get_version)
+		install_bp_hardening_cb(entry,
+				       (bp_hardening_cb_t)psci_ops.get_version,
+				       __psci_hyp_bp_inval_start,
+				       __psci_hyp_bp_inval_end);
+
+	return 0;
+}
+#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
 #define MIDR_RANGE(model, min, max) \
 	.def_scope = SCOPE_LOCAL_CPU, \
 	.matches = is_affected_midr_range, \
@@ -53,6 +147,13 @@ static int cpu_enable_trap_ctr_access(void *__unused)
 	.midr_range_min = min, \
 	.midr_range_max = max
 
+#define MIDR_ALL_VERSIONS(model) \
+	.def_scope = SCOPE_LOCAL_CPU, \
+	.matches = is_affected_midr_range, \
+	.midr_model = model, \
+	.midr_range_min = 0, \
+	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -130,6 +231,33 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.def_scope = SCOPE_LOCAL_CPU,
 		.enable = cpu_enable_trap_ctr_access,
 	},
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		.enable = enable_psci_bp_hardening,
+	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_KRYO3G),
+		.enable = enable_psci_bp_hardening,
+	},
+#endif
 	{
 	}
 };
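
MIDR_ALL_VERSIONS() matches every variant and revision of a given part, and install_bp_hardening_cb() additionally skips CPUs whose ID_AA64PFR0_EL1.CSV2 field reports them as unaffected, so listing a core remains safe even if later revisions gain the CSV2 bit. Extending the table is mechanical; the entry below is purely a hypothetical illustration (0xFFF is a placeholder part number, not something this patch adds):

	#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
		{
			/* hypothetical example entry; 0xFFF is a placeholder part */
			.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
			MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, 0xFFF)),
			.enable = enable_psci_bp_hardening,
		},
	#endif
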
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0127e1b..80ff3df5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -100,6 +100,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	/* Linux doesn't care about the EL3 */
 	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
@@ -748,6 +749,44 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
 	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+				int __unused)
+{
+	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+	/* Forced on command line? */
+	if (__kpti_forced) {
+		pr_info_once("kernel page table isolation forced %s by command line option\n",
+			     __kpti_forced > 0 ? "ON" : "OFF");
+		return __kpti_forced > 0;
+	}
+
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return true;
+
+	/* Defer to CPU feature registers */
+	return !cpuid_feature_extract_unsigned_field(pfr0,
+						     ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int __init parse_kpti(char *str)
+{
+	bool enabled;
+	int ret = strtobool(str, &enabled);
+
+	if (ret)
+		return ret;
+
+	__kpti_forced = enabled ? 1 : -1;
+	return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -831,6 +870,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.def_scope = SCOPE_SYSTEM,
 		.matches = hyp_offset_low,
 	},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{
+		.desc = "Kernel page table isolation (KPTI)",
+		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = unmap_kernel_at_el0,
+	},
+#endif
 	{},
 };
 
@@ -951,7 +998,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * uses an IPI, giving us a PSTATE that disappears when
 			 * we return.
 			 */
-			stop_machine(caps->enable, NULL, cpu_online_mask);
+			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
 }
 
 /*
@@ -1007,7 +1054,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
 			cpu_die_early();
 		}
 		if (caps->enable)
-			caps->enable(NULL);
+			caps->enable((void *)caps);
 	}
 }
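
unmap_kernel_at_el0() applies a simple precedence: an explicit kpti= command-line override wins, otherwise KASLR forces the capability on so the randomized kernel layout stays hidden from EL0, otherwise the CSV3 field of ID_AA64PFR0_EL1 decides (non-zero means the CPU is not vulnerable and KPTI can stay off). Restated as a tiny stand-alone sketch of the same decision order:

	/* Sketch of the decision order in unmap_kernel_at_el0() above. */
	static int kpti_wanted(int forced, int kaslr, unsigned int csv3)
	{
		if (forced)
			return forced > 0;	/* kpti=1 forces on, kpti=0 forces off */
		if (kaslr)
			return 1;		/* keep the KASLR offset hidden from EL0 */
		return csv3 == 0;		/* CSV3 != 0 => CPU reports itself safe */
	}

Booting with kpti=0 on an affected part therefore logs the "forced OFF" message from the hunk above and leaves the kernel mapped while running at EL0.
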
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 718c4c8..8030583 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,6 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
@@ -70,8 +71,31 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro	kernel_entry, el, regsize = 64
+	.macro kernel_ventry, el, label, regsize = 64
+	.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+	.if	\el == 0
+	.if	\regsize == 64
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+	.else
+	mov	x30, xzr
+	.endif
+	.endif
+alternative_else_nop_endif
+#endif
+
 	sub	sp, sp, #S_FRAME_SIZE
+	b	el\()\el\()_\label
+	.endm
+
+	.macro tramp_alias, dst, sym
+	mov_q	\dst, TRAMP_VALIAS
+	add	\dst, \dst, #(\sym - .entry.tramp.text)
+	.endm
+
+	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
 	.endif
@@ -126,8 +150,8 @@
 alternative_else_nop_endif
 
 	.if	\el != 0
-	mrs	x21, ttbr0_el1
-	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	mrs	x21, ttbr1_el1
+	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
@@ -190,7 +214,7 @@
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0
+	__uaccess_ttbr0_enable x0, x1
 
 	.if	\el == 0
 	/*
@@ -199,7 +223,7 @@
 	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 	 * corruption).
 	 */
-	post_ttbr0_update_workaround
+	bl	post_ttbr_update_workaround
 	.endif
 1:
 	.if	\el != 0
@@ -211,18 +235,20 @@
 	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
+	tst	x22, #PSR_MODE32_BIT		// native task?
+	b.eq	3f
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
-	tbz	x22, #4, 1f
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrs	x29, contextidr_el1
 	msr	contextidr_el1, x29
 #else
 	msr contextidr_el1, xzr
 #endif
-1:
 alternative_else_nop_endif
 #endif
+3:
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
@@ -244,7 +270,21 @@
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
-	eret					// return to kernel
+
+	.if	\el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	bne	4f
+	msr	far_el1, x30
+	tramp_alias	x30, tramp_exit_native
+	br	x30
+4:
+	tramp_alias	x30, tramp_exit_compat
+	br	x30
+#endif
+	.else
+	eret
+	.endif
 	.endm
 
 	.macro	irq_stack_entry
@@ -316,31 +356,31 @@
 
 	.align	11
 ENTRY(vectors)
-	ventry	el1_sync_invalid		// Synchronous EL1t
-	ventry	el1_irq_invalid			// IRQ EL1t
-	ventry	el1_fiq_invalid			// FIQ EL1t
-	ventry	el1_error_invalid		// Error EL1t
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
 
-	ventry	el1_sync			// Synchronous EL1h
-	ventry	el1_irq				// IRQ EL1h
-	ventry	el1_fiq_invalid			// FIQ EL1h
-	ventry	el1_error_invalid		// Error EL1h
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error_invalid		// Error EL1h
 
-	ventry	el0_sync			// Synchronous 64-bit EL0
-	ventry	el0_irq				// IRQ 64-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	0, sync				// Synchronous 64-bit EL0
+	kernel_ventry	0, irq				// IRQ 64-bit EL0
+	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, error_invalid		// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	ventry	el0_irq_compat			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	ventry	el0_error_invalid_compat	// Error 32-bit EL0
+	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
 #else
-	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
 END(vectors)
 
@@ -608,11 +648,14 @@
 	mrs	x26, far_el1
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	ct_user_exit
 	mov	x0, x26
 	mov	x1, x25
 	mov	x2, sp
-	bl	do_mem_abort
+	bl	do_el0_ia_bp_hardening
 	b	ret_to_user
 el0_fpsimd_acc:
 	/*
@@ -859,6 +902,119 @@
 
 	.popsection				// .entry.text
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+#ifdef CONFIG_ARCH_MSM8996
+	/* ASID already in \tmp[63:48] */
+	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+	/* 2MB boundary containing the vectors, so we nobble the walk cache */
+	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+	isb
+	tlbi	vae1, \tmp
+	dsb	nsh
+#endif /* CONFIG_ARCH_MSM8996 */
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because the
+	 * user and kernel ASIDs don't have conflicting mappings, so any
+	 * "blessing" as described in:
+	 *
+	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+	 *
+	 * will not hurt correctness. Whilst this may partially defeat the
+	 * point of using split ASIDs in the first place, it avoids
+	 * the hit of invalidating the entire I-cache on every return to
+	 * userspace.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	bl	2f
+	b	.
+2:
+	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+#ifndef CONFIG_ARCH_MSM8996
+	isb
+#endif
+	ldr	x30, [x30]
+#else
+	ldr	x30, =vectors
+#endif
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	ret
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
+	.quad	vectors
+	.popsection				// .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 /*
  * Special system call wrappers.
  */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5fe594e..ee0ea17 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -324,6 +324,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
+	/*
+	 * In case p was allocated the same task_struct pointer as some
+	 * other recently-exited task, make sure p is disassociated from
+	 * any cpu that may have run that now-exited task recently.
+	 * Otherwise we could erroneously skip reloading the FPSIMD
+	 * registers for p.
+	 */
+	fpsimd_flush_task_state(p);
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
@@ -366,17 +375,17 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 static void tls_thread_switch(struct task_struct *next)
 {
-	unsigned long tpidr, tpidrro;
+	unsigned long tpidr;
 
 	tpidr = read_sysreg(tpidr_el0);
 	*task_user_tls(current) = tpidr;
 
-	tpidr = *task_user_tls(next);
-	tpidrro = is_compat_thread(task_thread_info(next)) ?
-		  next->thread.tp_value : 0;
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
 
-	write_sysreg(tpidr, tpidr_el0);
-	write_sysreg(tpidrro, tpidrro_el0);
+	write_sysreg(*task_user_tls(next), tpidr_el0);
 }
 
 /* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 19f3515..cd53836 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
@@ -540,6 +541,25 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 	regs->pc += 4;
 }
 
+static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	isb();
+	if (rt != 31)
+		regs->regs[rt] = arch_counter_get_cntvct();
+	regs->pc += 4;
+}
+
+static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	if (rt != 31)
+		regs->regs[rt] = read_sysreg(cntfrq_el0);
+	regs->pc += 4;
+}
+
 struct sys64_hook {
 	unsigned int esr_mask;
 	unsigned int esr_val;
@@ -558,6 +578,18 @@ static struct sys64_hook sys64_hooks[] = {
 		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
 		.handler = ctr_read_handler,
 	},
+	{
+		/* Trap read access to CNTVCT_EL0 */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
+		.handler = cntvct_read_handler,
+	},
+	{
+		/* Trap read access to CNTFRQ_EL0 */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
+		.handler = cntfrq_read_handler,
+	},
 	{},
 };
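The two handlers added above decode the Rt field of the ESR syndrome to find which general-purpose register the trapped MRS targeted, and skip the write-back when Rt is 31 (the zero register). A minimal standalone C sketch of that decode, assuming the usual ARMv8 ISS layout with Rt in bits [9:5]; the syndrome value and the program itself are illustrative only, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Mirrors ESR_ELx_SYS64_ISS_RT_MASK/SHIFT: Rt lives in ESR bits [9:5]. */
#define ISS_RT_SHIFT	5
#define ISS_RT_MASK	(0x1fu << ISS_RT_SHIFT)

int main(void)
{
	uint32_t esr = (13u << ISS_RT_SHIFT) | 0x1;	/* hypothetical syndrome: Rt = x13 */
	int rt = (esr & ISS_RT_MASK) >> ISS_RT_SHIFT;

	if (rt != 31)
		printf("would write the counter value into x%d\n", rt);
	else
		printf("Rt is XZR, nothing to write back\n");
	return 0;
}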
 
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b8deffa..34d3ed6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -56,6 +56,17 @@
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT					\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
+	*(.entry.tramp.text)				\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -128,6 +139,7 @@
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -221,6 +233,11 @@
 	. += RESERVED_TTBR0_SIZE;
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	tramp_pg_dir = .;
+	. += PAGE_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
@@ -240,7 +257,10 @@
 ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
 	<= SZ_4K, "Hibernate exit text too big or misaligned")
 #endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+	"Entry trampoline text too big")
+#endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a204adf..85baada 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -125,7 +125,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+		      hsr, esr_get_class_string(hsr));
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
+	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
 	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
 	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
@@ -151,13 +163,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	u8 hsr_ec = ESR_ELx_EC(hsr);
 
-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
-	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
-			hsr, esr_get_class_string(hsr));
-		BUG();
-	}
-
 	return arm_exit_handlers[hsr_ec];
 }
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 0c848c1..3eab6ac 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
 
 #include <linux/types.h>
 #include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
 
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -50,7 +51,7 @@ static void __hyp_text __activate_traps_vhe(void)
 	val &= ~CPACR_EL1_FPEN;
 	write_sysreg(val, cpacr_el1);
 
-	write_sysreg(__kvm_hyp_vector, vbar_el1);
+	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
 }
 
 static void __hyp_text __activate_traps_nvhe(void)
@@ -308,6 +309,18 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
 		goto again;
 
+	if (exit_code == ARM_EXCEPTION_TRAP &&
+	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+	    vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+		u64 val = PSCI_RET_NOT_SUPPORTED;
+		if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+			val = 2;
+
+		vcpu_set_reg(vcpu, 0, val);
+		goto again;
+	}
+
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
 	    exit_code == ARM_EXCEPTION_TRAP) {
 		bool valid;
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index d7150e3..dd65ca2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -30,7 +30,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-	uaccess_enable_not_uao x2, x3
+	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index cfe1339..7e7e687 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -64,7 +64,7 @@
 
 end	.req	x5
 ENTRY(__arch_copy_from_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 718b1c4..074d52f 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -65,7 +65,7 @@
 
 end	.req	x5
 ENTRY(__copy_in_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index e99e31c..6711844 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -63,7 +63,7 @@
 
 end	.req	x5
 ENTRY(__arch_copy_to_user)
-	uaccess_enable_not_uao x3, x4
+	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 97de0eb..9dd6d32 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -122,7 +122,7 @@
  *	- end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
-	uaccess_ttbr0_enable x2, x3
+	uaccess_ttbr0_enable x2, x3, x4
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x4, x0, x3
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 4c63cb1..da5add9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
 
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
-#define NUM_USER_ASIDS		ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define asid2idx(asid)		((asid) & ~ASID_MASK)
+#define idx2asid(idx)		asid2idx(idx)
+#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -104,7 +113,7 @@ static void flush_context(unsigned int cpu)
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid & ~ASID_MASK, asid_map);
+		__set_bit(asid2idx(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -159,16 +168,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		asid &= ~ASID_MASK;
-		if (!__test_and_set_bit(asid, asid_map))
+		if (!__test_and_set_bit(asid2idx(asid), asid_map))
 			return newasid;
 	}
 
 	/*
 	 * Allocate a free ASID. If we can't find one, take a note of the
-	 * currently active ASIDs and mark the TLBs as requiring flushes.
-	 * We always count from ASID #1, as we use ASID #0 when setting a
-	 * reserved TTBR0 for the init_mm.
+	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
+	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
+	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+	 * pairs.
 	 */
 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
 	if (asid != NUM_USER_ASIDS)
@@ -185,7 +194,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return asid | generation;
+	return idx2asid(asid) | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
@@ -229,6 +238,17 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+	asm(ALTERNATIVE("nop; nop; nop",
+			"ic iallu; dsb nsh; isb",
+			ARM64_WORKAROUND_CAVIUM_27456,
+			CONFIG_CAVIUM_ERRATUM_27456));
+
+	arm64_apply_bp_hardening();
+}
+
 static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
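With CONFIG_UNMAP_KERNEL_AT_EL0, the allocator above hands out hardware ASIDs in even/odd pairs: the bitmap indexes the even ASID used for the kernel half, and the odd sibling (USER_ASID_FLAG, as set by the trampoline macros earlier) is reserved for userspace, which is why NUM_USER_ASIDS is halved. A standalone sketch of the asid2idx()/idx2asid() arithmetic, assuming 16-bit ASIDs; the values are illustrative only:

#include <stdio.h>

#define ASID_BITS		16
#define ASID_FIRST_VERSION	(1UL << ASID_BITS)
#define ASID_MASK		(~(ASID_FIRST_VERSION - 1))

#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)	/* half the space */
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)	/* even ASID -> bitmap index */
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)	/* bitmap index -> even ASID */

int main(void)
{
	unsigned long idx = 5;				/* hypothetical allocator slot */
	unsigned long kernel_asid = idx2asid(idx);	/* 10: the even, kernel-side ASID */
	unsigned long user_asid = kernel_asid | 1;	/* 11: the USER_ASID_FLAG sibling */

	printf("idx %lu -> kernel ASID %lu, user ASID %lu, back to idx %lu (%lu usable pairs)\n",
	       idx, kernel_asid, user_asid, asid2idx(kernel_asid), NUM_USER_ASIDS);
	return 0;
}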
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b5d88f8..2705e51 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -618,6 +618,22 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 	arm64_notify_die("", regs, &info, esr);
 }
 
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+						   unsigned int esr,
+						   struct pt_regs *regs)
+{
+	/*
+	 * We've taken an instruction abort from userspace and not yet
+	 * re-enabled IRQs. If the address is a kernel address, apply
+	 * BP hardening prior to enabling IRQs and pre-emption.
+	 */
+	if (addr > TASK_SIZE)
+		arm64_apply_bp_hardening();
+
+	local_irq_enable();
+	do_mem_abort(addr, esr, regs);
+}
+
 /*
  * Handle stack alignment exceptions.
  */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f8ef496..2b35b67 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -299,6 +299,7 @@ void __init arm64_memblock_init(void)
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
+	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
@@ -325,7 +326,6 @@ void __init bootmem_init(void)
 	sparse_init();
 	zone_sizes_init(min, max);
 
-	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
 	memblock_dump_all();
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 41efd5e..c66fa93 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -458,6 +458,37 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	vm_area_add_early(vma);
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+	extern char __entry_tramp_text_start[];
+
+	pgprot_t prot = PAGE_KERNEL_EXEC;
+	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+	/* The trampoline is always mapped and can therefore be global */
+	pgprot_val(prot) &= ~PTE_NG;
+
+	/* Map only the text into the trampoline page table */
+	memset(tramp_pg_dir, 0, PGD_SIZE);
+	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+			     prot, pgd_pgtable_alloc, 0);
+
+	/* Map both the text and data into the kernel page table */
+	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern char __entry_tramp_data_start[];
+
+		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
+			     __pa_symbol(__entry_tramp_data_start),
+			     PAGE_KERNEL_RO);
+	}
+
+	return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
 /*
  * Create fine-grained mappings for the kernel.
  */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8d21250..fa20d13 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -184,12 +184,14 @@
  *	- pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
+	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
-	bfi	x0, x1, #48, #16		// set the ASID
-	msr	ttbr0_el1, x0			// set TTBR0
+	bfi	x2, x1, #48, #16		// set the ASID
+	msr	ttbr1_el1, x2			// in TTBR1 (since TCR.A1 is set)
 	isb
-	post_ttbr0_update_workaround
-	ret
+	msr	ttbr0_el1, x0			// now update TTBR0
+	isb
+	b	post_ttbr_update_workaround	// Back to C code...
 ENDPROC(cpu_do_switch_mm)
 
 	.pushsection ".idmap.text", "ax"
@@ -270,7 +272,7 @@
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9
 
 	/*
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index b41aff2..f542252 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -100,7 +100,7 @@
 	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
 	 * is enabled (it implies that hardware UAO and PAN disabled).
 	 */
-	uaccess_ttbr0_enable x6, x7
+	uaccess_ttbr0_enable x6, x7, x8
 	hvc XEN_IMM
 
 	/*
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3c1bd64..88c4b77 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -319,11 +319,14 @@
 
 config GPIO_ADI
 	def_bool y
+	depends on !PINCTRL
 	depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
 
-config PINCTRL
+config PINCTRL_BLACKFIN_ADI2
 	def_bool y
-	depends on BF54x || BF60x
+	depends on (BF54x || BF60x)
+	select PINCTRL
+	select PINCTRL_ADI2
 
 config MEM_MT48LC64M4A2FB_7E
 	bool
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index f3337ee..a93cf06 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -17,6 +17,7 @@
 
 config DEBUG_MMRS
 	tristate "Generate Blackfin MMR tree"
+	depends on !PINCTRL
 	select DEBUG_FS
 	help
 	  Create a tree of Blackfin MMRs via the debugfs tree.  If
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 87131cd..6d3a504 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -169,7 +169,7 @@ void __init cf_bootmem_alloc(void)
 	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
 	high_memory = (void *)_ramend;
 
-	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
 	module_fixup(NULL, __start_fixup, __stop_fixup);
 
 	/* setup bootmem data */
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d20ae63..46abe9e 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
 /* Verified on: WRT54GS V1.0 */
 static const struct gpio_led
 bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
-	BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+	BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
 	BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
 	BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
 };
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index d61bc2a..7d90a87 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -22,7 +22,6 @@
 	bcm63268-comtrend-vr-3032u.dtb \
 	bcm93384wvg.dtb \
 	bcm93384wvg_viper.dtb \
-	bcm96358nb4ser.dtb \
 	bcm96368mvwg.dtb \
 	bcm9ejtagprb.dtb \
 	bcm97125cbmb.dtb \
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 83054f7..8333ce9 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,9 @@
 #include <asm/asmmacro-64.h>
 #endif
 
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
 /*
  * Helper macros for generating raw instruction encodings.
  */
@@ -105,6 +108,7 @@
 	.macro	fpu_save_16odd thread
 	.set	push
 	.set	mips64r2
+	.set	fp=64
 	SET_HARDFLOAT
 	sdc1	$f1,  THREAD_FPR1(\thread)
 	sdc1	$f3,  THREAD_FPR3(\thread)
@@ -163,6 +167,7 @@
 	.macro	fpu_restore_16odd thread
 	.set	push
 	.set	mips64r2
+	.set	fp=64
 	SET_HARDFLOAT
 	ldc1	$f1,  THREAD_FPR1(\thread)
 	ldc1	$f3,  THREAD_FPR3(\thread)
@@ -234,9 +239,6 @@
 	.endm
 
 #ifdef TOOLCHAIN_SUPPORTS_MSA
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
-
 	.macro	_cfcmsa	rd, cs
 	.set	push
 	.set	mips32r2
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 3de0260..11890e6 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -647,6 +647,19 @@ static const struct user_regset_view user_mips64_view = {
 	.n		= ARRAY_SIZE(mips64_regsets),
 };
 
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+	.name		= "mipsn32",
+	.e_flags	= EF_MIPS_ABI2,
+	.e_machine	= ELF_ARCH,
+	.ei_osabi	= ELF_OSABI,
+	.regsets	= mips64_regsets,
+	.n		= ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
 #endif /* CONFIG_64BIT */
 
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -658,6 +671,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 		return &user_mips_view;
 #endif
+#ifdef CONFIG_MIPS32_N32
+	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+		return &user_mipsn32_view;
+#endif
 	return &user_mips64_view;
 #endif
 }
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 9ade60c..7f2519c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1781,7 +1781,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			SPFROMREG(fd, MIPSInst_FD(ir));
 			rv.s = ieee754sp_maddf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmsubf_op: {
@@ -1794,7 +1794,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			SPFROMREG(fd, MIPSInst_FD(ir));
 			rv.s = ieee754sp_msubf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case frint_op: {
@@ -1818,7 +1818,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.w = ieee754sp_2008class(fs);
 			rfmt = w_fmt;
-			break;
+			goto copcsr;
 		}
 
 		case fmin_op: {
@@ -1830,7 +1830,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmin(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmina_op: {
@@ -1842,7 +1842,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmina(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmax_op: {
@@ -1854,7 +1854,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmax(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmaxa_op: {
@@ -1866,7 +1866,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			SPFROMREG(ft, MIPSInst_FT(ir));
 			SPFROMREG(fs, MIPSInst_FS(ir));
 			rv.s = ieee754sp_fmaxa(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fabs_op:
@@ -2110,7 +2110,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			DPFROMREG(fd, MIPSInst_FD(ir));
 			rv.d = ieee754dp_maddf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmsubf_op: {
@@ -2123,7 +2123,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			DPFROMREG(fd, MIPSInst_FD(ir));
 			rv.d = ieee754dp_msubf(fd, fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case frint_op: {
@@ -2147,7 +2147,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.w = ieee754dp_2008class(fs);
 			rfmt = w_fmt;
-			break;
+			goto copcsr;
 		}
 
 		case fmin_op: {
@@ -2159,7 +2159,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmin(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmina_op: {
@@ -2171,7 +2171,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmina(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmax_op: {
@@ -2183,7 +2183,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmax(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fmaxa_op: {
@@ -2195,7 +2195,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 			DPFROMREG(ft, MIPSInst_FT(ir));
 			DPFROMREG(fs, MIPSInst_FS(ir));
 			rv.d = ieee754dp_fmaxa(fs, ft);
-			break;
+			goto copcsr;
 		}
 
 		case fabs_op:
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 628c513..a7962f7 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
 		else
 			break;
 		if (retry++ > WAITRETRY_MAX) {
-			printk(KERN_WARN "PCIE-PHY retry failed.\n");
+			pr_warn("PCIE-PHY retry failed.\n");
 			return -1;
 		}
 	}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 6f892c1..0696142 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
 	FUNC("i2c", 0, 4, 2),
 };
 
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
 static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
 
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa1..1311e6b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -211,7 +211,7 @@ do {									\
 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
-	case 8: __get_user_asm2(x, ptr, retval);			\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
 	default: (x) = __get_user_bad();				\
 	}								\
 } while (0)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 41e60a9..e775f80 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -690,15 +690,15 @@
 	/* ELF32 Process entry path */
 lws_compare_and_swap_2:
 #ifdef CONFIG_64BIT
-	/* Clip the input registers */
+	/* Clip the input registers. We don't need to clip %r23 as we
+	   only use it for word operations */
 	depdi	0, 31, 32, %r26
 	depdi	0, 31, 32, %r25
 	depdi	0, 31, 32, %r24
-	depdi	0, 31, 32, %r23
 #endif
 
 	/* Check the validity of the size pointer */
-	subi,>>= 4, %r23, %r0
+	subi,>>= 3, %r23, %r0
 	b,n	lws_exit_nosys
 
 	/* Jump to the functions which will load the old and new values into
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 617dece..a60c9c6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@
 MULTIPLEWORD	:= -mmultiple
 endif
 
-cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mabi=elfv2
+endif
+
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(call cc-option,-mbig-endian)
 ifneq ($(cc-name),clang)
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mno-strict-align
 endif
@@ -113,7 +120,9 @@
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
 AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2)
 else
+CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
 endif
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3..4c935f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
 					      unsigned long phys);
 extern void hash__vmemmap_remove_mapping(unsigned long start,
 				     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 1e8fceb..430d038 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,17 +53,25 @@ static inline __sum16 csum_fold(__wsum sum)
 	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
+static inline u32 from64to32(u64 x)
+{
+	/* add up 32-bit and 32-bit for 32+c bit */
+	x = (x & 0xffffffff) + (x >> 32);
+	/* add up carry.. */
+	x = (x & 0xffffffff) + (x >> 32);
+	return (u32)x;
+}
+
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
 					__u8 proto, __wsum sum)
 {
 #ifdef __powerpc64__
-	unsigned long s = (__force u32)sum;
+	u64 s = (__force u32)sum;
 
 	s += (__force u32)saddr;
 	s += (__force u32)daddr;
 	s += proto + len;
-	s += (s >> 32);
-	return (__force __wsum) s;
+	return (__force __wsum) from64to32(s);
 #else
     __asm__("\n\
 	addc %0,%0,%1 \n\
@@ -100,7 +108,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
 
 #ifdef __powerpc64__
 	res += (__force u64)addend;
-	return (__force __wsum)((u32)res + (res >> 32));
+	return (__force __wsum) from64to32(res);
 #else
 	asm("addc %0,%0,%1;"
 	    "addze %0,%0;"
@@ -123,8 +131,7 @@ static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
 
 	for (i = 0; i < ihl - 1; i++, ptr++)
 		s += *ptr;
-	s += (s >> 32);
-	return (__force __wsum)s;
+	return (__force __wsum)from64to32(s);
 #else
 	__wsum sum, tmp;
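The new from64to32() helper folds the 64-bit accumulator into 32 bits in two steps so a carry produced by the first addition is folded back in, whereas the old single "s += (s >> 32)" could generate a carry that was then lost by the truncating return. A standalone sketch contrasting the two; the accumulator value is made up to force the carry and is not from the patch:

#include <stdio.h>
#include <stdint.h>

static uint32_t from64to32(uint64_t x)
{
	/* add up 32-bit and 32-bit for 32+c bit */
	x = (x & 0xffffffff) + (x >> 32);
	/* add up carry.. */
	x = (x & 0xffffffff) + (x >> 32);
	return (uint32_t)x;
}

int main(void)
{
	/* hypothetical accumulator whose fold carries out of bit 31 */
	uint64_t s = 0xffffffff00000001ULL;
	uint64_t naive = s + (s >> 32);		/* old single fold, then truncate */

	printf("two-step fold: 0x%08x, naive low word: 0x%08x\n",
	       (unsigned)from64to32(s), (unsigned)naive);
	return 0;
}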
 
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 7803756..9e05c88 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -97,6 +97,7 @@
 	beqlr
 	li	r0,0
 	mtspr	SPRN_LPID,r0
+	mtspr	SPRN_PID,r0
 	mfspr	r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
 	or	r3, r3, r4
@@ -119,6 +120,7 @@
 	beqlr
 	li	r0,0
 	mtspr	SPRN_LPID,r0
+	mtspr	SPRN_PID,r0
 	mfspr   r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
 	or	r3, r3, r4
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index bbe77ae..3600c0d 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 static void do_signal(struct task_struct *tsk)
 {
 	sigset_t *oldset = sigmask_to_save();
-	struct ksignal ksig;
+	struct ksignal ksig = { .sig = 0 };
 	int ret;
 	int is32 = is_32bit_task();
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 78dabf06..bd66628 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_bolt_mapping(start, end, __pa(start),
 				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
 	return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
 	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
 				     mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f4f437c..0fad7f6 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -125,3 +125,21 @@ void mmu_cleanup_all(void)
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 9a25dce..44c33ee 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -173,6 +173,10 @@ static void __init radix_init_pgtable(void)
 	 */
 	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+	asm volatile("ptesync" : : : "memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 
 static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72c27b8..083f9274 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 	int ret;
 	__u64 target;
 
-	if (is_kernel_addr(addr))
-		return branch_target((unsigned int *)addr);
+	if (is_kernel_addr(addr)) {
+		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+			return 0;
+
+		return branch_target(&instr);
+	}
 
 	/* Userspace: need copy instruction here then translate it */
 	pagefault_disable();
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 7b2ca16..991c6a5 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -516,7 +516,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
 {
 	if (s1 < s2)
 		return 1;
-	if (s2 > s1)
+	if (s1 > s2)
 		return -1;
 
 	return memcmp(d1, d2, s1);
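In the old memord() the second test (s2 > s1) repeated the first (s1 < s2), so a pair with s1 > s2 was never ordered by size and fell through to memcmp() over s1 bytes of the shorter buffer. A standalone sketch of the corrected size-then-content ordering; the sample strings are made up for illustration:

#include <stdio.h>
#include <string.h>

static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
	if (s1 < s2)
		return 1;
	if (s1 > s2)		/* the fixed branch; the old test repeated s1 < s2 */
		return -1;
	return memcmp(d1, d2, s1);
}

int main(void)
{
	const char a[] = "event";	/* hypothetical catalog entries */
	const char b[] = "ev";

	printf("memord(a, b) = %d, memord(b, a) = %d\n",
	       memord(a, sizeof(a) - 1, b, sizeof(b) - 1),
	       memord(b, sizeof(b) - 1, a, sizeof(a) - 1));
	return 0;
}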
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index 83bebee..0f7b16e 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -39,18 +39,18 @@ int __opal_async_get_token(void)
 	int token;
 
 	spin_lock_irqsave(&opal_async_comp_lock, flags);
-	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
+	token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
 	if (token >= opal_max_async_tokens) {
 		token = -EBUSY;
 		goto out;
 	}
 
-	if (__test_and_set_bit(token, opal_async_token_map)) {
+	if (!__test_and_clear_bit(token, opal_async_complete_map)) {
 		token = -EBUSY;
 		goto out;
 	}
 
-	__clear_bit(token, opal_async_complete_map);
+	__set_bit(token, opal_async_token_map);
 
 out:
 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index dcdfee0..f602307 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2623,6 +2623,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
 
+	if ((level_shift - 3) * levels + page_shift >= 60)
+		return -EINVAL;
+
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
 			levels, tce_table_size, &offset, &total_allocated);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index efe8b6b..b33faa0 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -289,7 +289,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
 {
 	unsigned long ret_freq;
 
-	ret_freq = cpufreq_quick_get(cpu) * 1000ul;
+	ret_freq = cpufreq_get(cpu) * 1000ul;
 
 	/*
 	 * If the backend cpufreq driver does not exist,
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29ea..f523ac8 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@ static int axon_ram_probe(struct platform_device *device)
 			if (bank->disk->major > 0)
 				unregister_blkdev(bank->disk->major,
 						bank->disk->disk_name);
-			del_gendisk(bank->disk);
+			if (bank->disk->flags & GENHD_FL_UP)
+				del_gendisk(bank->disk);
+			put_disk(bank->disk);
 		}
 		device->dev.platform_data = NULL;
 		if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
 	device_remove_file(&device->dev, &dev_attr_ecc);
 	free_irq(bank->irq_id, device);
 	del_gendisk(bank->disk);
+	put_disk(bank->disk);
 	iounmap((void __iomem *) bank->io_addr);
 	kfree(bank);
 
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index f267ee0..716353b 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -845,12 +845,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
 
 u32 ipic_get_mcp_status(void)
 {
-	return ipic_read(primary_ipic->regs, IPIC_SERMR);
+	return ipic_read(primary_ipic->regs, IPIC_SERSR);
 }
 
 void ipic_clear_mcp_status(u32 mask)
 {
-	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
+	ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
 }
 
 /* Return an interrupt vector or 0 if no interrupt is pending. */
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..2c3413b
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 649eb62..9e02cb7 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
 int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 402ad6d..c54a931 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
 		load_runtime_instr_cb(&runtime_instr_empty_cb);
 }
 
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
 
 #endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 12d45f0..ff2fbda 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -29,17 +29,16 @@ static inline void restore_access_regs(unsigned int *acrs)
 }
 
 #define switch_to(prev,next,last) do {					\
-	if (prev->mm) {							\
-		save_fpu_regs();					\
-		save_access_regs(&prev->thread.acrs[0]);		\
-		save_ri_cb(prev->thread.ri_cb);				\
-	}								\
-	if (next->mm) {							\
-		update_cr_regs(next);					\
-		set_cpu_flag(CIF_FPU);					\
-		restore_access_regs(&next->thread.acrs[0]);		\
-		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
-	}								\
+	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
+	 * a restore of the floating point / vector registers as	\
+	 * soon as the next task returns to user space			\
+	 */								\
+	save_fpu_regs();						\
+	save_access_regs(&prev->thread.acrs[0]);			\
+	save_ri_cb(prev->thread.ri_cb);					\
+	update_cr_regs(next);						\
+	restore_access_regs(&next->thread.acrs[0]);			\
+	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
 	prev = __switch_to(prev,next);					\
 } while (0)
 
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index c74c592..aaf9dab 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
 	{ "vfsq", 0xce, INSTR_VRR_VV000MM },
 	{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
 	{ "vftci", 0x4a, INSTR_VRI_VVIMM },
+	{ "", 0, INSTR_INVALID }
 };
 
 static struct s390_insn opcode_eb[] = {
@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
 {
 	char *mode = user_mode(regs) ? "User" : "Krnl";
 	unsigned char code[64];
-	char buffer[64], *ptr;
+	char buffer[128], *ptr;
 	mm_segment_t old_fs;
 	unsigned long addr;
 	int start, end, opsize, hops, i;
@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
 		start += opsize;
 		pr_cont("%s", buffer);
 		ptr = buffer;
-		ptr += sprintf(ptr, "\n          ");
+		ptr += sprintf(ptr, "\n\t  ");
 		hops++;
 	}
 	pr_cont("\n");
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 0c19686..29d8744 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -345,8 +345,10 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
-	if (test_facility(50) && test_facility(73))
+	if (test_facility(50) && test_facility(73)) {
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+		__ctl_set_bit(0, 55);
+	}
 	if (test_facility(51))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129)) {
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa7..8382fc6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -70,8 +70,6 @@ extern void kernel_thread_starter(void);
  */
 void exit_thread(struct task_struct *tsk)
 {
-	if (tsk == current)
-		exit_thread_runtime_instr();
 }
 
 void flush_thread(void)
@@ -84,6 +82,7 @@ void release_thread(struct task_struct *dead_task)
 
 void arch_release_task_struct(struct task_struct *tsk)
 {
+	runtime_instr_release(tsk);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -120,6 +119,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+	p->thread.per_flags = 0;
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index fffa0e5..fd03a75 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,11 +18,24 @@
 /* empty control block to disable RI by loading it */
 struct runtime_instr_cb runtime_instr_empty_cb;
 
+void runtime_instr_release(struct task_struct *tsk)
+{
+	kfree(tsk->thread.ri_cb);
+}
+
 static void disable_runtime_instr(void)
 {
-	struct pt_regs *regs = task_pt_regs(current);
+	struct task_struct *task = current;
+	struct pt_regs *regs;
 
+	if (!task->thread.ri_cb)
+		return;
+	regs = task_pt_regs(task);
+	preempt_disable();
 	load_runtime_instr_cb(&runtime_instr_empty_cb);
+	kfree(task->thread.ri_cb);
+	task->thread.ri_cb = NULL;
+	preempt_enable();
 
 	/*
 	 * Make sure the RI bit is deleted from the PSW. If the user did not
@@ -43,17 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
 	cb->valid = 1;
 }
 
-void exit_thread_runtime_instr(void)
-{
-	struct task_struct *task = current;
-
-	if (!task->thread.ri_cb)
-		return;
-	disable_runtime_instr();
-	kfree(task->thread.ri_cb);
-	task->thread.ri_cb = NULL;
-}
-
 SYSCALL_DEFINE1(s390_runtime_instr, int, command)
 {
 	struct runtime_instr_cb *cb;
@@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
 		return -EOPNOTSUPP;
 
 	if (command == S390_RUNTIME_INSTR_STOP) {
-		preempt_disable();
-		exit_thread_runtime_instr();
-		preempt_enable();
+		disable_runtime_instr();
 		return 0;
 	}
 
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9b59e62..709da45 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -369,10 +369,10 @@
 SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
 SYSCALL(sys_socket,sys_socket)
 SYSCALL(sys_socketpair,compat_sys_socketpair)		/* 360 */
-SYSCALL(sys_bind,sys_bind)
-SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_bind,compat_sys_bind)
+SYSCALL(sys_connect,compat_sys_connect)
 SYSCALL(sys_listen,sys_listen)
-SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_accept4,compat_sys_accept4)
 SYSCALL(sys_getsockopt,compat_sys_getsockopt)		/* 365 */
 SYSCALL(sys_setsockopt,compat_sys_setsockopt)
 SYSCALL(sys_getsockname,compat_sys_getsockname)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e184353..c2905a1 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -197,8 +197,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 		return -EAGAIN;
 	}
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	return 0;
 }
 
@@ -209,6 +207,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -238,6 +239,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
@@ -273,6 +277,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
 	int reg1, reg2;
 	int rc;
 
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
 	rc = try_handle_skey(vcpu);
 	if (rc)
 		return rc != -EAGAIN ? rc : 0;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19..03a1d59 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -354,7 +354,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
 				/* End of second scan with interrupts on. */
 				break;
 			/* First scan complete, reenable interrupts. */
-			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
+				break;
 			si = 0;
 			continue;
 		}
@@ -928,7 +929,7 @@ static int __init pci_base_init(void)
 	if (!s390_pci_probe)
 		return 0;
 
-	if (!test_facility(69) || !test_facility(71) || !test_facility(72))
+	if (!test_facility(69) || !test_facility(71))
 		return 0;
 
 	rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index fa8d7d4..248146d 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <asm/facility.h>
 #include <asm/pci_insn.h>
 #include <asm/pci_debug.h>
 #include <asm/processor.h>
@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
 }
 
 /* Set Interruption Controls */
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
 {
+	if (!test_facility(72))
+		return -EIO;
 	asm volatile (
 		"	.insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
 		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+	return 0;
 }
 
 /* PCI Load */
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 57154c6..0f183ff 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2391,10 +2391,17 @@ void __init mem_init(void)
 {
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
-	register_page_bootmem_info();
 	free_all_bootmem();
 
 	/*
+	 * Must be done after boot memory is put on freelist, because here we
+	 * might set fields in deferred struct pages that have not yet been
+	 * initialized, and free_all_bootmem() initializes all the reserved
+	 * deferred pages for us.
+	 */
+	register_page_bootmem_info();
+
+	/*
 	 * Set up the zero page, mark it reserved, so that page count
 	 * is not manipulated when freeing the page from user ptes.
 	 */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c7f2a52..83a73cf 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 399a29d..cb91a64 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
 	salsa20_ivsetup(ctx, walk.iv);
 
-	if (likely(walk.nbytes == nbytes))
-	{
-		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
-				      walk.dst.virt.addr, nbytes);
-		return blkcipher_walk_done(desc, &walk, 0);
-	}
-
 	while (walk.nbytes >= 64) {
 		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
 				      walk.dst.virt.addr,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be20239..9dfeeec 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			continue;
 
 		/* log dropped samples number */
-		if (error[bit])
+		if (error[bit]) {
 			perf_log_lost_samples(event, error[bit]);
 
+			if (perf_event_account_interrupt(event))
+				x86_pmu_stop(event, 0);
+		}
+
 		if (counts[bit]) {
 			__intel_pmu_pebs_event(event, iregs, base,
 					       top, bit, counts[bit]);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bdde807..cbd1d44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1397,4 +1397,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #endif
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 91dfcaf..bad25bb 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
 asmlinkage long sys_iopl(unsigned int);
 
 /* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
 
 /* kernel/signal.c */
 asmlinkage long sys_rt_sigreturn(void);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b89bef9..11cc600 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -720,7 +720,7 @@ static void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 095ef7d..abfbb61b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1077,6 +1077,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
 	 * Add back in the features that came in from userspace:
 	 */
 	xsave->header.xfeatures |= xfeatures;
+	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures;
 
 	return 0;
 }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 932348fb..9512529 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 
 		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
-		disable_irq(hdev->irq);
+		disable_hardirq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 		enable_irq(hdev->irq);
 	}
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3..2c0b0b6 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
 #include "common.h"
 
 static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 		      struct kprobe_ctlblk *kcb, unsigned long orig_ip)
 {
 	/*
@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 	__this_cpu_write(current_kprobe, NULL);
 	if (orig_ip)
 		regs->ip = orig_ip;
-	return 1;
 }
 
 int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 		    struct kprobe_ctlblk *kcb)
 {
-	if (kprobe_ftrace(p))
-		return __skip_singlestep(p, regs, kcb, 0);
-	else
-		return 0;
+	if (kprobe_ftrace(p)) {
+		__skip_singlestep(p, regs, kcb, 0);
+		preempt_enable_no_resched();
+		return 1;
+	}
+	return 0;
 }
 NOKPROBE_SYMBOL(skip_singlestep);
 
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 			   struct ftrace_ops *ops, struct pt_regs *regs)
 {
@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
 		regs->ip = ip + sizeof(kprobe_opcode_t);
 
+		/* To emulate trap based kprobes, preempt_disable here */
+		preempt_disable();
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			__skip_singlestep(p, regs, kcb, orig_ip);
+			preempt_enable_no_resched();
+		}
 		/*
 		 * If pre_handler returns !0, it sets regs->ip and
-		 * resets current kprobe.
+		 * resets the current kprobe, and keeps the preempt count +1.
 		 */
 	}
 end:
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6707039..5f70014 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
@@ -271,8 +272,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	return error;
 }
 
-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
-			      unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+		unsigned long , bytecount)
 {
 	int ret = -ENOSYS;
 
@@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
 		ret = write_ldt(ptr, bytecount, 0);
 		break;
 	}
-	return ret;
+	/*
+	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
+	 * return type, but the ABI for sys_modify_ldt() expects
+	 * 'int'.  This cast gives us an int-sized value in %rax
+	 * for the return code.  The 'unsigned' is necessary so
+	 * the compiler does not try to sign-extend the negative
+	 * return codes into the high half of the register when
+	 * taking the value from int->long.
+	 */
+	return (unsigned int)ret;
 }
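The comment above explains the final cast: a negative errno then occupies only the low 32 bits of the long returned in %rax instead of being sign-extended across all 64. A small userspace illustration of that difference; the errno value is just an example:

#include <stdio.h>

int main(void)
{
	int ret = -22;				/* e.g. -EINVAL from the ldt helpers */

	long sign_extended = ret;		/* plain int -> long promotion */
	long truncated = (unsigned int)ret;	/* what the cast in sys_modify_ldt keeps */

	printf("sign-extended: %#lx\ntruncated:     %#lx\n",
	       (unsigned long)sign_extended, (unsigned long)truncated);
	return 0;
}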
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 72b737b..c8f8dd8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2395,9 +2395,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-				     u64 cr0, u64 cr4)
+				    u64 cr0, u64 cr3, u64 cr4)
 {
 	int bad;
+	u64 pcid;
+
+	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+	pcid = 0;
+	if (cr4 & X86_CR4_PCIDE) {
+		pcid = cr3 & 0xfff;
+		cr3 &= ~0xfff;
+	}
+
+	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+	if (bad)
+		return X86EMUL_UNHANDLEABLE;
 
 	/*
 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2416,6 +2428,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
 		if (bad)
 			return X86EMUL_UNHANDLEABLE;
+		if (pcid) {
+			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+			if (bad)
+				return X86EMUL_UNHANDLEABLE;
+		}
+
 	}
 
 	return X86EMUL_CONTINUE;
@@ -2426,11 +2444,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	struct desc_struct desc;
 	struct desc_ptr dt;
 	u16 selector;
-	u32 val, cr0, cr4;
+	u32 val, cr0, cr3, cr4;
 	int i;
 
 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2472,14 +2490,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct desc_struct desc;
 	struct desc_ptr dt;
-	u64 val, cr0, cr4;
+	u64 val, cr0, cr3, cr4;
 	u32 base3;
 	u16 selector;
 	int i, r;
@@ -2496,7 +2514,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2524,7 +2542,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3f05c04..b24b3c6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -246,9 +246,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+	return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
 	kvm_lapic_set_reg(apic, APIC_ID, id);
 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
@@ -2029,6 +2034,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
 	if (apic_x2apic_mode(vcpu->arch.apic)) {
 		u32 *id = (u32 *)(s->regs + APIC_ID);
+		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
 		if (vcpu->kvm->arch.x2apic_format) {
 			if (*id != vcpu->vcpu_id)
@@ -2039,6 +2045,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 			else
 				*id <<= 24;
 		}
+
+		/* In x2APIC mode, the LDR is fixed and based on the id */
+		if (set)
+			*ldr = kvm_apic_calc_x2apic_ldr(*id);
 	}
 
 	return 0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d29c745..0a324e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5052,13 +5052,13 @@ int kvm_mmu_module_init(void)
 {
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
-					    0, 0, NULL);
+					    0, SLAB_ACCOUNT, NULL);
 	if (!pte_list_desc_cache)
 		goto nomem;
 
 	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
 						  sizeof(struct kvm_mmu_page),
-						  0, 0, NULL);
+						  0, SLAB_ACCOUNT, NULL);
 	if (!mmu_page_header_cache)
 		goto nomem;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8ca1eca..8148d8c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1382,6 +1382,9 @@ static void avic_vm_destroy(struct kvm *kvm)
 	unsigned long flags;
 	struct kvm_arch *vm_data = &kvm->arch;
 
+	if (!avic)
+		return;
+
 	avic_free_vm_id(vm_data->avic_vm_id);
 
 	if (vm_data->avic_logical_id_table_page)
@@ -2149,6 +2152,8 @@ static int ud_interception(struct vcpu_svm *svm)
 	int er;
 
 	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+	if (er == EMULATE_USER_EXIT)
+		return 0;
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -3583,6 +3588,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	u32 ecx = msr->index;
 	u64 data = msr->data;
 	switch (ecx) {
+	case MSR_IA32_CR_PAT:
+		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+			return 1;
+		vcpu->arch.pat = data;
+		svm->vmcb->save.g_pat = data;
+		mark_dirty(svm->vmcb, VMCB_NPT);
+		break;
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8ae57a..263e560 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1199,6 +1199,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -3816,6 +3821,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -5502,6 +5513,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 			return 1;
 		}
 		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+		if (er == EMULATE_USER_EXIT)
+			return 0;
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -6411,12 +6424,7 @@ static __init int hardware_setup(void)
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-	/*
-	 * Allow direct access to the PC debug port (it is often used for I/O
-	 * delays, but the vmexits simply slow things down).
-	 */
 	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-	clear_bit(0x80, vmx_io_bitmap_a);
 
 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
@@ -6431,8 +6439,10 @@ static __init int hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+		!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
@@ -7206,9 +7216,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 zero = 0;
 	gpa_t vmptr;
-	struct vmcs12 *vmcs12;
-	struct page *page;
 
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
@@ -7219,22 +7228,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
-	page = nested_get_page(vcpu, vmptr);
-	if (page == NULL) {
-		/*
-		 * For accurate processor emulation, VMCLEAR beyond available
-		 * physical memory should do nothing at all. However, it is
-		 * possible that a nested vmx bug, not a guest hypervisor bug,
-		 * resulted in this case, so let's shut down before doing any
-		 * more damage:
-		 */
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-		return 1;
-	}
-	vmcs12 = kmap(page);
-	vmcs12->launch_state = 0;
-	kunmap(page);
-	nested_release_page(page);
+	kvm_vcpu_write_guest(vcpu,
+			vmptr + offsetof(struct vmcs12, launch_state),
+			&zero, sizeof(zero));
 
 	nested_free_vmcs02(vmx, vmptr);
 
@@ -8511,6 +8507,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8536,8 +8533,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9560,10 +9559,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 		return false;
 
 	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
-	if (!page) {
-		WARN_ON(1);
+	if (!page)
 		return false;
-	}
 	msr_bitmap_l1 = (unsigned long *)kmap(page);
 	if (!msr_bitmap_l1) {
 		nested_release_page_clean(page);
@@ -10112,6 +10109,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
@@ -10715,6 +10715,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
 
 	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
 	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
@@ -10850,6 +10852,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 				vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -10899,8 +10905,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
  */
 static void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.nested_run_pending = 0;
 		nested_vmx_vmexit(vcpu, -1, 0, 0);
+	}
 	free_nested(to_vmx(vcpu));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 595f814..7e28e6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1797,6 +1797,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	 */
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+	if (guest_hv_clock.version & 1)
+		++guest_hv_clock.version;  /* first time write, random junk */
+
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
@@ -5576,6 +5579,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
 						emulation_type))
 				return EMULATE_DONE;
+			if (ctxt->have_exception && inject_emulated_exception(vcpu))
+				return EMULATE_DONE;
 			if (emulation_type & EMULTYPE_SKIP)
 				return EMULATE_FAIL;
 			return handle_emulation_failure(vcpu);
@@ -6521,6 +6526,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+	unsigned long apic_address;
+
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (start <= apic_address && apic_address < end)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;
@@ -7113,7 +7132,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
 	kvm_rip_write(vcpu, regs->rip);
-	kvm_set_rflags(vcpu, regs->rflags);
+	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
 	vcpu->arch.exception.pending = false;
 
@@ -8424,11 +8443,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 	if (work->wakeup_all)
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
 
 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 767be7c..1754e09 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -896,7 +896,7 @@
 
 GrpTable: Grp3_1
 0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
 2: NOT Eb
 3: NEG Eb
 4: MUL AL,Eb
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 1dd7960..8b5ff88 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1393,7 +1393,17 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+	 *
+	 * Note that handle_userfault() may also release and reacquire mmap_sem
+	 * (and not return with VM_FAULT_RETRY), when returning to userland to
+	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
+	 * (potentially after handling any pending signal during the return to
+	 * userland). The return to userland is identified whenever
+	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
+	 * Thus we have to be careful about not touching vma after handling the
+	 * fault, so we read the pkey beforehand.
 	 */
+	pkey = vma_pkey(vma);
 	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
@@ -1420,7 +1430,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		return;
 	}
 
-	pkey = vma_pkey(vma);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, &pkey, fault);
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index bb461cf..526536c 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
 	 * We should get host bridge information from ACPI unless the BIOS
 	 * doesn't support it.
 	 */
-	if (acpi_os_get_root_pointer())
+	if (!acpi_disabled && acpi_os_get_root_pointer())
 		return 0;
 #endif
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 9e42842..0f01751 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1848,7 +1848,6 @@ static void pq_init(int node, int pnode)
 
 	ops.write_payload_first(pnode, first);
 	ops.write_payload_last(pnode, last);
-	ops.write_g_sw_ack(pnode, 0xffffUL);
 
 	/* in effect, all msg_type's are set to MSG_NOOP */
 	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 836a1eb..3ee234b 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 #include <os.h>
@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
 	mm->arch.ldt.entry_count = 0;
 }
 
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+		unsigned long , bytecount)
 {
-	return do_modify_ldt_skas(func, ptr, bytecount);
+	/* See non-um modify_ldt() for why we do this cast */
+	return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
 }
diff --git a/block/badblocks.c b/block/badblocks.c
index 6ebcef2..2fe6c11 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 
 	if (bb->shift < 0)
 		/* badblocks are disabled */
-		return 0;
+		return 1;
 
 	if (bb->shift) {
 		/* round the start down, and the end up */
diff --git a/block/blk-core.c b/block/blk-core.c
index 9fc567c..37b814a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -284,6 +284,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
+	cancel_work_sync(&q->timeout_work);
 
 	if (q->mq_ops) {
 		struct blk_mq_hw_ctx *hctx;
@@ -528,8 +529,8 @@ void blk_set_queue_dying(struct request_queue *q)
 
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
-				wake_up(&rl->wait[BLK_RW_SYNC]);
-				wake_up(&rl->wait[BLK_RW_ASYNC]);
+				wake_up_all(&rl->wait[BLK_RW_SYNC]);
+				wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
 	}
@@ -722,6 +723,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, NULL);
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 01fb455..8c0894e 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -429,7 +429,7 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
 	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
 }
 
-static void blk_mq_sysfs_init(struct request_queue *q)
+void blk_mq_sysfs_init(struct request_queue *q)
 {
 	struct blk_mq_ctx *ctx;
 	int cpu;
@@ -449,8 +449,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
 
 	blk_mq_disable_hotplug();
 
-	blk_mq_sysfs_init(q);
-
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
 		goto out;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index dcf5ce3..4bc701b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -311,6 +311,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];
 
+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->rqs[j])
 				continue;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a7db634..74ff73f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1707,7 +1707,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
 		struct blk_mq_hw_ctx *hctx;
 
-		memset(__ctx, 0, sizeof(*__ctx));
 		__ctx->cpu = i;
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
@@ -1970,6 +1969,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_ctx)
 		goto err_exit;
 
+	/* init q->mq_kobj and sw queues' kobjects */
+	blk_mq_sysfs_init(q);
+
 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
 						GFP_KERNEL, set->numa_node);
 	if (!q->queue_hw_ctx)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e5d2524..c55bcf6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -50,6 +50,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 /*
  * sysfs helpers
  */
+extern void blk_mq_sysfs_init(struct request_queue *q);
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a30441a..220661a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -135,8 +135,6 @@ void blk_timeout_work(struct work_struct *work)
 	struct request *rq, *tmp;
 	int next_set = 0;
 
-	if (blk_queue_enter(q, true))
-		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -146,7 +144,6 @@ void blk_timeout_work(struct work_struct *work)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
-	blk_queue_exit(q);
 }
 
 /**
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 2ffd697..5a37962 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
 		pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
 			 sinfo->index, certix);
 
-		if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
+		if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
 			pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
 				sinfo->index);
 			continue;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b..029f705 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -408,6 +408,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
 	ctx->cert->pub->pkey_algo = "rsa";
 
 	/* Discard the BIT STRING metadata */
+	if (vlen < 1 || *(const u8 *)value != 0)
+		return -EBADMSG;
 	ctx->key = value + 1;
 	ctx->key_size = vlen - 1;
 	return 0;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index fb73229..e16009a 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -125,7 +125,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
 	}
 
 	ret = -EKEYREJECTED;
-	if (cert->pub->pkey_algo != cert->sig->pkey_algo)
+	if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
 		goto out;
 
 	ret = public_key_verify_signature(cert->pub, cert->sig);
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 72e38c0..ba07fb6 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -194,11 +194,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	salg = shash_attr_alg(tb[1], 0, 0);
 	if (IS_ERR(salg))
 		return PTR_ERR(salg);
+	alg = &salg->base;
 
+	/* The underlying hash algorithm must be unkeyed */
 	err = -EINVAL;
+	if (crypto_shash_alg_has_setkey(salg))
+		goto out_put_alg;
+
 	ds = salg->digestsize;
 	ss = salg->statesize;
-	alg = &salg->base;
 	if (ds > alg->cra_blocksize ||
 	    ss < alg->cra_blocksize)
 		goto out_put_alg;
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index c207458..a14100e 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -160,16 +162,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -184,7 +181,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 0b66dc8..cad395d 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
 		return -EINVAL;
 
 	if (fips_enabled) {
-		while (!*ptr && n_sz) {
+		while (n_sz && !*ptr) {
 			ptr++;
 			n_sz--;
 		}
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index f550b5d..d7da0ee 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
 
 	salsa20_ivsetup(ctx, walk.iv);
 
-	if (likely(walk.nbytes == nbytes))
-	{
-		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
-				      walk.src.virt.addr, nbytes);
-		return blkcipher_walk_done(desc, &walk, 0);
-	}
-
 	while (walk.nbytes >= 64) {
 		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
 				      walk.src.virt.addr,
diff --git a/crypto/shash.c b/crypto/shash.c
index 4d8a671..9bd5044 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -24,11 +24,12 @@
 
 static const struct crypto_type crypto_shash_type;
 
-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
-			   unsigned int keylen)
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(shash_no_setkey);
 
 static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 				  unsigned int keylen)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ae22f05..e3af318 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -342,7 +342,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			}
 
 			sg_init_aead(sg, xbuf,
-				    *b_size + (enc ? authsize : 0));
+				    *b_size + (enc ? 0 : authsize));
 
 			sg_init_aead(sgout, xoutbuf,
 				    *b_size + (enc ? authsize : 0));
@@ -350,7 +350,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			sg_set_buf(&sg[0], assoc, aad_size);
 			sg_set_buf(&sgout[0], assoc, aad_size);
 
-			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+			aead_request_set_crypt(req, sg, sgout,
+					       *b_size + (enc ? 0 : authsize),
+					       iv);
 			aead_request_set_ad(req, aad_size);
 
 			if (secs)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 3de3b6b..f43a586 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
 
 void __weak arch_unregister_cpu(int cpu) {}
 
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
-	return -ENODEV;
-}
-
 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
 	unsigned long long sta;
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index ec4f507..4558cc7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1020,7 +1020,7 @@ static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < sizeof(*rcd)) {
+	else if (len < 0 || len < sizeof(*rcd)) {
 		rc = -EIO;
 		goto out;
 	}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 56190d0..0a3ca20 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1197,7 +1197,6 @@ static int __init acpi_init(void)
 	acpi_wakeup_device_init();
 	acpi_debugger_init();
 	acpi_setup_sb_notify_handler();
-	acpi_set_processor_mapping();
 	return 0;
 }
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5187469..c3bcb7f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -482,8 +482,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
 {
 	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
 		ec_log_drv("event unblocked");
-	if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-		advance_transaction(ec);
+	/*
+	 * Unconditionally invoke this once after enabling the event
+	 * handling mechanism to detect the pending events.
+	 */
+	advance_transaction(ec);
 }
 
 static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -1458,11 +1461,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
 			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
 			    ec->reference_count >= 1)
 				acpi_ec_enable_gpe(ec, true);
-
-			/* EC is fully operational, allow queries */
-			acpi_ec_enable_event(ec);
 		}
 	}
+	/* EC is fully operational, allow queries */
+	acpi_ec_enable_event(ec);
 
 	return 0;
 }
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f3bc901..fe03d00 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1390,6 +1390,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 				dev_name(&adev_dimm->dev));
 		return -ENXIO;
 	}
+	/*
+	 * Record nfit_mem for the notification path to track back to
+	 * the nfit sysfs attributes for this dimm device object.
+	 */
+	dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
 	/*
 	 * Until standardization materializes we need to consider 4
@@ -1446,9 +1451,11 @@ static void shutdown_dimm_notify(void *data)
 			sysfs_put(nfit_mem->flags_attr);
 			nfit_mem->flags_attr = NULL;
 		}
-		if (adev_dimm)
+		if (adev_dimm) {
 			acpi_remove_notify_handler(adev_dimm->handle,
 					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+			dev_set_drvdata(&adev_dimm->dev, NULL);
+		}
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
 }
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5c78ee1..fd59ae8 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -280,79 +280,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
-	int type, id;
-	u32 acpi_id;
-	acpi_status status;
-	acpi_object_type acpi_type;
-	unsigned long long tmp;
-	union acpi_object object = { 0 };
-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
-	status = acpi_get_type(handle, &acpi_type);
-	if (ACPI_FAILURE(status))
-		return false;
-
-	switch (acpi_type) {
-	case ACPI_TYPE_PROCESSOR:
-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = object.processor.proc_id;
-
-		/* validate the acpi_id */
-		if(acpi_processor_validate_proc_id(acpi_id))
-			return false;
-		break;
-	case ACPI_TYPE_DEVICE:
-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
-		if (ACPI_FAILURE(status))
-			return false;
-		acpi_id = tmp;
-		break;
-	default:
-		return false;
-	}
-
-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
-	id = acpi_map_cpuid(*phys_id, acpi_id);
-
-	if (id < 0)
-		return false;
-	*cpuid = id;
-	return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
-			   void **rv)
-{
-	phys_cpuid_t phys_id;
-	int cpu_id;
-
-	if (!map_processor(handle, &phys_id, &cpu_id))
-		return AE_ERROR;
-
-	acpi_map_cpu2node(handle, cpu_id, phys_id);
-	return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
-	/* Set persistent cpu <-> node mapping for all processors. */
-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-			    ACPI_UINT32_MAX, set_processor_node_mapping,
-			    NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
 			 u64 *phys_addr, int *ioapic_id)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0e1ec37..6475a13 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2329,8 +2329,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
 	}
-	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
 	DPRINTK("EXIT\n");
 }
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 051b615..8d22acd 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1481,7 +1481,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;
 
 	default:
-		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
 
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 5fc81e2..e55f418 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2802,7 +2802,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
 	return err;
 
 out_free_irq:
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 out_free:
 	kfree(dev);
 out_release:
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index cd6ccdc..372d10a 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->probe)
+	if (isa_driver && isa_driver->probe)
 		return isa_driver->probe(dev, to_isa_dev(dev)->id);
 
 	return 0;
@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->remove)
+	if (isa_driver && isa_driver->remove)
 		return isa_driver->remove(dev, to_isa_dev(dev)->id);
 
 	return 0;
@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->shutdown)
+	if (isa_driver && isa_driver->shutdown)
 		isa_driver->shutdown(dev, to_isa_dev(dev)->id);
 }
 
@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->suspend)
+	if (isa_driver && isa_driver->suspend)
 		return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
 
 	return 0;
@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
 {
 	struct isa_driver *isa_driver = dev->platform_data;
 
-	if (isa_driver->resume)
+	if (isa_driver && isa_driver->resume)
 		return isa_driver->resume(dev, to_isa_dev(dev)->id);
 
 	return 0;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 6441dfd..a7c5b79 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -331,7 +331,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table)) {
 		count = PTR_ERR(opp_table);
-		dev_err(dev, "%s: OPP table not found (%d)\n",
+		dev_dbg(dev, "%s: OPP table not found (%d)\n",
 			__func__, count);
 		goto out_unlock;
 	}
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index b52c617..6937944 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -348,6 +348,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
 		if (ret) {
 			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
 				ret);
+			of_node_put(np);
 			goto free_table;
 		}
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 98b767d..7d506cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -654,7 +654,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		return nbd_size_set(nbd, bdev, nbd->blksize, arg);
 
 	case NBD_SET_TIMEOUT:
-		nbd->tag_set.timeout = arg * HZ;
+		if (arg) {
+			nbd->tag_set.timeout = arg * HZ;
+			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
+		}
 		return 0;
 
 	case NBD_SET_FLAGS:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9701cc2..ed9de1b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1296,6 +1296,8 @@ static int zram_add(void)
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+	zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+	zram->disk->queue->limits.chunk_sectors = 0;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 8900823..10f5613 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
 	mutex_init(&cci_pmu->reserve_mutex);
 	atomic_set(&cci_pmu->active_events, 0);
-	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
+	cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
 
 	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret)
+	if (ret) {
+		put_cpu();
 		return ret;
+	}
 
 	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
 					 &cci_pmu->node);
+	put_cpu();
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
 }
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index aee8346..45d7ecc 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1271,11 +1271,16 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
 
 		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+		if (!name) {
+			err = -ENOMEM;
+			goto error_choose_name;
+		}
 		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
 	}
 
 	/* Perf driver registration */
 	ccn->dt.pmu = (struct pmu) {
+		.module = THIS_MODULE,
 		.attr_groups = arm_ccn_pmu_attr_groups,
 		.task_ctx_nr = perf_invalid_context,
 		.event_init = arm_ccn_pmu_event_init,
@@ -1297,7 +1302,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	}
 
 	/* Pick one CPU which we will use to collect data from CCN... */
-	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
+	cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
 
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
@@ -1314,10 +1319,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 
 	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
 					 &ccn->dt.node);
+	put_cpu();
 	return 0;
 
 error_pmu_register:
 error_set_affinity:
+	put_cpu();
+error_choose_name:
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1578,8 +1586,8 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
-	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 }
 
 module_init(arm_ccn_init);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3058ce3..177fb3d 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -78,7 +78,8 @@
 #define FASTRPC_LINK_REMOTE_DISCONNECTING (0x8)
 #define FASTRPC_GLINK_INTENT_LEN  (64)
 
-#define PERF_KEYS "count:flush:map:copy:glink:getargs:putargs:invalidate:invoke"
+#define PERF_KEYS \
+	"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
 #define FASTRPC_STATIC_HANDLE_LISTENER (3)
 #define FASTRPC_STATIC_HANDLE_MAX (20)
 #define FASTRPC_LATENCY_CTRL_ENB  (1)
@@ -91,15 +92,22 @@
 #define PERF(enb, cnt, ff) \
 	{\
 		struct timespec startT = {0};\
-		if (enb) {\
+		int64_t *counter = cnt;\
+		if (enb && counter) {\
 			getnstimeofday(&startT);\
 		} \
 		ff ;\
-		if (enb) {\
-			cnt += getnstimediff(&startT);\
+		if (enb && counter) {\
+			*counter += getnstimediff(&startT);\
 		} \
 	}
 
+#define GET_COUNTER(perf_ptr, offset)  \
+	(perf_ptr != NULL ?\
+		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
+			(int64_t *)(perf_ptr + offset)\
+				: (int64_t *)NULL) : (int64_t *)NULL)
+
 static int fastrpc_glink_open(int cid);
 static void fastrpc_glink_close(void *chan, int cid);
 static struct dentry *debugfs_root;
@@ -146,6 +154,12 @@ static inline uint64_t ptr_to_uint64(void *ptr)
 	return addr;
 }
 
+struct secure_vm {
+	int *vmid;
+	int *vmperm;
+	int vmcount;
+};
+
 struct fastrpc_file;
 
 struct fastrpc_buf {
@@ -234,7 +248,7 @@ struct fastrpc_channel_ctx {
 	int prevssrcount;
 	int issubsystemup;
 	int vmid;
-	int rhvmid;
+	struct secure_vm rhvm;
 	int ramdumpenabled;
 	void *remoteheap_ramdump_dev;
 	struct fastrpc_glink_info link;
@@ -278,6 +292,19 @@ struct fastrpc_mmap {
 	uintptr_t attr;
 };
 
+enum fastrpc_perfkeys {
+	PERF_COUNT = 0,
+	PERF_FLUSH = 1,
+	PERF_MAP = 2,
+	PERF_COPY = 3,
+	PERF_LINK = 4,
+	PERF_GETARGS = 5,
+	PERF_PUTARGS = 6,
+	PERF_INVARGS = 7,
+	PERF_INVOKE = 8,
+	PERF_KEY_MAX = 9,
+};
+
 struct fastrpc_perf {
 	int64_t count;
 	int64_t flush;
@@ -288,6 +315,8 @@ struct fastrpc_perf {
 	int64_t putargs;
 	int64_t invargs;
 	int64_t invoke;
+	int64_t tid;
+	struct hlist_node hn;
 };
 
 struct fastrpc_file {
@@ -307,10 +336,12 @@ struct fastrpc_file {
 	int pd;
 	int file_close;
 	struct fastrpc_apps *apps;
-	struct fastrpc_perf perf;
+	struct hlist_head perf;
 	struct dentry *debugfs_file;
+	struct mutex perf_mutex;
 	struct pm_qos_request pm_qos_req;
 	int qos_request;
+	struct mutex map_mutex;
 };
 
 static struct fastrpc_apps gfa;
@@ -342,6 +373,9 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
 	},
 };
 
+static int hlosvm[1] = {VMID_HLOS};
+static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
 static inline int64_t getnstimediff(struct timespec *start)
 {
 	int64_t ns;
@@ -353,6 +387,46 @@ static inline int64_t getnstimediff(struct timespec *start)
 	return ns;
 }
 
+static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
+{
+	int err = 0;
+	int64_t *val = NULL;
+	struct fastrpc_perf *perf = NULL, *fperf = NULL;
+	struct hlist_node *n = NULL;
+
+	VERIFY(err, !IS_ERR_OR_NULL(fl));
+	if (err)
+		goto bail;
+
+	mutex_lock(&fl->perf_mutex);
+	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
+		if (perf->tid == current->pid) {
+			fperf = perf;
+			break;
+		}
+	}
+
+	if (IS_ERR_OR_NULL(fperf)) {
+		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);
+
+		VERIFY(err, !IS_ERR_OR_NULL(fperf));
+		if (err) {
+			mutex_unlock(&fl->perf_mutex);
+			kfree(fperf);
+			goto bail;
+		}
+
+		fperf->tid = current->pid;
+		hlist_add_head(&fperf->hn, &fl->perf);
+	}
+
+	val = ((int64_t *)fperf) + key;
+	mutex_unlock(&fl->perf_mutex);
+bail:
+	return val;
+}
+
+
 static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
 {
 	struct fastrpc_file *fl = buf == NULL ? NULL : buf->fl;
@@ -910,7 +984,7 @@ static int context_build_overlap(struct smq_invoke_ctx *ctx)
 #define K_COPY_TO_USER(err, kernel, dst, src, size) \
 	do {\
 		if (!(kernel))\
-			VERIFY(err, 0 == copy_to_user((void __user *)(dst), \
+			VERIFY(err, 0 == copy_to_user((void __user *)(dst),\
 						(src), (size)));\
 		else\
 			memmove((dst), (src), (size));\
@@ -1128,6 +1202,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	int mflags = 0;
 	uint64_t *fdlist;
 	uint32_t *crclist;
+	int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
 
 	/* calculate size of the metadata */
 	rpra = NULL;
@@ -1135,6 +1210,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	pages = smq_phy_page_start(sc, list);
 	ipage = pages;
 
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 	for (i = 0; i < bufs; ++i) {
 		uintptr_t buf = (uintptr_t)lpra[i].buf.pv;
 		size_t len = lpra[i].buf.len;
@@ -1145,6 +1221,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 					mflags, &ctx->maps[i]);
 		ipage += 1;
 	}
+	PERF_END);
 	handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc);
 	for (i = bufs; i < bufs + handles; i++) {
 		VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i],
@@ -1206,7 +1283,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	}
 
 	/* map ion buffers */
-	PERF(ctx->fl->profile, ctx->fl->perf.map,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
 	for (i = 0; rpra && i < inbufs + outbufs; ++i) {
 		struct fastrpc_mmap *map = ctx->maps[i];
 		uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
@@ -1257,7 +1334,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
 
 	/* copy non ion buffers */
-	PERF(ctx->fl->profile, ctx->fl->perf.copy,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
 	rlen = copylen - metalen;
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
@@ -1297,7 +1374,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	}
 	PERF_END);
 
-	PERF(ctx->fl->profile, ctx->fl->perf.flush,
+	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
 	for (oix = 0; oix < inbufs + outbufs; ++oix) {
 		int i = ctx->overps[oix]->raix;
 		struct fastrpc_mmap *map = ctx->maps[i];
@@ -1322,7 +1399,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
 	}
 
 	if (!ctx->fl->sctx->smmu.coherent) {
-		PERF(ctx->fl->profile, ctx->fl->perf.flush,
+		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH),
 		dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
 		PERF_END);
 	}
@@ -1501,6 +1578,7 @@ static void fastrpc_init(struct fastrpc_apps *me)
 	int i;
 
 	INIT_HLIST_HEAD(&me->drivers);
+	INIT_HLIST_HEAD(&me->maps);
 	spin_lock_init(&me->hlock);
 	mutex_init(&me->smd_mutex);
 	me->channel = &gcinfo[0];
@@ -1523,6 +1601,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	int interrupted = 0;
 	int err = 0;
 	struct timespec invoket = {0};
+	int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
@@ -1553,16 +1632,20 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		goto bail;
 
 	if (REMOTE_SCALARS_LENGTH(ctx->sc)) {
-		PERF(fl->profile, fl->perf.getargs,
+		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
 		VERIFY(err, 0 == get_args(kernel, ctx));
 		PERF_END);
 		if (err)
 			goto bail;
 	}
 
-	if (!fl->sctx->smmu.coherent)
+	if (!fl->sctx->smmu.coherent) {
+		PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
 		inv_args_pre(ctx);
-	PERF(fl->profile, fl->perf.link,
+		PERF_END);
+	}
+
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
 	PERF_END);
 
@@ -1578,7 +1661,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 			goto bail;
 	}
 
-	PERF(fl->profile, fl->perf.invargs,
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
 	if (!fl->sctx->smmu.coherent)
 		inv_args(ctx);
 	PERF_END);
@@ -1587,7 +1670,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 	if (err)
 		goto bail;
 
-	PERF(fl->profile, fl->perf.putargs,
+	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_PUTARGS),
 	VERIFY(err, 0 == put_args(kernel, ctx, invoke->pra));
 	PERF_END);
 	if (err)
@@ -1601,10 +1684,18 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
 		err = ECONNRESET;
 
 	if (fl->profile && !interrupted) {
-		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER)
-			fl->perf.invoke += getnstimediff(&invoket);
-		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX)
-			fl->perf.count++;
+		if (invoke->handle != FASTRPC_STATIC_HANDLE_LISTENER) {
+			int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
+
+			if (count)
+				*count += getnstimediff(&invoket);
+		}
+		if (invoke->handle > FASTRPC_STATIC_HANDLE_MAX) {
+			int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
+
+			if (count)
+				*count = *count+1;
+		}
 	}
 	return err;
 }
@@ -1620,10 +1711,6 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 	struct smq_phy_page pages[1];
 	struct fastrpc_mmap *file = NULL, *mem = NULL;
 	char *proc_name = NULL;
-	int srcVM[1] = {VMID_HLOS};
-	int destVM[1] = {me->channel[fl->cid].rhvmid};
-	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
-	int hlosVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
 	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
 	if (err)
@@ -1759,7 +1846,9 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 			phys = mem->phys;
 			size = mem->size;
 			VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
-					srcVM, 1, destVM, destVMperm, 1));
+				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
+				me->channel[fl->cid].rhvm.vmperm,
+				me->channel[fl->cid].rhvm.vmcount));
 			if (err) {
 				pr_err("ADSPRPC: hyp_assign_phys fail err %d",
 							 err);
@@ -1805,7 +1894,9 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
 	if (mem && err) {
 		if (mem->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
 			hyp_assign_phys(mem->phys, (uint64_t)mem->size,
-					destVM, 1, srcVM, hlosVMperm, 1);
+					me->channel[fl->cid].rhvm.vmid,
+					me->channel[fl->cid].rhvm.vmcount,
+					hlosvm, hlosvmperm, 1);
 		fastrpc_mmap_free(mem, 0);
 	}
 	if (file)
@@ -1898,13 +1989,10 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
 		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
 			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
 	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
-
-		int srcVM[1] = {VMID_HLOS};
-		int destVM[1] = {me->channel[fl->cid].rhvmid};
-		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
-
 		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
-				srcVM, 1, destVM, destVMperm, 1));
+				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
+				me->channel[fl->cid].rhvm.vmperm,
+				me->channel[fl->cid].rhvm.vmcount));
 		if (err)
 			goto bail;
 	}
@@ -1917,7 +2005,6 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
 {
 	int err = 0;
 	struct fastrpc_apps *me = &gfa;
-	int srcVM[1] = {me->channel[fl->cid].rhvmid};
 	int destVM[1] = {VMID_HLOS};
 	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
 
@@ -1955,7 +2042,9 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
 			TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
 	} else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
-					srcVM, 1, destVM, destVMperm, 1));
+					me->channel[fl->cid].rhvm.vmid,
+					me->channel[fl->cid].rhvm.vmcount,
+					destVM, destVMperm, 1));
 		if (err)
 			goto bail;
 	}
@@ -2062,6 +2151,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 	int err = 0;
 	struct fastrpc_mmap *map = NULL;
 
+	mutex_lock(&fl->map_mutex);
 	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
 	if (err)
 		goto bail;
@@ -2072,6 +2162,7 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
 bail:
 	if (err && map)
 		fastrpc_mmap_add(map);
+	mutex_unlock(&fl->map_mutex);
 	return err;
 }
 
@@ -2105,10 +2196,12 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
 	struct fastrpc_mmap *map = NULL;
 	int err = 0;
 
+	mutex_lock(&fl->map_mutex);
 	if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
-			 ud->size, ud->flags, 1, &map))
+			 ud->size, ud->flags, 1, &map)){
+		mutex_unlock(&fl->map_mutex);
 		return 0;
-
+	}
 	VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
 			(uintptr_t)ud->vaddrin, ud->size,
 			 ud->flags, &map));
@@ -2121,6 +2214,7 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
  bail:
 	if (err && map)
 		fastrpc_mmap_free(map, 0);
+	mutex_unlock(&fl->map_mutex);
 	return err;
 }
 
@@ -2260,8 +2354,9 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *chan,
 
 static int fastrpc_file_free(struct fastrpc_file *fl)
 {
-	struct hlist_node *n;
+	struct hlist_node *n = NULL;
 	struct fastrpc_mmap *map = NULL;
+	struct fastrpc_perf *perf = NULL, *fperf = NULL;
 	int cid;
 
 	if (!fl)
@@ -2293,6 +2388,21 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
 		fastrpc_session_free(&fl->apps->channel[cid], fl->sctx);
 	if (fl->secsctx)
 		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);
+
+	mutex_lock(&fl->perf_mutex);
+	do {
+		struct hlist_node *pn = NULL;
+
+		fperf = NULL;
+		hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
+			hlist_del_init(&perf->hn);
+			fperf = perf;
+			break;
+		}
+		kfree(fperf);
+	} while (fperf);
+	mutex_unlock(&fl->perf_mutex);
+	mutex_destroy(&fl->perf_mutex);
 	kfree(fl);
 	return 0;
 }
@@ -2306,6 +2416,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
 			pm_qos_remove_request(&fl->pm_qos_req);
 		if (fl->debugfs_file != NULL)
 			debugfs_remove(fl->debugfs_file);
+		mutex_destroy(&fl->map_mutex);
 		fastrpc_file_free(fl);
 		file->private_data = NULL;
 	}
@@ -2588,7 +2699,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
 		if (err)
 			pr_warn("adsprpc: initial intent fail for %d err %d\n",
 					 cid, err);
-		if (me->channel[cid].ssrcount !=
+		if (cid == 0 && me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 			if (fastrpc_mmap_remove_ssr(fl))
 				pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n");
@@ -2617,6 +2728,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	context_list_ctor(&fl->clst);
 	spin_lock_init(&fl->hlock);
 	INIT_HLIST_HEAD(&fl->maps);
+	INIT_HLIST_HEAD(&fl->perf);
 	INIT_HLIST_HEAD(&fl->bufs);
 	INIT_HLIST_NODE(&fl->hn);
 	fl->sessionid = 0;
@@ -2629,9 +2741,11 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	memset(&fl->perf, 0, sizeof(fl->perf));
 	fl->qos_request = 0;
 	filp->private_data = fl;
+	mutex_init(&fl->map_mutex);
 	spin_lock(&me->hlock);
 	hlist_add_head(&fl->hn, &me->drivers);
 	spin_unlock(&me->hlock);
+	mutex_init(&fl->perf_mutex);
 	return 0;
 }
 
@@ -2815,8 +2929,23 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
 				goto bail;
 		}
 		if (p.perf.data) {
-			K_COPY_TO_USER(err, 0, (void *)p.perf.data,
-						 &fl->perf, sizeof(fl->perf));
+			struct fastrpc_perf *perf = NULL, *fperf = NULL;
+			struct hlist_node *n = NULL;
+
+			mutex_lock(&fl->perf_mutex);
+			hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
+				if (perf->tid == current->pid) {
+					fperf = perf;
+					break;
+				}
+			}
+
+			mutex_unlock(&fl->perf_mutex);
+
+			if (fperf) {
+				K_COPY_TO_USER(err, 0, (void *)p.perf.data,
+					fperf, sizeof(*fperf));
+			}
 		}
 		K_COPY_TO_USER(err, 0, param, &p.perf, sizeof(p.perf));
 		if (err)
@@ -2991,6 +3120,46 @@ static int fastrpc_cb_probe(struct device *dev)
 	return err;
 }
 
+static void init_secure_vmid_list(struct device *dev, char *prop_name,
+						struct secure_vm *destvm)
+{
+	int err = 0;
+	u32 len = 0, i = 0;
+	u32 *rhvmlist = NULL;
+	u32 *rhvmpermlist = NULL;
+
+	if (!of_find_property(dev->of_node, prop_name, &len))
+		goto bail;
+	if (len == 0)
+		goto bail;
+	len /= sizeof(u32);
+	VERIFY(err, NULL != (rhvmlist = kcalloc(len, sizeof(u32), GFP_KERNEL)));
+	if (err)
+		goto bail;
+	VERIFY(err, NULL != (rhvmpermlist = kcalloc(len, sizeof(u32),
+					 GFP_KERNEL)));
+	if (err)
+		goto bail;
+	for (i = 0; i < len; i++) {
+		err = of_property_read_u32_index(dev->of_node, prop_name, i,
+								&rhvmlist[i]);
+		rhvmpermlist[i] = PERM_READ | PERM_WRITE | PERM_EXEC;
+		pr_info("ADSPRPC: Secure VMID = %d", rhvmlist[i]);
+		if (err) {
+			pr_err("ADSPRPC: Failed to read VMID\n");
+			goto bail;
+		}
+	}
+	destvm->vmid = rhvmlist;
+	destvm->vmperm = rhvmpermlist;
+	destvm->vmcount = len;
+bail:
+	if (err) {
+		kfree(rhvmlist);
+		kfree(rhvmpermlist);
+	}
+}
+
 static int fastrpc_probe(struct platform_device *pdev)
 {
 	int err = 0;
@@ -3005,10 +3174,9 @@ static int fastrpc_probe(struct platform_device *pdev)
 
 	if (of_device_is_compatible(dev->of_node,
 					"qcom,msm-fastrpc-compute")) {
-		of_property_read_u32(dev->of_node, "qcom,adsp-remoteheap-vmid",
-			&gcinfo[0].rhvmid);
+		init_secure_vmid_list(dev, "qcom,adsp-remoteheap-vmid",
+							&gcinfo[0].rhvm);
 
-		pr_info("ADSPRPC : vmids adsp=%d\n", gcinfo[0].rhvmid);
 
 		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
 			&me->latency);
@@ -3089,6 +3257,8 @@ static void fastrpc_deinit(void)
 				sess->smmu.mapping = NULL;
 			}
 		}
+		kfree(chan->rhvm.vmid);
+		kfree(chan->rhvm.vmperm);
 	}
 }
 
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c01..e0a5315 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -241,6 +241,9 @@ struct smi_info {
 	/* The timer for this si. */
 	struct timer_list   si_timer;
 
+	/* This flag is set if the timer is allowed to be started */
+	bool		    timer_can_start;
+
 	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
 	bool		    timer_running;
 
@@ -416,6 +419,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+	if (!smi_info->timer_can_start)
+		return;
 	smi_info->last_timeout_jiffies = jiffies;
 	mod_timer(&smi_info->si_timer, new_val);
 	smi_info->timer_running = true;
@@ -435,21 +440,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 2);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+	start_new_msg(smi_info, msg, 2);
 	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
 	unsigned char msg[3];
 
@@ -458,10 +460,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 	msg[2] = WDT_PRE_TIMEOUT_INT;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 3);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+	start_new_msg(smi_info, msg, 3);
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -496,11 +495,11 @@ static void start_getting_events(struct smi_info *smi_info)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = true;
-		start_check_enables(smi_info, start_timer);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -510,7 +509,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = false;
-		start_check_enables(smi_info, true);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -528,7 +527,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		if (!disable_si_irq(smi_info, true))
+		if (!disable_si_irq(smi_info))
 			smi_info->si_state = SI_NORMAL;
 	} else if (enable_si_irq(smi_info)) {
 		ipmi_free_smi_msg(msg);
@@ -544,7 +543,7 @@ static void handle_flags(struct smi_info *smi_info)
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-		start_clear_flags(smi_info, true);
+		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 		if (smi_info->intf)
 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -927,7 +926,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 		 * disable and messages disabled.
 		 */
 		if (smi_info->supports_event_msg_buff || smi_info->irq) {
-			start_check_enables(smi_info, true);
+			start_check_enables(smi_info);
 		} else {
 			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 			if (!smi_info->curr_msg)
@@ -1234,6 +1233,7 @@ static int smi_start_processing(void       *send_info,
 
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+	new_smi->timer_can_start = true;
 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/* Try to claim any interrupts. */
@@ -3448,10 +3448,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
 	check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
 	if (smi_info->thread != NULL)
 		kthread_stop(smi_info->thread);
+
+	smi_info->timer_can_start = false;
 	if (smi_info->timer_running)
 		del_timer_sync(&smi_info->si_timer);
 }
@@ -3593,7 +3595,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	 * Start clearing the flags before we enable interrupts or the
 	 * timer to avoid racing with the timer.
 	 */
-	start_clear_flags(new_smi, false);
+	start_clear_flags(new_smi);
 
 	/*
 	 * IRQ is defined to be set when non-zero.  req_events will
@@ -3671,7 +3673,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	return 0;
 
 out_err_stop_timer:
-	wait_for_timer_and_thread(new_smi);
+	stop_timer_and_thread(new_smi);
 
 out_err:
 	new_smi->interrupt_disabled = true;
@@ -3865,7 +3867,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
 	 */
 	if (to_clean->irq_cleanup)
 		to_clean->irq_cleanup(to_clean);
-	wait_for_timer_and_thread(to_clean);
+	stop_timer_and_thread(to_clean);
 
 	/*
 	 * Timeouts are stopped, now make sure the interrupts are off
@@ -3876,7 +3878,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
 	}
-	disable_si_irq(to_clean, false);
+	disable_si_irq(to_clean);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index c0e8e1f..2bfaf22 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -144,7 +144,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
 	{ HI6220_BBPPLL_SEL,    "bbppll_sel",    "pll0_bbp_gate",  CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9,  0, },
 	{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
 	{ HI6220_MMC2_SEL,      "mmc2_sel",      "mmc2_mux1",      CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
-	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll",         CLK_SET_RATE_PARENT|CLK_IS_CRITICAL,   0x270, 12, 0, },
 };
 
 static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ce8ea10..93a1966 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -487,7 +487,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
 	clk[IMX6QDL_CLK_GPU3D_CORE]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
 	clk[IMX6QDL_CLK_HDMI_IAHB]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
-	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "video_27m",         base + 0x70, 4);
+	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "mipi_core_cfg",     base + 0x70, 4);
 	clk[IMX6QDL_CLK_I2C1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
 	clk[IMX6QDL_CLK_I2C2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
 	clk[IMX6QDL_CLK_I2C3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9f24fcf..e425e50 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -185,6 +185,7 @@ struct mtk_pll_data {
 	uint32_t pcw_reg;
 	int pcw_shift;
 	const struct mtk_pll_div_table *div_table;
+	const char *parent_name;
 };
 
 void mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 0c2deac..1502384 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -302,7 +302,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
 
 	init.name = data->name;
 	init.ops = &mtk_pll_ops;
-	init.parent_names = &parent_name;
+	if (data->parent_name)
+		init.parent_names = &data->parent_name;
+	else
+		init.parent_names = &parent_name;
 	init.num_parents = 1;
 
 	clk = clk_register(NULL, &pll->hw);
diff --git a/drivers/clk/msm/clock-gcc-8953.c b/drivers/clk/msm/clock-gcc-8953.c
index 797f851..e25da83 100644
--- a/drivers/clk/msm/clock-gcc-8953.c
+++ b/drivers/clk/msm/clock-gcc-8953.c
@@ -2786,6 +2786,7 @@ static struct branch_clk gcc_oxili_timer_clk = {
 	.base = &virt_bases[GFX_BASE],
 	.c = {
 		.dbg_name = "gcc_oxili_timer_clk",
+		.parent = &xo_clk_src.c,
 		.ops = &clk_ops_branch,
 		CLK_INIT(gcc_oxili_timer_clk.c),
 	},
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index b593065..8ab6ce4 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -525,10 +525,20 @@ static struct clk_rcg2  sdcc1_apps_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_apps_clk[] = {
-	F(48000000, P_XO,	   1, 0, 0),
+	F(48000000,  P_XO,         1, 0, 0),
 	F(200000000, P_FEPLL200,   1, 0, 0),
+	F(384000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(413000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(448000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(488000000, P_DDRPLLAPSS, 1, 0, 0),
 	F(500000000, P_FEPLL500,   1, 0, 0),
-	F(626000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(512000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(537000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(565000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(597000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(632000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(672000000, P_DDRPLLAPSS, 1, 0, 0),
+	F(716000000, P_DDRPLLAPSS, 1, 0, 0),
 	{ }
 };
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 0cca360..9fe0939 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -468,8 +468,8 @@ static SUNXI_CCU_MUX_WITH_GATE(daudio0_clk, "daudio0", daudio_parents,
 static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents,
 			       0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
-			     0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", daudio_parents,
+			       0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
 
 static SUNXI_CCU_GATE(usb_phy0_clk,	"usb-phy0",	"osc24M",
 		      0x0cc, BIT(8), 0);
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
 				 0x150, 0, 4, 24, 2, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 9bd1f78..e1dc4e5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
 	.num_resets	= ARRAY_SIZE(sun8i_a33_ccu_resets),
 };
 
+static struct ccu_mux_nb sun8i_a33_cpu_nb = {
+	.common		= &cpux_clk.common,
+	.cm		= &cpux_clk.mux,
+	.delay_us	= 1, /* > 8 clock cycles at 24 MHz */
+	.bypass_index	= 1, /* index of 24 MHz oscillator */
+};
+
 static void __init sun8i_a33_ccu_setup(struct device_node *node)
 {
 	void __iomem *reg;
@@ -775,6 +782,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
 	writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
 
 	sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+
+	ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+				  &sun8i_a33_cpu_nb);
 }
 CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
 	       sun8i_a33_ccu_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 6041bdb..f69f9e8 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	sun9i_mmc_reset_assert(rcdev, id);
+	udelay(10);
+	sun9i_mmc_reset_deassert(rcdev, id);
+
+	return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert		= sun9i_mmc_reset_assert,
 	.deassert	= sun9i_mmc_reset_deassert,
+	.reset		= sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 8e2db5e..af520d8 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -963,7 +963,7 @@ static void __init tegra30_super_clk_init(void)
 	 * U71 divider of cclk_lp.
 	 */
 	clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
-				clk_base + SUPER_CCLKG_DIVIDER, 0,
+				clk_base + SUPER_CCLKLP_DIVIDER, 0,
 				TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
 	clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
 
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index c773332..7d060ff 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -265,8 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
 
 		/* Get configuration for the ATL instances */
 		snprintf(prop, sizeof(prop), "atl%u", i);
-		of_node_get(node);
-		cfg_node = of_find_node_by_name(node, prop);
+		cfg_node = of_get_child_by_name(node, prop);
 		if (cfg_node) {
 			ret = of_property_read_u32(cfg_node, "bws",
 						   &cdesc->bws);
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 5d029991..481225a 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -98,7 +98,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
 const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
 	UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1),		/* 2400 MHz */
 	UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1),	/* 2560 MHz */
-	UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125),	/* 2949.12 MHz */
+	UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125),	/* 2949.12 MHz */
 	UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
 	UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
 	UNIPHIER_PRO5_SYS_CLK_SD,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43..84b6efe 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -315,6 +315,14 @@
 	  value").  The workaround will only be active if the
 	  fsl,erratum-a008585 property is found in the timer node.
 
+config ARM_ARCH_TIMER_VCT_ACCESS
+	bool "Support for ARM architected timer virtual counter access in userspace"
+	default !ARM64
+	depends on ARM_ARCH_TIMER
+	help
+	  This option enables support for reading the ARM architected timer's
+	  virtual counter in userspace.
+
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
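
For context on the ARM_ARCH_TIMER_VCT_ACCESS option added above: when it is enabled, arch_counter_set_user_access() leaves EL0 access to the virtual counter on, so userspace can read CNTVCT directly. A minimal illustrative sketch for arm64 userspace follows (not part of this patch; the helper name is hypothetical):

	#include <stdint.h>

	/* Reads the virtual counter; traps if EL0 virtual counter
	 * access has been disabled in CNTKCTL. */
	static inline uint64_t read_cntvct(void)
	{
		uint64_t cval;

		asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
		return cval;
	}
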
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5db1897..4a4ee0f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -48,6 +48,8 @@
 #define CNTFRQ		0x10
 #define CNTP_TVAL	0x28
 #define CNTP_CTL	0x2c
+#define CNTCVAL_LO	0x30
+#define CNTCVAL_HI	0x34
 #define CNTV_TVAL	0x38
 #define CNTV_CTL	0x3c
 
@@ -447,8 +449,13 @@ static void arch_counter_set_user_access(void)
 			| ARCH_TIMER_USR_VT_ACCESS_EN
 			| ARCH_TIMER_VIRT_EVT_EN);
 
-	/* Enable user access to the virtual and physical counters */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN | ARCH_TIMER_USR_PCT_ACCESS_EN;
+	cntkctl |= ARCH_TIMER_USR_PCT_ACCESS_EN;
+
+	/* Enable user access to the virtual counter */
+	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS))
+		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		cntkctl &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
@@ -541,6 +548,23 @@ u32 arch_timer_get_rate(void)
 	return arch_timer_rate;
 }
 
+void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
+{
+	u32 ctrl;
+
+	*lo = *hi = ~0U;
+
+	if (!arch_counter_base)
+		return;
+
+	ctrl = readl_relaxed_no_log(arch_counter_base + CNTV_CTL);
+
+	if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+		*lo = readl_relaxed_no_log(arch_counter_base + CNTCVAL_LO);
+		*hi = readl_relaxed_no_log(arch_counter_base + CNTCVAL_HI);
+	}
+}
+
 static u64 arch_counter_get_cntvct_mem(void)
 {
 	u32 vct_lo, vct_hi, tmp_hi;
@@ -873,7 +897,7 @@ static int __init arch_timer_init(void)
 		return ret;
 
 	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
-	
+
 	return 0;
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e8c7af52..1d5dba9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -990,11 +990,19 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
-			       struct device *dev)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
 {
+	struct device *dev = get_cpu_device(cpu);
+
+	if (!dev)
+		return;
+
+	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+		return;
+
 	dev_dbg(dev, "%s: Adding symlink\n", __func__);
-	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
+		dev_err(dev, "cpufreq symlink creation failed\n");
 }
 
 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1257,10 +1265,10 @@ static int cpufreq_online(unsigned int cpu)
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 
-		write_lock_irqsave(&cpufreq_driver_lock, flags);
-		for_each_cpu(j, policy->related_cpus)
+		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
-		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			add_cpu_dev_symlink(policy, j);
+		}
 	} else {
 		policy->min = policy->user_policy.min;
 		policy->max = policy->user_policy.max;
@@ -1357,13 +1365,15 @@ static int cpufreq_online(unsigned int cpu)
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
+
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+
 out_free_policy:
 	cpufreq_policy_free(policy, !new_policy);
 	return ret;
 }
 
-static int cpufreq_offline(unsigned int cpu);
-
 /**
  * cpufreq_add_dev - the cpufreq interface for a CPU device.
  * @dev: CPU device.
@@ -1385,16 +1395,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 	/* Create sysfs link on CPU registration */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
-		return 0;
+	if (policy)
+		add_cpu_dev_symlink(policy, cpu);
 
-	ret = add_cpu_dev_symlink(policy, dev);
-	if (ret) {
-		cpumask_clear_cpu(cpu, policy->real_cpus);
-		cpufreq_offline(cpu);
-	}
-
-	return ret;
+	return 0;
 }
 
 static int cpufreq_offline(unsigned int cpu)
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7fe442c..854a567 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -164,6 +164,24 @@ static int powernv_cpuidle_driver_init(void)
 		drv->state_count += 1;
 	}
 
+	/*
+	 * On the PowerNV platform cpu_present may be less than cpu_possible in
+	 * cases when firmware detects the CPU, but it is not available to the
+	 * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotpluggable at
+	 * run time and hence cpu_devices are not created for those CPUs by the
+	 * generic topology_init().
+	 *
+	 * drv->cpumask defaults to cpu_possible_mask in
+	 * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
+	 * cpu_devices are not created for CPUs in cpu_possible_mask that
+	 * cannot be hot-added later at run time.
+	 *
+	 * Trying cpuidle_register_device() on a CPU without a cpu_device is
+	 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
+	 */
+
+	drv->cpumask = (struct cpumask *)cpu_present_mask;
+
 	return 0;
 }
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a3e1de0..3eddf43 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -189,6 +189,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 			return -EBUSY;
 		}
 		target_state = &drv->states[index];
+		broadcast = false;
 	}
 
 	/* Take note of the planned idle state. */
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 5452ad8..0ea769c 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1043,18 +1043,9 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
 	}
 
 	if (level->notify_rpm) {
-		uint64_t us;
-		uint32_t pred_us;
-
-		us = get_cluster_sleep_time(cluster, NULL, from_idle,
-								&pred_us);
-
-		us = us + 1;
-
 		clear_predict_history();
 		clear_cl_predict_history();
-
-		if (system_sleep_enter(us))
+		if (system_sleep_enter())
 			return -EBUSY;
 	}
 	/* Notify cluster enter event after successfully config completion */
@@ -1261,6 +1252,13 @@ int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl)
 		state_id |= (level->psci_id & cluster->psci_mode_mask)
 					<< cluster->psci_mode_shift;
 		(*aff_lvl)++;
+
+		/*
+		 * We may have updated the broadcast timers, update
+		 * the wakeup value by reading the bc timer directly.
+		 */
+		if (level->notify_rpm)
+			system_sleep_update_wakeup();
 	}
 unlock_and_return:
 	spin_unlock(&cluster->sync_lock);
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 832a2c3..9e98a5f 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	int error;
 
+	/*
+	 * Return if cpu_device is not set up for this CPU.
+	 *
+	 * This could happen if the arch did not set up cpu_device
+	 * since this CPU is not in cpu_present mask and the
+	 * driver did not send a correct CPU mask during registration.
+	 * Without this check we would end up passing a bogus
+	 * value for &cpu_dev->kobj in kobject_init_and_add().
+	 */
+	if (!cpu_dev)
+		return -ENODEV;
+
 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
 	if (!kdev)
 		return -ENOMEM;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ecfdcfe..4f41d6d 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -34,12 +34,12 @@
 #define PPC405EX_CE_RESET                       0x00000008
 
 #define CRYPTO4XX_CRYPTO_PRIORITY		300
-#define PPC4XX_LAST_PD				63
-#define PPC4XX_NUM_PD				64
-#define PPC4XX_LAST_GD				1023
+#define PPC4XX_NUM_PD				256
+#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
 #define PPC4XX_NUM_GD				1024
-#define PPC4XX_LAST_SD				63
-#define PPC4XX_NUM_SD				64
+#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD				256
+#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
 #define PPC4XX_SD_BUFFER_SIZE			2048
 
 #define PD_ENTRY_INUSE				1
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c050..e2bcacc 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
 	struct device		*dev;
 	int ridx;
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */
 
 	/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f..9e7f281 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
 
 	ret = caam_reset_hw_jr(dev);
 
+	tasklet_kill(&jrp->irqtask);
+
 	/* Release interrupt */
 	free_irq(jrp->irq, dev);
 
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 
 	/*
 	 * Check the output ring for ready responses, kick
-	 * the threaded irq if jobs done.
+	 * the tasklet if jobs done.
 	 */
 	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
 	if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 	/* Have valid interrupt at this point, just ACK and trigger */
 	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
-	return IRQ_WAKE_THREAD;
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask);
+	preempt_enable();
+
+	return IRQ_HANDLED;
 }
 
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
 {
 	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = st_dev;
+	struct device *dev = (struct device *)devarg;
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
 
 	/* reenable / unmask IRQs */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
-	return IRQ_HANDLED;
 }
 
 /**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
 
 	jrp = dev_get_drvdata(dev);
 
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
 	/* Connect job ring interrupt handler. */
-	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
-				     caam_jr_threadirq, IRQF_SHARED,
-				     dev_name(dev), dev);
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    dev_name(dev), dev);
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev)
 out_free_irq:
 	free_irq(jrp->irq, dev);
 out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
 	return error;
 }
 
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33..3629184 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_SRC_IN_SRAM			BIT(30)
 #define CESA_TDMA_END_OF_REQ			BIT(29)
 #define CESA_TDMA_BREAK_CHAIN			BIT(28)
-#define CESA_TDMA_TYPE_MSK			GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE			BIT(27)
+#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
 #define CESA_TDMA_DUMMY				0
 #define CESA_TDMA_DATA				1
 #define CESA_TDMA_OP				2
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b3..662cf4d 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
 	sreq->offset = 0;
 }
 
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_req *base = &creq->base;
+
+	/* We must explicitly set the digest state. */
+	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+		struct mv_cesa_engine *engine = base->engine;
+		int i;
+
+		/* Set the hash state in the IVDIG regs. */
+		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+			writel_relaxed(creq->state[i], engine->regs +
+				       CESA_IVDIG(i));
+	}
+
+	mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
 	struct ahash_request *ahashreq = ahash_request_cast(req);
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		mv_cesa_dma_step(&creq->base);
+		mv_cesa_ahash_dma_step(ahashreq);
 	else
 		mv_cesa_ahash_std_step(ahashreq);
 }
@@ -562,11 +581,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 	struct mv_cesa_ahash_dma_iter iter;
 	struct mv_cesa_op_ctx *op = NULL;
 	unsigned int frag_len;
+	bool set_state = false;
 	int ret;
 
 	basereq->chain.first = NULL;
 	basereq->chain.last = NULL;
 
+	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+		set_state = true;
+
 	if (creq->src_nents) {
 		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
 				 DMA_TO_DEVICE);
@@ -650,6 +673,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
 				       CESA_TDMA_BREAK_CHAIN);
 
+	if (set_state) {
+		/*
+		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+		 * let the step logic know that the IVDIG registers should be
+		 * explicitly set before launching a TDMA chain.
+		 */
+		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+	}
+
 	return 0;
 
 err_free_tdma:
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5f..0cda6e3 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -112,7 +112,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
 		last->next = dreq->chain.first;
 		engine->chain.last = dreq->chain.last;
 
-		if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+		/*
+		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+		 * the last element of the current chain, or if the request
+		 * being queued needs the IV regs to be set before launching
+		 * the request.
+		 */
+		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
 			last->next_dma = dreq->chain.first->cur_dma;
 	}
 }
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0..a668286 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -805,8 +805,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
 	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
 	if (err < 0) {
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index e2d323f..1c8d79d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 			sg_link_tbl_len += authsize;
 	}
 
-	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
-				  &desc->ptr[4], sg_count, areq->assoclen,
-				  tbl_off);
+	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
 
-	if (sg_count > 1) {
-		tbl_off += sg_count;
+	if (ret > 1) {
+		tbl_off += ret;
 		sync_needed = true;
 	}
 
@@ -1248,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
 	}
 
-	sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
-				  &desc->ptr[5], sg_count, areq->assoclen,
-				  tbl_off);
+	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+			     sg_count, areq->assoclen, tbl_off);
 
 	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
 
-	if (sg_count > 1) {
+	/* ICV data */
+	if (ret > 1) {
+		tbl_off += ret;
 		edesc->icv_ool = true;
 		sync_needed = true;
 
@@ -1265,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 				     sizeof(struct talitos_ptr) + authsize;
 
 			/* Add an entry to the link table for ICV data */
-			tbl_ptr += sg_count - 1;
-			to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
-			tbl_ptr++;
+			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
 			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
 					       is_sec1);
 			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
@@ -1275,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 			/* icv data follows link tables */
 			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
 				       is_sec1);
+		} else {
+			dma_addr_t addr = edesc->dma_link_tbl;
+
+			if (is_sec1)
+				addr += areq->assoclen + cryptlen;
+			else
+				addr += sizeof(struct talitos_ptr) * tbl_off;
+
+			to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
+			to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
+		}
+	} else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+				     &desc->ptr[6], sg_count, areq->assoclen +
+							      cryptlen,
+				     tbl_off);
+		if (ret > 1) {
+			tbl_off += ret;
+			edesc->icv_ool = true;
+			sync_needed = true;
+		} else {
+			edesc->icv_ool = false;
 		}
 	} else {
 		edesc->icv_ool = false;
 	}
 
-	/* ICV data */
-	if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
-		to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
-		to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
-			       areq->assoclen + cryptlen, is_sec1);
-	}
-
 	/* iv out */
 	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1494,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 			     const u8 *key, unsigned int keylen)
 {
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 tmp[DES_EXPKEY_WORDS];
 
 	if (keylen > TALITOS_MAX_KEY_SIZE) {
 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
+	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+		     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    !des_ekey(tmp, key)) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
 	memcpy(&ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
@@ -2614,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.ivsize = AES_BLOCK_SIZE,
 			}
 		},
-		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
 				     DESC_HDR_SEL0_AESU |
 				     DESC_HDR_MODE0_AESU_CTR,
 	},
@@ -3047,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		t_alg->algt.alg.aead.setkey = aead_setkey;
 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+			kfree(t_alg);
+			return ERR_PTR(-ENOTSUPP);
+		}
 		break;
 	case CRYPTO_ALG_TYPE_AHASH:
 		alg = &t_alg->algt.alg.hash.halg.base;
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index f9b758f..33e16261 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,7 +35,8 @@
 #define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
 #define MON_INT_STATUS(m)	((m)->base + 0x100)
 #define MON_INT_STATUS_MASK	0x03
-#define MON2_INT_STATUS_MASK	0xF0
+#define MON2_INT_STATUS_MASK	0xA0
+#define MON2_INT_DISABLE_MASK	0xF0
 #define MON2_INT_STATUS_SHIFT	4
 #define MON_INT_CLR(m)		((m)->base + 0x108)
 #define	MON_INT_EN(m)		((m)->base + 0x10C)
@@ -63,7 +64,8 @@
 #define MON3_INT_STATUS(m)	((m)->base + 0x00)
 #define MON3_INT_CLR(m)		((m)->base + 0x08)
 #define MON3_INT_EN(m)		((m)->base + 0x0C)
-#define MON3_INT_STATUS_MASK	0x0F
+#define MON3_INT_STATUS_MASK	0x0A
+#define MON3_INT_DISABLE_MASK	0x0F
 #define MON3_EN(m)		((m)->base + 0x10)
 #define MON3_CLEAR(m)		((m)->base + 0x14)
 #define MON3_MASK(m)		((m)->base + 0x18)
@@ -283,12 +285,12 @@ void mon_irq_disable(struct bwmon *m, enum mon_reg_type type)
 	case MON2:
 		mon_glb_irq_disable(m);
 		val = readl_relaxed(MON_INT_EN(m));
-		val &= ~MON2_INT_STATUS_MASK;
+		val &= ~MON2_INT_DISABLE_MASK;
 		writel_relaxed(val, MON_INT_EN(m));
 		break;
 	case MON3:
 		val = readl_relaxed(MON3_INT_EN(m));
-		val &= ~MON3_INT_STATUS_MASK;
+		val &= ~MON3_INT_DISABLE_MASK;
 		writel_relaxed(val, MON3_INT_EN(m));
 		break;
 	}
diff --git a/drivers/devfreq/governor_spdm_bw_hyp.c b/drivers/devfreq/governor_spdm_bw_hyp.c
index 5751ab6..7e7e0ee 100644
--- a/drivers/devfreq/governor_spdm_bw_hyp.c
+++ b/drivers/devfreq/governor_spdm_bw_hyp.c
@@ -1,5 +1,5 @@
 /*
- *Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  *This program is free software; you can redistribute it and/or modify
  *it under the terms of the GNU General Public License version 2 and
@@ -42,7 +42,7 @@ static int enable_clocks(void)
 
 	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
 					 SPDM_RES_ID, 1);
-	if (!rpm_req)
+	if (IS_ERR_OR_NULL(rpm_req))
 		return -ENODEV;
 	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&one,
 			     sizeof(int));
@@ -61,7 +61,7 @@ static int disable_clocks(void)
 
 	rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
 					 SPDM_RES_ID, 1);
-	if (!rpm_req)
+	if (IS_ERR_OR_NULL(rpm_req))
 		return -ENODEV;
 	msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&zero,
 			     sizeof(int));
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 094548b..883b3be 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -280,6 +280,31 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb,
 EXPORT_SYMBOL(fence_add_callback);
 
 /**
+ * fence_get_status - returns the status upon completion
+ * @fence: [in]	the fence to query
+ *
+ * This wraps fence_get_status_locked() to return the error status
+ * condition on a signaled fence. See fence_get_status_locked() for more
+ * details.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has been completed in err.
+ */
+int fence_get_status(struct fence *fence)
+{
+	unsigned long flags;
+	int status;
+
+	spin_lock_irqsave(fence->lock, flags);
+	status = fence_get_status_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return status;
+}
+EXPORT_SYMBOL(fence_get_status);
+
+/**
  * fence_remove_callback - remove a callback from the signaling list
  * @fence:	[in]	the fence to wait on
  * @cb:		[in]	the callback to remove
@@ -529,6 +554,7 @@ fence_init(struct fence *fence, const struct fence_ops *ops,
 	fence->context = context;
 	fence->seqno = seqno;
 	fence->flags = 0UL;
+	fence->error = 0;
 
 	trace_fence_init(fence);
 }
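
The fence_get_status() helper added above collapses a fence's state into one integer: 0 while active, 1 when signaled successfully, or a negative error code. A minimal usage sketch (illustrative only, not part of this patch; my_log_fence is a hypothetical caller):

	#include <linux/fence.h>
	#include <linux/printk.h>

	/* Hypothetical helper: report the completion state of a fence. */
	static void my_log_fence(struct fence *f)
	{
		int status = fence_get_status(f);

		if (status == 0)
			pr_debug("fence %llu/%u still active\n", f->context, f->seqno);
		else if (status == 1)
			pr_debug("fence %llu/%u signaled\n", f->context, f->seqno);
		else
			pr_debug("fence %llu/%u completed with error %d\n",
				 f->context, f->seqno, status);
	}
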
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 0cb8d9d..9dc86d3 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -96,9 +96,9 @@ struct sync_timeline *sync_timeline_create(const char *name)
 	obj->context = fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
-	INIT_LIST_HEAD(&obj->child_list_head);
-	INIT_LIST_HEAD(&obj->active_list_head);
-	spin_lock_init(&obj->child_list_lock);
+	obj->pt_tree = RB_ROOT;
+	INIT_LIST_HEAD(&obj->pt_list);
+	spin_lock_init(&obj->lock);
 
 	sync_timeline_debug_add(obj);
 
@@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj)
 	kref_put(&obj->kref, sync_timeline_free);
 }
 
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj:	sync_timeline to signal
- * @inc:	num to increment on timeline->value
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
-{
-	unsigned long flags;
-	struct sync_pt *pt, *next;
-
-	trace_sync_timeline(obj);
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-
-	obj->value += inc;
-
-	list_for_each_entry_safe(pt, next, &obj->active_list_head,
-				 active_list) {
-		if (fence_is_signaled_locked(&pt->base))
-			list_del_init(&pt->active_list);
-	}
-
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent:	fence's parent sync_timeline
- * @size:	size to allocate for this pt
- * @inc:	value of the fence
- *
- * Creates a new sync_pt as a child of @parent.  @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the sync_pt object or
- * NULL in case of error.
- */
-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
-			     unsigned int value)
-{
-	unsigned long flags;
-	struct sync_pt *pt;
-
-	if (size < sizeof(*pt))
-		return NULL;
-
-	pt = kzalloc(size, GFP_KERNEL);
-	if (!pt)
-		return NULL;
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	sync_timeline_get(obj);
-	fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
-		   obj->context, value);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	INIT_LIST_HEAD(&pt->active_list);
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-	return pt;
-}
-
 static const char *timeline_fence_get_driver_name(struct fence *fence)
 {
 	return "sw_sync";
@@ -203,13 +141,17 @@ static void timeline_fence_release(struct fence *fence)
 {
 	struct sync_pt *pt = fence_to_sync_pt(fence);
 	struct sync_timeline *parent = fence_parent(fence);
-	unsigned long flags;
 
-	spin_lock_irqsave(fence->lock, flags);
-	list_del(&pt->child_list);
-	if (!list_empty(&pt->active_list))
-		list_del(&pt->active_list);
-	spin_unlock_irqrestore(fence->lock, flags);
+	if (!list_empty(&pt->link)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (!list_empty(&pt->link)) {
+			list_del(&pt->link);
+			rb_erase(&pt->node, &parent->pt_tree);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
 
 	sync_timeline_put(parent);
 	fence_free(fence);
@@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct fence *fence)
 {
 	struct sync_timeline *parent = fence_parent(fence);
 
-	return (fence->seqno > parent->value) ? false : true;
+	return !__fence_is_later(fence->seqno, parent->value);
 }
 
 static bool timeline_fence_enable_signaling(struct fence *fence)
 {
-	struct sync_pt *pt = fence_to_sync_pt(fence);
-	struct sync_timeline *parent = fence_parent(fence);
-
-	if (timeline_fence_signaled(fence))
-		return false;
-
-	list_add_tail(&pt->active_list, &parent->active_list_head);
 	return true;
 }
 
@@ -238,7 +173,7 @@ static void timeline_fence_disable_signaling(struct fence *fence)
 {
 	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
 
-	list_del_init(&pt->active_list);
+	list_del_init(&pt->link);
 }
 
 static void timeline_fence_value_str(struct fence *fence,
@@ -267,6 +202,107 @@ static const struct fence_ops timeline_fence_ops = {
 	.timeline_value_str = timeline_fence_timeline_value_str,
 };
 
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:	sync_timeline to signal
+ * @inc:	num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+	struct sync_pt *pt, *next;
+
+	trace_sync_timeline(obj);
+
+	spin_lock_irq(&obj->lock);
+
+	obj->value += inc;
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		if (!timeline_fence_signaled(&pt->base))
+			break;
+
+		list_del_init(&pt->link);
+		rb_erase(&pt->node, &obj->pt_tree);
+
+		/*
+		 * A signal callback may release the last reference to this
+		 * fence, causing it to be freed. That operation has to be
+		 * last to avoid a use after free inside this loop, and must
+		 * be after we remove the fence from the timeline in order to
+		 * prevent deadlocking on timeline->lock inside
+		 * timeline_fence_release().
+		 */
+		fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent:	fence's parent sync_timeline
+ * @inc:	value of the fence
+ *
+ * Creates a new sync_pt as a child of @parent.  @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+				      unsigned int value)
+{
+	struct sync_pt *pt;
+
+	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+	if (!pt)
+		return NULL;
+
+	sync_timeline_get(obj);
+	fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+		   obj->context, value);
+	INIT_LIST_HEAD(&pt->link);
+
+	spin_lock_irq(&obj->lock);
+	if (!fence_is_signaled_locked(&pt->base)) {
+		struct rb_node **p = &obj->pt_tree.rb_node;
+		struct rb_node *parent = NULL;
+
+		while (*p) {
+			struct sync_pt *other;
+			int cmp;
+
+			parent = *p;
+			other = rb_entry(parent, typeof(*pt), node);
+			cmp = value - other->base.seqno;
+			if (cmp > 0) {
+				p = &parent->rb_right;
+			} else if (cmp < 0) {
+				p = &parent->rb_left;
+			} else {
+				if (fence_get_rcu(&other->base)) {
+					fence_put(&pt->base);
+					pt = other;
+					goto unlock;
+				}
+				p = &parent->rb_left;
+			}
+		}
+		rb_link_node(&pt->node, parent, p);
+		rb_insert_color(&pt->node, &obj->pt_tree);
+
+		parent = rb_next(&pt->node);
+		list_add_tail(&pt->link,
+			      parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+	}
+unlock:
+	spin_unlock_irq(&obj->lock);
+
+	return pt;
+}
+
 /*
  * *WARNING*
  *
@@ -293,8 +329,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
 static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
 {
 	struct sync_timeline *obj = file->private_data;
+	struct sync_pt *pt, *next;
 
-	smp_wmb();
+	spin_lock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+		fence_set_error(&pt->base, -ENOENT);
+		fence_signal_locked(&pt->base);
+	}
+
+	spin_unlock_irq(&obj->lock);
 
 	sync_timeline_put(obj);
 	return 0;
@@ -317,7 +361,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
 		goto err;
 	}
 
-	pt = sync_pt_create(obj, sizeof(*pt), data.value);
+	pt = sync_pt_create(obj, data.value);
 	if (!pt) {
 		err = -ENOMEM;
 		goto err;
@@ -353,6 +397,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
 	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
 		return -EFAULT;
 
+	while (value > INT_MAX)  {
+		sync_timeline_signal(obj, INT_MAX);
+		value -= INT_MAX;
+	}
+
 	sync_timeline_signal(obj, value);
 
 	return 0;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 2dd4c3d..858263d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -62,29 +62,29 @@ void sync_file_debug_remove(struct sync_file *sync_file)
 
 static const char *sync_status_str(int status)
 {
-	if (status == 0)
-		return "signaled";
+	if (status < 0)
+		return "error";
 
 	if (status > 0)
-		return "active";
+		return "signaled";
 
-	return "error";
+	return "active";
 }
 
-static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
+static void sync_print_fence(struct seq_file *s,
+			     struct fence *fence, bool show)
 {
-	int status = 1;
 	struct sync_timeline *parent = fence_parent(fence);
+	int status;
 
-	if (fence_is_signaled_locked(fence))
-		status = fence->status;
+	status = fence_get_status_locked(fence);
 
 	seq_printf(s, "  %s%sfence %s",
 		   show ? parent->name : "",
 		   show ? "_" : "",
 		   sync_status_str(status));
 
-	if (status <= 0) {
+	if (status) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);
 
@@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
 {
 	struct list_head *pos;
-	unsigned long flags;
 
 	seq_printf(s, "%s: %d\n", obj->name, obj->value);
 
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_for_each(pos, &obj->child_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, child_list);
+	spin_lock_irq(&obj->lock);
+	list_for_each(pos, &obj->pt_list) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, link);
 		sync_print_fence(s, &pt->base, false);
 	}
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+	spin_unlock_irq(&obj->lock);
 }
 
 static void sync_print_sync_file(struct seq_file *s,
@@ -135,7 +133,7 @@ static void sync_print_sync_file(struct seq_file *s,
 	int i;
 
 	seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
-		   sync_status_str(!fence_is_signaled(sync_file->fence)));
+		   sync_status_str(fence_get_status(sync_file->fence)));
 
 	if (fence_is_array(sync_file->fence)) {
 		struct fence_array *array = to_fence_array(sync_file->fence);
@@ -149,12 +147,11 @@ static void sync_print_sync_file(struct seq_file *s,
 
 static int sync_debugfs_show(struct seq_file *s, void *unused)
 {
-	unsigned long flags;
 	struct list_head *pos;
 
 	seq_puts(s, "objs:\n--------------\n");
 
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	spin_lock_irq(&sync_timeline_list_lock);
 	list_for_each(pos, &sync_timeline_list_head) {
 		struct sync_timeline *obj =
 			container_of(pos, struct sync_timeline,
@@ -163,11 +160,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
 		sync_print_obj(s, obj);
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	spin_unlock_irq(&sync_timeline_list_lock);
 
 	seq_puts(s, "fences:\n--------------\n");
 
-	spin_lock_irqsave(&sync_file_list_lock, flags);
+	spin_lock_irq(&sync_file_list_lock);
 	list_for_each(pos, &sync_file_list_head) {
 		struct sync_file *sync_file =
 			container_of(pos, struct sync_file, sync_file_list);
@@ -175,7 +172,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
 		sync_print_sync_file(s, sync_file);
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&sync_file_list_lock, flags);
+	spin_unlock_irq(&sync_file_list_lock);
 	return 0;
 }
 
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index d269aa6..9615dc0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -14,6 +14,7 @@
 #define _LINUX_SYNC_H
 
 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/fence.h>
 
@@ -24,43 +25,41 @@
  * struct sync_timeline - sync object
  * @kref:		reference count on fence.
  * @name:		name of the sync_timeline. Useful for debugging
- * @child_list_head:	list of children sync_pts for this sync_timeline
- * @child_list_lock:	lock protecting @child_list_head and fence.status
- * @active_list_head:	list of active (unsignaled/errored) sync_pts
+ * @lock:		lock protecting @pt_list and @value
+ * @pt_tree:		rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list:		list of active (unsignaled/errored) sync_pts
  * @sync_timeline_list:	membership in global sync_timeline_list
  */
 struct sync_timeline {
 	struct kref		kref;
 	char			name[32];
 
-	/* protected by child_list_lock */
+	/* protected by lock */
 	u64			context;
 	int			value;
 
-	struct list_head	child_list_head;
-	spinlock_t		child_list_lock;
-
-	struct list_head	active_list_head;
+	struct rb_root		pt_tree;
+	struct list_head	pt_list;
+	spinlock_t		lock;
 
 	struct list_head	sync_timeline_list;
 };
 
 static inline struct sync_timeline *fence_parent(struct fence *fence)
 {
-	return container_of(fence->lock, struct sync_timeline,
-			    child_list_lock);
+	return container_of(fence->lock, struct sync_timeline, lock);
 }
 
 /**
  * struct sync_pt - sync_pt object
  * @base: base fence object
- * @child_list: sync timeline child's list
- * @active_list: sync timeline active child's list
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
  */
 struct sync_pt {
 	struct fence base;
-	struct list_head child_list;
-	struct list_head active_list;
+	struct list_head link;
+	struct rb_node node;
 };
 
 #ifdef CONFIG_SW_SYNC
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2f34a01..7053bb4 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -376,10 +376,8 @@ static void sync_fill_fence_info(struct fence *fence,
 		sizeof(info->obj_name));
 	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	if (fence_is_signaled(fence))
-		info->status = fence->status >= 0 ? 1 : fence->status;
-	else
-		info->status = 0;
+
+	info->status = fence_get_status(fence);
 	info->timestamp_ns = ktime_to_ns(fence->timestamp);
 }
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6b53526..3db94e8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1107,12 +1107,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
 	switch (order) {
 	case 0 ... 1:
 		return &unmap_pool[0];
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	case 2 ... 4:
 		return &unmap_pool[1];
 	case 5 ... 7:
 		return &unmap_pool[2];
 	case 8:
 		return &unmap_pool[3];
+#endif
 	default:
 		BUG();
 		return NULL;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index fbb7551..e0bd578 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -158,6 +158,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_OVERWRITE	0x20
 #define PATTERN_COUNT_MASK	0x1f
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
 struct dmatest_thread {
 	struct list_head	node;
 	struct dmatest_info	*info;
@@ -166,6 +172,8 @@ struct dmatest_thread {
 	u8			**srcs;
 	u8			**dsts;
 	enum dma_transaction_type type;
+	wait_queue_head_t done_wait;
+	struct dmatest_done test_done;
 	bool			done;
 };
 
@@ -326,18 +334,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-	bool			done;
-	wait_queue_head_t	*wait;
-};
 
 static void dmatest_callback(void *arg)
 {
 	struct dmatest_done *done = arg;
-
-	done->done = true;
-	wake_up_all(done->wait);
+	struct dmatest_thread *thread =
+		container_of(arg, struct dmatest_thread, done_wait);
+	if (!thread->done) {
+		done->done = true;
+		wake_up_all(done->wait);
+	} else {
+		/*
+		 * If thread->done, it means that this callback occurred
+		 * after the parent thread has cleaned up. This can
+		 * happen in the case that the driver doesn't implement
+		 * the terminate_all() functionality and a dma operation
+		 * did not occur within the timeout period.
+		 */
+		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+	}
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -408,9 +423,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
-	struct dmatest_done	done = { .wait = &done_wait };
+	struct dmatest_done	*done = &thread->test_done;
 	struct dmatest_info	*info;
 	struct dmatest_params	*params;
 	struct dma_chan		*chan;
@@ -637,9 +651,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		done.done = false;
+		done->done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &done;
+		tx->callback_param = done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -652,21 +666,12 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(done_wait, done.done,
+		wait_event_freezable_timeout(thread->done_wait, done->done,
 					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (!done.done) {
-			/*
-			 * We're leaving the timed out dma operation with
-			 * dangling pointer to done_wait.  To make this
-			 * correct, we'll need to allocate wait_done for
-			 * each test iteration and perform "who's gonna
-			 * free it this time?" dancing.  For now, just
-			 * leave it dangling.
-			 */
-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+		if (!done->done) {
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
@@ -747,7 +752,7 @@ static int dmatest_func(void *data)
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
-	if (ret)
+	if (ret || failed_tests)
 		dmaengine_terminate_all(chan);
 
 	thread->done = true;
@@ -807,6 +812,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
 		thread->info = info;
 		thread->chan = dtc->chan;
 		thread->type = type;
+		thread->test_done.wait = &thread->done_wait;
+		init_waitqueue_head(&thread->done_wait);
 		smp_wmb();
 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
 				dma_chan_name(chan), op, i);
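A minimal userspace sketch, not part of the patch above, of the pattern the dmatest change adopts: the completion flag and wait queue are embedded in the long-lived per-thread object instead of on the waiter's stack, so a callback that fires after a timeout can only touch memory that is still valid. The struct and function names below are invented for illustration, and pthreads stands in for the kernel wait-queue API.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct test_done {                     /* analogous to struct dmatest_done */
	bool done;
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

struct test_thread {                   /* analogous to struct dmatest_thread */
	struct test_done test_done;        /* outlives any one wait */
};

static void *completion_cb(void *arg)  /* analogous to dmatest_callback */
{
	struct test_done *done = arg;

	pthread_mutex_lock(&done->lock);
	done->done = true;                 /* safe: points into the long-lived object */
	pthread_cond_broadcast(&done->wait);
	pthread_mutex_unlock(&done->lock);
	return NULL;
}

int main(void)
{
	static struct test_thread thr = {
		.test_done = {
			.done = false,
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.wait = PTHREAD_COND_INITIALIZER,
		},
	};
	pthread_t cb;

	/* The callback is handed a pointer into the thread object, never
	 * into a local variable of the waiting function. */
	pthread_create(&cb, NULL, completion_cb, &thr.test_done);

	pthread_mutex_lock(&thr.test_done.lock);
	while (!thr.test_done.done)
		pthread_cond_wait(&thr.test_done.wait, &thr.test_done.lock);
	pthread_mutex_unlock(&thr.test_done.lock);

	pthread_join(cb, NULL);
	printf("completion observed safely\n");
	return 0;
}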
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 9f3dbc8..fb2e747 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1694,7 +1694,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
 	struct pl330_thread *thrd = NULL;
-	unsigned long flags;
 	int chans, i;
 
 	if (pl330->state == DYING)
@@ -1702,8 +1701,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
 	chans = pl330->pcfg.num_chan;
 
-	spin_lock_irqsave(&pl330->lock, flags);
-
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
 		if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1721,8 +1718,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 		thrd = NULL;
 	}
 
-	spin_unlock_irqrestore(&pl330->lock, flags);
-
 	return thrd;
 }
 
@@ -1740,7 +1735,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
 	struct pl330_dmac *pl330;
-	unsigned long flags;
 
 	if (!thrd || thrd->free)
 		return;
@@ -1752,10 +1746,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
 	pl330 = thrd->dmac;
 
-	spin_lock_irqsave(&pl330->lock, flags);
 	_free_event(thrd, thrd->ev);
 	thrd->free = true;
-	spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2120,20 +2112,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	dma_cookie_init(chan);
 	pch->cyclic = false;
 
 	pch->thread = pl330_request_channel(pl330);
 	if (!pch->thread) {
-		spin_unlock_irqrestore(&pch->lock, flags);
+		spin_unlock_irqrestore(&pl330->lock, flags);
 		return -ENOMEM;
 	}
 
 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 
 	return 1;
 }
@@ -2236,12 +2228,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct pl330_dmac *pl330 = pch->dmac;
 	unsigned long flags;
 
 	tasklet_kill(&pch->task);
 
 	pm_runtime_get_sync(pch->dmac->ddma.dev);
-	spin_lock_irqsave(&pch->lock, flags);
+	spin_lock_irqsave(&pl330->lock, flags);
 
 	pl330_release_channel(pch->thread);
 	pch->thread = NULL;
@@ -2249,7 +2242,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	if (pch->cyclic)
 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-	spin_unlock_irqrestore(&pch->lock, flags);
+	spin_unlock_irqrestore(&pl330->lock, flags);
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
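A small standalone sketch, not part of the patch, of the locking convention the pl330 hunks move to: the helpers no longer take the lock themselves, and the entry points take pl330->lock once around them, so the channel bookkeeping is guarded by a single lock taken in one place. The names below (struct pool, pool_grab_slot_locked) are invented for the example, and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 4

struct pool {
	pthread_mutex_t lock;
	bool busy[NR_SLOTS];
};

/* Caller must hold pool->lock; the helper itself never locks. */
static int pool_grab_slot_locked(struct pool *p)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		if (!p->busy[i]) {
			p->busy[i] = true;
			return i;
		}
	}
	return -1;
}

/* Public entry point: takes the lock exactly once around the helper. */
static int pool_alloc(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	int slot = pool_grab_slot_locked(p);
	pthread_mutex_unlock(&p->lock);
	return slot;
}

int main(void)
{
	static struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("got slot %d\n", pool_alloc(&p));
	printf("got slot %d\n", pool_alloc(&p));
	return 0;
}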
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index da0e81d..065b765 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,24 +56,6 @@
 	} while (0)
 
 /* gpii specific logging macros */
-#define GPII_REG(gpii, ch, fmt, ...) do { \
-	if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
-		pr_info("%s:%u:%s: " fmt, gpii->label, \
-			ch, __func__, ##__VA_ARGS__); \
-	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
-		ipc_log_string(gpii->ilctxt, \
-			       "ch:%u %s: " fmt, ch, \
-			       __func__, ##__VA_ARGS__); \
-	} while (0)
-#define GPII_VERB(gpii, ch, fmt, ...) do { \
-	if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
-		pr_info("%s:%u:%s: " fmt, gpii->label, \
-			ch, __func__, ##__VA_ARGS__); \
-	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
-		ipc_log_string(gpii->ilctxt, \
-			       "ch:%u %s: " fmt, ch, \
-			       __func__, ##__VA_ARGS__); \
-	} while (0)
 #define GPII_INFO(gpii, ch, fmt, ...) do { \
 	if (gpii->klog_lvl >= LOG_LVL_INFO) \
 		pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
@@ -123,11 +105,33 @@ enum EV_PRIORITY {
 #define IPC_LOG_PAGES (40)
 #define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
 #define CMD_TIMEOUT_MS (1000)
+#define GPII_REG(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+#define GPII_VERB(gpii, ch, fmt, ...) do { \
+	if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
+		pr_info("%s:%u:%s: " fmt, gpii->label, \
+			ch, __func__, ##__VA_ARGS__); \
+	if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
+		ipc_log_string(gpii->ilctxt, \
+			       "ch:%u %s: " fmt, ch, \
+			       __func__, ##__VA_ARGS__); \
+	} while (0)
+
 #else
 #define IPC_LOG_PAGES (2)
 #define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
 #define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
 #define CMD_TIMEOUT_MS (250)
+/* verbose and register logging are disabled if !debug */
+#define GPII_REG(gpii, ch, fmt, ...)
+#define GPII_VERB(gpii, ch, fmt, ...)
 #endif
 
 #define GPI_LABEL_SIZE (256)
@@ -468,7 +472,6 @@ struct gpii_chan {
 	u32 req_tres; /* # of tre's client requested */
 	u32 dir;
 	struct gpi_ring ch_ring;
-	struct gpi_ring sg_ring; /* points to client scatterlist */
 	struct gpi_client_info client_info;
 };
 
@@ -510,7 +513,6 @@ struct gpii {
 struct gpi_desc {
 	struct virt_dma_desc vd;
 	void *wp; /* points to TRE last queued during issue_pending */
-	struct sg_tre *sg_tre; /* points to last scatterlist */
 	void *db; /* DB register to program */
 	struct gpii_chan *gpii_chan;
 };
@@ -936,11 +938,8 @@ static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
 /* process transfer completion interrupt */
 static void gpi_process_ieob(struct gpii *gpii)
 {
-	u32 ieob_irq;
 
-	ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
-	gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
-	GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
+	gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
 
 	/* process events based on priority */
 	if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
@@ -1106,6 +1105,14 @@ static irqreturn_t gpi_handle_irq(int irq, void *data)
 			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
 		}
 
+		/* transfer complete interrupt */
+		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+			GPII_VERB(gpii, GPI_DBG_COMMON,
+				  "process IEOB interrupts\n");
+			gpi_process_ieob(gpii);
+			type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+		}
+
 		/* event control irq */
 		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
 			u32 ev_state;
@@ -1148,14 +1155,6 @@ static irqreturn_t gpi_handle_irq(int irq, void *data)
 			type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
 		}
 
-		/* transfer complete interrupt */
-		if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
-			GPII_VERB(gpii, GPI_DBG_COMMON,
-				  "process IEOB interrupts\n");
-			gpi_process_ieob(gpii);
-			type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
-		}
-
 		if (type) {
 			GPII_CRITIC(gpii, GPI_DBG_COMMON,
 				 "Unhandled interrupt status:0x%x\n", type);
@@ -1176,11 +1175,10 @@ static irqreturn_t gpi_handle_irq(int irq, void *data)
 static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
 					struct qup_notif_event *notif_event)
 {
-	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_client_info *client_info = &gpii_chan->client_info;
 	struct msm_gpi_cb msm_gpi_cb;
 
-	GPII_VERB(gpii, gpii_chan->chid,
+	GPII_VERB(gpii_chan->gpii, gpii_chan->chid,
 		  "status:0x%x time:0x%x count:0x%x\n",
 		  notif_event->status, notif_event->time, notif_event->count);
 
@@ -1188,7 +1186,7 @@ static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
 	msm_gpi_cb.status = notif_event->status;
 	msm_gpi_cb.timestamp = notif_event->time;
 	msm_gpi_cb.count = notif_event->count;
-	GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
+	GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
 		  TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
 	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
 			      client_info->cb_param);
@@ -1200,11 +1198,8 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	struct virt_dma_desc *vd;
 	struct gpi_desc *gpi_desc;
-	struct msm_gpi_tre *client_tre;
-	void *sg_tre;
 	void *tre = ch_ring->base +
 		(ch_ring->el_size * imed_event->tre_index);
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
@@ -1262,8 +1257,6 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
-	sg_tre = gpi_desc->sg_tre;
-	client_tre = ((struct sg_tre *)sg_tre)->ptr;
 
 	/*
 	 * RP pointed by Event is to last TRE processed,
@@ -1273,38 +1266,26 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 	if (tre >= (ch_ring->base + ch_ring->len))
 		tre = ch_ring->base;
 	ch_ring->rp = tre;
-	sg_tre += sg_ring->el_size;
-	if (sg_tre >= (sg_ring->base + sg_ring->len))
-		sg_tre = sg_ring->base;
-	sg_ring->rp = sg_tre;
 
 	/* make sure rp updates are immediately visible to all cores */
 	smp_wmb();
 
-	/* update Immediate data from Event back in to TRE if it's RX channel */
-	if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
-		client_tre->dword[0] =
-			((struct msm_gpi_tre *)imed_event)->dword[0];
-		client_tre->dword[1] =
-			((struct msm_gpi_tre *)imed_event)->dword[1];
-		client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
-						      imed_event->length);
-	}
-
 	tx_cb_param = vd->tx.callback_param;
-	if (tx_cb_param) {
+	if (vd->tx.callback && tx_cb_param) {
+		struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
+
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "cb_length:%u compl_code:0x%x status:0x%x\n",
 			  imed_event->length, imed_event->code,
 			  imed_event->status);
+		/* Update immediate data if any from event */
+		*imed_tre = *((struct msm_gpi_tre *)imed_event);
 		tx_cb_param->length = imed_event->length;
 		tx_cb_param->completion_code = imed_event->code;
 		tx_cb_param->status = imed_event->status;
+		vd->tx.callback(tx_cb_param);
 	}
-
-	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
-	vchan_cookie_complete(vd);
-	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+	kfree(gpi_desc);
 }
 
 /* processing transfer completion events */
@@ -1313,13 +1294,10 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
-	struct msm_gpi_tre *client_tre;
 	struct virt_dma_desc *vd;
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
 	struct gpi_desc *gpi_desc;
-	void *sg_tre = NULL;
 	unsigned long flags;
 
 	/* only process events on active channel */
@@ -1366,8 +1344,6 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
-	sg_tre = gpi_desc->sg_tre;
-	client_tre = ((struct sg_tre *)sg_tre)->ptr;
 
 	/*
 	 * RP pointed by Event is to last TRE processed,
@@ -1377,16 +1353,12 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 	if (ev_rp >= (ch_ring->base + ch_ring->len))
 		ev_rp = ch_ring->base;
 	ch_ring->rp = ev_rp;
-	sg_tre += sg_ring->el_size;
-	if (sg_tre >= (sg_ring->base + sg_ring->len))
-		sg_tre = sg_ring->base;
-	sg_ring->rp = sg_tre;
 
 	/* update must be visible to other cores */
 	smp_wmb();
 
 	tx_cb_param = vd->tx.callback_param;
-	if (tx_cb_param) {
+	if (vd->tx.callback && tx_cb_param) {
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "cb_length:%u compl_code:0x%x status:0x%x\n",
 			  compl_event->length, compl_event->code,
@@ -1394,37 +1366,36 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 		tx_cb_param->length = compl_event->length;
 		tx_cb_param->completion_code = compl_event->code;
 		tx_cb_param->status = compl_event->status;
+		vd->tx.callback(tx_cb_param);
 	}
-
-	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
-	vchan_cookie_complete(vd);
-	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
+	kfree(gpi_desc);
 }
 
 /* process all events */
 static void gpi_process_events(struct gpii *gpii)
 {
 	struct gpi_ring *ev_ring = &gpii->ev_ring;
-	u32 cntxt_rp, local_rp;
+	phys_addr_t cntxt_rp, local_rp;
+	void *rp;
 	union gpi_event *gpi_event;
 	struct gpii_chan *gpii_chan;
 	u32 chid, type;
-	u32 ieob_irq;
 
 	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
-	local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+	rp = to_virtual(ev_ring, cntxt_rp);
+	local_rp = to_physical(ev_ring, ev_ring->rp);
 
-	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
-		  cntxt_rp, local_rp);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa\n",
+		  &cntxt_rp, &local_rp);
 
 	do {
-		while (local_rp != cntxt_rp) {
+		while (rp != ev_ring->rp) {
 			gpi_event = ev_ring->rp;
 			chid = gpi_event->xfer_compl_event.chid;
 			type = gpi_event->xfer_compl_event.type;
 			GPII_VERB(gpii, GPI_DBG_COMMON,
-				  "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
-				  local_rp, chid, type,
+				  "chid:%u type:0x%x %08x %08x %08x %08x\n",
+				  chid, type,
 				  gpi_event->gpi_ere.dword[0],
 				  gpi_event->gpi_ere.dword[1],
 				  gpi_event->gpi_ere.dword[2],
@@ -1456,22 +1427,18 @@ static void gpi_process_events(struct gpii *gpii)
 					  type);
 			}
 			gpi_ring_recycle_ev_element(ev_ring);
-			local_rp = (u32)to_physical(ev_ring,
-						    (void *)ev_ring->rp);
 		}
 		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
 
 		/* clear pending IEOB events */
-		ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
-		gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
+		gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
 
 		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
-		local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
+		rp = to_virtual(ev_ring, cntxt_rp);
 
-	} while (cntxt_rp != local_rp);
+	} while (rp != ev_ring->rp);
 
-	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
-		  local_rp);
+	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:%pa\n", &cntxt_rp);
 }
 
 /* processing events using tasklet */
@@ -1532,7 +1499,6 @@ static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
 {
 	struct gpii *gpii = gpii_chan->gpii;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	unsigned long flags;
 	LIST_HEAD(list);
 	int ret;
@@ -1549,8 +1515,6 @@ static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
 	/* initialize the local ring ptrs */
 	ch_ring->rp = ch_ring->base;
 	ch_ring->wp = ch_ring->base;
-	sg_ring->rp = sg_ring->base;
-	sg_ring->wp = sg_ring->base;
 
 	/* visible to other cores */
 	smp_wmb();
@@ -1840,11 +1804,8 @@ static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
 static void gpi_free_ring(struct gpi_ring *ring,
 			  struct gpii *gpii)
 {
-	if (ring->dma_handle)
-		dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
-				  ring->pre_aligned, ring->dma_handle);
-	else
-		vfree(ring->pre_aligned);
+	dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+			  ring->pre_aligned, ring->dma_handle);
 	memset(ring, 0, sizeof(*ring));
 }
 
@@ -1852,51 +1813,34 @@ static void gpi_free_ring(struct gpi_ring *ring,
 static int gpi_alloc_ring(struct gpi_ring *ring,
 			  u32 elements,
 			  u32 el_size,
-			  struct gpii *gpii,
-			  bool alloc_coherent)
+			  struct gpii *gpii)
 {
 	u64 len = elements * el_size;
 	int bit;
 
-	if (alloc_coherent) {
-		/* ring len must be power of 2 */
-		bit = find_last_bit((unsigned long *)&len, 32);
-		if (((1 << bit) - 1) & len)
-			bit++;
-		len = 1 << bit;
-		ring->alloc_size = (len + (len - 1));
-		GPII_INFO(gpii, GPI_DBG_COMMON,
-			  "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
-			  elements, el_size, (elements * el_size), len,
-			  ring->alloc_size);
-		ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
-						       ring->alloc_size,
-						       &ring->dma_handle,
-						       GFP_KERNEL);
-		if (!ring->pre_aligned) {
-			GPII_CRITIC(gpii, GPI_DBG_COMMON,
-				    "could not alloc size:%lu mem for ring\n",
-				    ring->alloc_size);
-			return -ENOMEM;
-		}
-
-		/* align the physical mem */
-		ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
-		ring->base = ring->pre_aligned +
-			(ring->phys_addr - ring->dma_handle);
-	} else {
-		ring->pre_aligned = vmalloc(len);
-		if (!ring->pre_aligned) {
-			GPII_CRITIC(gpii, GPI_DBG_COMMON,
-				    "could not allocsize:%llu mem for ring\n",
-				    len);
-			return -ENOMEM;
-		}
-		ring->phys_addr = 0;
-		ring->dma_handle = 0;
-		ring->base = ring->pre_aligned;
+	/* ring len must be power of 2 */
+	bit = find_last_bit((unsigned long *)&len, 32);
+	if (((1 << bit) - 1) & len)
+		bit++;
+	len = 1 << bit;
+	ring->alloc_size = (len + (len - 1));
+	GPII_INFO(gpii, GPI_DBG_COMMON,
+		  "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+		  elements, el_size, (elements * el_size), len,
+		  ring->alloc_size);
+	ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
+					       ring->alloc_size,
+					       &ring->dma_handle, GFP_KERNEL);
+	if (!ring->pre_aligned) {
+		GPII_CRITIC(gpii, GPI_DBG_COMMON,
+			    "could not alloc size:%lu mem for ring\n",
+			    ring->alloc_size);
+		return -ENOMEM;
 	}
 
+	/* align the physical mem */
+	ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
+	ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
 	ring->rp = ring->base;
 	ring->wp = ring->base;
 	ring->len = len;
@@ -1920,8 +1864,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring,
 static void gpi_queue_xfer(struct gpii *gpii,
 			   struct gpii_chan *gpii_chan,
 			   struct msm_gpi_tre *gpi_tre,
-			   void **wp,
-			   struct sg_tre **sg_tre)
+			   void **wp)
 {
 	struct msm_gpi_tre *ch_tre;
 	int ret;
@@ -1933,18 +1876,9 @@ static void gpi_queue_xfer(struct gpii *gpii,
 			    "Error adding ring element to xfer ring\n");
 		return;
 	}
-	/* get next sg tre location we can use */
-	ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
-	if (unlikely(ret)) {
-		GPII_CRITIC(gpii, gpii_chan->chid,
-			    "Error adding ring element to sg ring\n");
-		return;
-	}
 
 	/* copy the tre info */
 	memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
-	(*sg_tre)->ptr = gpi_tre;
-	(*sg_tre)->wp = ch_tre;
 	*wp = ch_tre;
 }
 
@@ -2122,14 +2056,12 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
 {
 	struct gpii_chan *gpii_chan = to_gpii_chan(chan);
 	struct gpii *gpii = gpii_chan->gpii;
-	u32 nr, sg_nr;
+	u32 nr;
 	u32 nr_req = 0;
 	int i, j;
 	struct scatterlist *sg;
 	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
-	struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
 	void *tre, *wp = NULL;
-	struct sg_tre *sg_tre = NULL;
 	const gfp_t gfp = GFP_ATOMIC;
 	struct gpi_desc *gpi_desc;
 
@@ -2143,20 +2075,17 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
 
 	/* calculate # of elements required & available */
 	nr = gpi_ring_num_elements_avail(ch_ring);
-	sg_nr = gpi_ring_num_elements_avail(sg_ring);
 	for_each_sg(sgl, sg, sg_len, i) {
 		GPII_VERB(gpii, gpii_chan->chid,
 			  "%d of %u len:%u\n", i, sg_len, sg->length);
 		nr_req += (sg->length / ch_ring->el_size);
 	}
-	GPII_VERB(gpii, gpii_chan->chid,
-		  "nr_elements_avail:%u sg_avail:%u required:%u\n",
-		  nr, sg_nr, nr_req);
+	GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
 
-	if (nr < nr_req || sg_nr < nr_req) {
+	if (nr < nr_req) {
 		GPII_ERR(gpii, gpii_chan->chid,
-			 "not enough space in ring, avail:%u,%u required:%u\n",
-			 nr, sg_nr, nr_req);
+			 "not enough space in ring, avail:%u required:%u\n",
+			 nr, nr_req);
 		return NULL;
 	}
 
@@ -2171,12 +2100,11 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
 	for_each_sg(sgl, sg, sg_len, i)
 		for (j = 0, tre = sg_virt(sg); j < sg->length;
 		     j += ch_ring->el_size, tre += ch_ring->el_size)
-			gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
+			gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
 
 	/* set up the descriptor */
 	gpi_desc->db = ch_ring->wp;
 	gpi_desc->wp = wp;
-	gpi_desc->sg_tre = sg_tre;
 	gpi_desc->gpii_chan = gpii_chan;
 	GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
 		  to_physical(ch_ring, ch_ring->wp),
@@ -2271,7 +2199,7 @@ static int gpi_config(struct dma_chan *chan,
 		elements = max(gpii->gpii_chan[0].req_tres,
 			       gpii->gpii_chan[1].req_tres);
 		ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
-				     sizeof(union gpi_event), gpii, true);
+				     sizeof(union gpi_event), gpii);
 		if (ret) {
 			GPII_ERR(gpii, gpii_chan->chid,
 				 "error allocating mem for ev ring\n");
@@ -2396,7 +2324,6 @@ static void gpi_free_chan_resources(struct dma_chan *chan)
 
 	/* free all allocated memory */
 	gpi_free_ring(&gpii_chan->ch_ring, gpii);
-	gpi_free_ring(&gpii_chan->sg_ring, gpii);
 	vchan_free_chan_resources(&gpii_chan->vc);
 
 	write_lock_irq(&gpii->pm_lock);
@@ -2451,26 +2378,15 @@ static int gpi_alloc_chan_resources(struct dma_chan *chan)
 
 	/* allocate memory for transfer ring */
 	ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
-			     sizeof(struct msm_gpi_tre), gpii, true);
+			     sizeof(struct msm_gpi_tre), gpii);
 	if (ret) {
 		GPII_ERR(gpii, gpii_chan->chid,
 			 "error allocating xfer ring, ret:%d\n", ret);
 		goto xfer_alloc_err;
 	}
-
-	ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
-			     sizeof(struct sg_tre), gpii, false);
-	if (ret) {
-		GPII_ERR(gpii, gpii_chan->chid,
-			 "error allocating sg ring, ret:%d\n", ret);
-		goto sg_alloc_error;
-	}
 	mutex_unlock(&gpii->ctrl_lock);
 
 	return 0;
-
-sg_alloc_error:
-	gpi_free_ring(&gpii_chan->ch_ring, gpii);
 xfer_alloc_err:
 	mutex_unlock(&gpii->ctrl_lock);
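A standalone sketch, not part of the patch, of the arithmetic gpi_alloc_ring() now applies to every ring: round the length up to a power of two, over-allocate by len - 1 bytes, then align the returned DMA handle up to a len boundary inside that allocation. The input values below are made-up samples.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t elements = 24, el_size = 16;        /* sample ring geometry */
	uint64_t len = elements * el_size;           /* 384 bytes            */
	uint64_t handle = 0x1000a100;                /* pretend dma_handle   */

	/* round len up to the next power of two (384 -> 512) */
	uint64_t pow2 = 1;
	while (pow2 < len)
		pow2 <<= 1;
	len = pow2;

	uint64_t alloc_size = len + (len - 1);       /* worst-case padding        */
	uint64_t phys = (handle + (len - 1)) & ~(len - 1); /* len-aligned base    */

	printf("len=%llu alloc_size=%llu base offset=%llu\n",
	       (unsigned long long)len,
	       (unsigned long long)alloc_size,
	       (unsigned long long)(phys - handle));
	return 0;
}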
 
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f..ae3f60b 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -884,7 +884,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 	struct virt_dma_desc *vdesc;
 	enum dma_status status;
 	unsigned long flags;
-	u32 residue;
+	u32 residue = 0;
 
 	status = dma_cookie_status(c, cookie, state);
 	if ((status == DMA_COMPLETE) || (!state))
@@ -892,16 +892,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	vdesc = vchan_find_desc(&chan->vchan, cookie);
-	if (cookie == chan->desc->vdesc.tx.cookie) {
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
 		residue = stm32_dma_desc_residue(chan, chan->desc,
 						 chan->next_sg);
-	} else if (vdesc) {
+	else if (vdesc)
 		residue = stm32_dma_desc_residue(chan,
 						 to_stm32_dma_desc(vdesc), 0);
-	} else {
-		residue = 0;
-	}
-
 	dma_set_residue(state, residue);
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -976,21 +972,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
 	struct stm32_dma_chan *chan;
 	struct dma_chan *c;
 
-	if (dma_spec->args_count < 3)
+	if (dma_spec->args_count < 4)
 		return NULL;
 
 	cfg.channel_id = dma_spec->args[0];
 	cfg.request_line = dma_spec->args[1];
 	cfg.stream_config = dma_spec->args[2];
-	cfg.threshold = 0;
+	cfg.threshold = dma_spec->args[3];
 
 	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
 				STM32_DMA_MAX_REQUEST_ID))
 		return NULL;
 
-	if (dma_spec->args_count > 3)
-		cfg.threshold = dma_spec->args[3];
-
 	chan = &dmadev->chan[cfg.channel_id];
 
 	c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 88a00d0..43e88d8 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -49,12 +49,12 @@ struct ti_am335x_xbar_data {
 
 struct ti_am335x_xbar_map {
 	u16 dma_line;
-	u16 mux_val;
+	u8 mux_val;
 };
 
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
 {
-	writeb_relaxed(val & 0x1f, iomem + event);
+	writeb_relaxed(val, iomem + event);
 }
 
 static void ti_am335x_xbar_free(struct device *dev, void *route_data)
@@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	}
 
 	map->dma_line = (u16)dma_spec->args[0];
-	map->mux_val = (u16)dma_spec->args[2];
+	map->mux_val = (u8)dma_spec->args[2];
 
 	dma_spec->args[2] = 0;
 	dma_spec->args_count = 2;
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759..6059d81 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op)
 	INIT_LIST_HEAD(&d->slave.channels);
 	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
 	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
 	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
 	d->slave.dev = &op->dev;
 	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 72e07e3..16e0eb5 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -227,7 +227,7 @@
 #define			NREC_RDWR(x)		(((x)>>11) & 1)
 #define			NREC_RANK(x)		(((x)>>8) & 0x7)
 #define		NRECMEMB		0xC0
-#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
+#define			NREC_CAS(x)		(((x)>>16) & 0xFFF)
 #define			NREC_RAS(x)		((x) & 0x7FFF)
 #define		NRECFGLOG		0xC4
 #define		NREEECFBDA		0xC8
@@ -371,7 +371,7 @@ struct i5000_error_info {
 	/* These registers are input ONLY if there was a
 	 * Non-Recoverable Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
 			dimm->mtype = MEM_FB_DDR2;
 
 			/* ask what device type on this row */
-			if (MTR_DRAM_WIDTH(mtr))
+			if (MTR_DRAM_WIDTH(mtr) == 8)
 				dimm->dtype = DEV_X8;
 			else
 				dimm->dtype = DEV_X4;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6ef6ad1..2ea2f32 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -368,7 +368,7 @@ struct i5400_error_info {
 
 	/* These registers are input ONLY if there was a Non-Rec Error */
 	u16 nrecmema;		/* Non-Recoverable Mem log A */
-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
+	u32 nrecmemb;		/* Non-Recoverable Mem log B */
 
 };
 
@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
 				NERR_FAT_FBD, &info->nerr_fat_fbd);
 		pci_read_config_word(pvt->branchmap_werrors,
 				NRECMEMA, &info->nrecmema);
-		pci_read_config_word(pvt->branchmap_werrors,
+		pci_read_config_dword(pvt->branchmap_werrors,
 				NRECMEMB, &info->nrecmemb);
 
 		/* Clear the error bits, by writing them back */
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
 
 			dimm->nr_pages = size_mb << 8;
 			dimm->grain = 8;
-			dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+			dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
+				      DEV_X8 : DEV_X4;
 			dimm->mtype = MEM_FB_DDR2;
 			/*
 			 * The eccc mechanism is SDDC (aka SECC), with
 			 * is similar to Chipkill.
 			 */
-			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
 					  EDAC_S8ECD8ED : EDAC_S4ECD4ED;
 			ndimms++;
 		}
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 5477522..3c47e63 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2510,6 +2510,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
 			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
 			pvt->pci_ta = pdev;
+			break;
 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
 			pvt->pci_ras = pdev;
 			break;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a4944e2..2f47c5b 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -120,8 +120,7 @@ static ssize_t systab_show(struct kobject *kobj,
 	return str - buf;
 }
 
-static struct kobj_attribute efi_attr_systab =
-			__ATTR(systab, 0400, systab_show, NULL);
+static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
 
 #define EFI_FIELD(var) efi.var
 
@@ -385,7 +384,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 			return 0;
 		}
 	}
-	pr_err_once("requested map not found.\n");
 	return -ENOENT;
 }
 
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1491407..311c9d0 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
 };
 
 /* Generic ESRT Entry ("ESRE") support. */
-static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
 {
 	char *str = buf;
 
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
 	return str - buf;
 }
 
-static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
-	esre_fw_class_show, NULL);
+static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
 
 #define esre_attr_decl(name, size, fmt) \
-static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+static ssize_t name##_show(struct esre_entry *entry, char *buf) \
 { \
 	return sprintf(buf, fmt "\n", \
 		       le##size##_to_cpu(entry->esre.esre1->name)); \
 } \
 \
-static struct esre_attribute esre_##name = __ATTR(name, 0400, \
-	esre_##name##_show, NULL)
+static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
 
 esre_attr_decl(fw_type, 32, "%u");
 esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
 /* support for displaying ESRT fields at the top level */
 #define esrt_attr_decl(name, size, fmt) \
-static ssize_t esrt_##name##_show(struct kobject *kobj, \
+static ssize_t name##_show(struct kobject *kobj, \
 				  struct kobj_attribute *attr, char *buf)\
 { \
 	return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
 } \
 \
-static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
-	esrt_##name##_show, NULL)
+static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
 
 esrt_attr_decl(fw_resource_count, 32, "%u");
 esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -254,7 +251,7 @@ void __init efi_esrt_init(void)
 
 	rc = efi_mem_desc_lookup(efi.esrt, &md);
 	if (rc < 0) {
-		pr_err("ESRT header is not in the memory map.\n");
+		pr_warn("ESRT header is not in the memory map.\n");
 		return;
 	}
 
@@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void)
 err_remove_esrt:
 	kobject_put(esrt_kobj);
 err:
-	kfree(esrt);
+	memunmap(esrt);
 	esrt = NULL;
 	return error;
 }
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 8e64b77..f377609 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
 	return map_attr->show(entry, buf);
 }
 
-static struct map_attribute map_type_attr = __ATTR_RO(type);
-static struct map_attribute map_phys_addr_attr   = __ATTR_RO(phys_addr);
-static struct map_attribute map_virt_addr_attr  = __ATTR_RO(virt_addr);
-static struct map_attribute map_num_pages_attr  = __ATTR_RO(num_pages);
-static struct map_attribute map_attribute_attr  = __ATTR_RO(attribute);
+static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
+static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
+static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
+static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
+static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
 
 /*
  * These are default attributes that are added for every memmap entry.
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8fe8805..2b6b112 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -493,6 +493,8 @@ static void __init psci_init_migrate(void)
 static void __init psci_0_2_set_functions(void)
 {
 	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_ops.get_version = psci_get_version;
+
 	psci_function_id[PSCI_FN_CPU_SUSPEND] =
 					PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
 	psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 5bddbd5..3fe6a21 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
 
 	altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
 
-	if (type == IRQ_TYPE_NONE)
+	if (type == IRQ_TYPE_NONE) {
+		irq_set_handler_locked(d, handle_bad_irq);
 		return 0;
-	if (type == IRQ_TYPE_LEVEL_HIGH &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
+	}
+	if (type == altera_gc->interrupt_trigger) {
+		if (type == IRQ_TYPE_LEVEL_HIGH)
+			irq_set_handler_locked(d, handle_level_irq);
+		else
+			irq_set_handler_locked(d, handle_simple_irq);
 		return 0;
-	if (type == IRQ_TYPE_EDGE_RISING &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_FALLING &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
-		return 0;
-	if (type == IRQ_TYPE_EDGE_BOTH &&
-		altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
-		return 0;
-
+	}
+	irq_set_handler_locked(d, handle_bad_irq);
 	return -EINVAL;
 }
 
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-
 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
 {
 	struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
 	altera_gc->interrupt_trigger = reg;
 
 	ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
-		handle_simple_irq, IRQ_TYPE_NONE);
+		handle_bad_irq, IRQ_TYPE_NONE);
 
 	if (ret) {
 		dev_err(&pdev->dev, "could not add irqchip\n");
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 1ef85b0..d27e936 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -126,7 +126,7 @@ static int mockup_gpio_probe(struct platform_device *pdev)
 	int i;
 	int base;
 	int ngpio;
-	char chip_name[sizeof(GPIO_NAME) + 3];
+	char *chip_name;
 
 	if (gpio_mockup_params_nr < 2)
 		return -EINVAL;
@@ -146,8 +146,12 @@ static int mockup_gpio_probe(struct platform_device *pdev)
 			ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
 
 		if (ngpio >= 0) {
-			sprintf(chip_name, "%s-%c", GPIO_NAME,
-				pins_name_start + i);
+			chip_name = devm_kasprintf(dev, GFP_KERNEL,
+						   "%s-%c", GPIO_NAME,
+						   pins_name_start + i);
+			if (!chip_name)
+				return -ENOMEM;
+
 			ret = mockup_gpio_add(dev, &cntr[i],
 					      chip_name, base, ngpio);
 		} else {
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb5..8a08e81 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
 # of AMDSOC/AMDGPU drm driver.
 # It provides the HW control for ACP related functionalities.
 
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
 AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f8fdbd1..26afdff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1788,34 +1788,32 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
 		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
 }
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
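A plain-C sketch, not part of the patch, of why the reworked amdgpu_atombios_copy_swap() asks callers for ALIGN(num_bytes, 4) bytes of buffer: the copy is performed in whole little-endian dwords. The helper names below (copy_swap_to_le, cpu_to_le32_sketch) are invented for the example.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

static uint32_t cpu_to_le32_sketch(uint32_t v)
{
	uint8_t b[4] = {                       /* little-endian byte order */
		(uint8_t)v, (uint8_t)(v >> 8),
		(uint8_t)(v >> 16), (uint8_t)(v >> 24),
	};
	uint32_t out;

	memcpy(&out, b, 4);
	return out;
}

static void copy_swap_to_le(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
	uint32_t src_tmp[5] = { 0 }, dst_tmp[5];   /* like the 20-byte scratch */
	uint8_t align_num_bytes = ALIGN4(num_bytes);
	unsigned int i;

	memcpy(src_tmp, src, num_bytes);
	for (i = 0; i < align_num_bytes / 4; i++)
		dst_tmp[i] = cpu_to_le32_sketch(src_tmp[i]);
	memcpy(dst, dst_tmp, align_num_bytes);     /* hence dst needs ALIGN(n, 4) */
}

int main(void)
{
	uint8_t src[6] = { 1, 2, 3, 4, 5, 6 };
	uint8_t dst[8];                            /* ALIGN(6, 4) = 8 bytes */

	copy_swap_to_le(dst, src, sizeof(src));
	printf("%u bytes copied as %u aligned bytes\n",
	       (unsigned)sizeof(src), ALIGN4((unsigned)sizeof(src)));
	return 0;
}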
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index bfb4b91..f26d1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	for (; i >= 0; i--)
 		drm_free_large(p->chunks[i].kdata);
 	kfree(p->chunks);
+	p->chunks = NULL;
+	p->nchunks = 0;
 put_ctx:
 	amdgpu_ctx_put(p->ctx);
 free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e41d4ba..ce9797b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2020,8 +2020,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		if (fbcon)
+			console_unlock();
 		return r;
+	}
 
 	/* pin cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 743a12d..3bb2b9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -648,7 +648,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r, idx = 0;
+	int i, r = 0, idx = 0;
 
 	r = amdgpu_cs_sysvm_access_required(p);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 968c426..4750375 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -744,7 +744,7 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
-	int r;
+	int r = 0;
 
 	r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c2bd9f0..6d75fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -565,11 +565,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 50f0cf2..7522f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -182,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
 		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-		data &= ~0xffc00000;
+		data &= ~0x3ff;
 		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
 		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 4477c55..a8b59b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -850,9 +850,9 @@ static int init_over_drive_limits(
 		const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
 {
 	hwmgr->platform_descriptor.overdriveLimit.engineClock =
-		le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+		le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-		le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+		le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
 
 	hwmgr->platform_descriptor.minOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index fe00bea..ca227e8 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -45,6 +45,8 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+#define MAX_DRM_OPEN_COUNT		20
+
 /**
  * DOC: file operations
  *
@@ -135,6 +137,11 @@ int drm_open(struct inode *inode, struct file *filp)
 	if (!dev->open_count++)
 		need_setup = 1;
 
+	if (dev->open_count >= MAX_DRM_OPEN_COUNT) {
+		retcode = -EPERM;
+		goto err_undo;
+	}
+
 	/* share address_space across all char-devs of a single device */
 	filp->f_mapping = dev->anon_inode->i_mapping;
 
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ee07bb4..11f54df 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -348,14 +348,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 
 	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (adj_start < start)
-		adj_start = start;
-	if (adj_end > end)
-		adj_end = end;
-
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+	adj_start = max(adj_start, start);
+	adj_end = min(adj_end, end);
+
 	if (flags & DRM_MM_CREATE_TOP)
 		adj_start = adj_end - size;
 
@@ -566,17 +564,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 			       flags & DRM_MM_SEARCH_BELOW) {
 		u64 hole_size = adj_end - adj_start;
 
-		if (adj_start < start)
-			adj_start = start;
-		if (adj_end > end)
-			adj_end = end;
-
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
 				continue;
 		}
 
+		adj_start = max(adj_start, start);
+		adj_end = min(adj_end, end);
+
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 9b79a5b..174057b 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -26,6 +26,9 @@
 
 #include "drm_crtc_internal.h"
 
+#define MAX_BLOB_PROP_SIZE	(PAGE_SIZE * 30)
+#define MAX_BLOB_PROP_COUNT	250
+
 /**
  * DOC: overview
  *
@@ -554,7 +557,8 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
 	struct drm_property_blob *blob;
 	int ret;
 
-	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+	if (!length || length > MAX_BLOB_PROP_SIZE -
+				sizeof(struct drm_property_blob))
 		return ERR_PTR(-EINVAL);
 
 	blob = vzalloc(sizeof(struct drm_property_blob)+length);
@@ -756,13 +760,20 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
 			      void *data, struct drm_file *file_priv)
 {
 	struct drm_mode_create_blob *out_resp = data;
-	struct drm_property_blob *blob;
+	struct drm_property_blob *blob, *bt;
 	void __user *blob_ptr;
 	int ret = 0;
+	u32 count = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
+	list_for_each_entry(bt, &file_priv->blobs, head_file)
+		count++;
+
+	if (count == MAX_BLOB_PROP_COUNT)
+		return -EINVAL;
+
 	blob = drm_property_create_blob(dev, out_resp->length, NULL);
 	if (IS_ERR(blob))
 		return PTR_ERR(blob);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f31..6dd09c3 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
 	BIT_CLKS_ENABLED,
 	BIT_IRQS_ENABLED,
 	BIT_WIN_UPDATED,
-	BIT_SUSPENDED
+	BIT_SUSPENDED,
+	BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -315,6 +316,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
 	/* window enable */
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +329,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
 		return;
 
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +343,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
 	for (i = ctx->first_win; i < WINDOWS_NR; i++)
 		decon_shadow_protect_win(ctx, i, false);
 
-	/* standalone update */
-	decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+	if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+		decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
 	if (ctx->out_type & IFTYPE_I80)
 		set_bit(BIT_WIN_UPDATED, &ctx->flags);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f2ae72b..2abc47b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -246,6 +246,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
 	if (IS_ERR(exynos_gem))
 		return exynos_gem;
 
+	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+		/*
+		 * when no IOMMU is available, all allocated buffers are
+		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+		 */
+		flags &= ~EXYNOS_BO_NONCONTIG;
+		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+	}
+
 	/* set memory type and cache attribute from user side. */
 	exynos_gem->flags = flags;
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index cc2fde2..c9eef0f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -243,7 +243,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
 		return PTR_ERR(fsl_dev->state);
 	}
 
-	clk_disable_unprepare(fsl_dev->pix_clk);
 	clk_disable_unprepare(fsl_dev->clk);
 
 	return 0;
@@ -266,6 +265,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 	if (fsl_dev->tcon)
 		fsl_tcon_bypass_enable(fsl_dev->tcon);
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
+	enable_irq(fsl_dev->irq);
 	drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
 	console_lock();
@@ -273,7 +273,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 	console_unlock();
 
 	drm_kms_helper_poll_enable(fsl_dev->drm);
-	enable_irq(fsl_dev->irq);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 7e7a4d4..0f563c9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -521,9 +521,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
 {
 	struct ade_crtc *acrtc = to_ade_crtc(crtc);
 	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
 
 	if (!ctx->power_on)
 		(void)ade_power_up(ctx);
+	ade_ldi_set_mode(acrtc, mode, adj_mode);
 }
 
 static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3ce9ba3..a19ec06 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -457,6 +457,7 @@ struct intel_crtc_scaler_state {
 
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
+	struct intel_wm_level raw_wm[5];
 	uint32_t linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 79aab9a..6769aa1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -430,7 +430,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		msgs[i].addr == msgs[i + 1].addr &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 277a802..49de476 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -27,7 +27,6 @@
 
 #include <linux/cpufreq.h>
 #include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
@@ -2018,9 +2017,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_crtc *intel_crtc,
 				 int level,
 				 struct intel_crtc_state *cstate,
-				 const struct intel_plane_state *pristate,
-				 const struct intel_plane_state *sprstate,
-				 const struct intel_plane_state *curstate,
+				 struct intel_plane_state *pristate,
+				 struct intel_plane_state *sprstate,
+				 struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2342,24 +2341,28 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_plane *plane;
-	const struct drm_plane_state *plane_state;
-	const struct intel_plane_state *pristate = NULL;
-	const struct intel_plane_state *sprstate = NULL;
-	const struct intel_plane_state *curstate = NULL;
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *pristate = NULL;
+	struct intel_plane_state *sprstate = NULL;
+	struct intel_plane_state *curstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev), usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
-		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		struct intel_plane_state *ps;
 
-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		ps = intel_atomic_get_existing_plane_state(state,
+							   intel_plane);
+		if (!ps)
+			continue;
+
+		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
 			pristate = ps;
-		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
 			sprstate = ps;
-		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			curstate = ps;
 	}
 
@@ -2381,9 +2384,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	if (pipe_wm->sprites_scaled)
 		usable_level = 0;
 
-	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
 	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
-			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
+			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+
+	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -2393,8 +2398,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 
 	ilk_compute_wm_reg_maximums(dev, 1, &max);
 
-	for (level = 1; level <= usable_level; level++) {
-		struct intel_wm_level *wm = &pipe_wm->wm[level];
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
 
 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
 				     pristate, sprstate, curstate, wm);
@@ -2404,10 +2409,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 		 * register maximums since such watermarks are
 		 * always invalid.
 		 */
-		if (!ilk_validate_wm_level(level, &max, wm)) {
-			memset(wm, 0, sizeof(*wm));
-			break;
-		}
+		if (level > usable_level)
+			continue;
+
+		if (ilk_validate_wm_level(level, &max, wm))
+			pipe_wm->wm[level] = *wm;
+		else
+			usable_level = level;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cf83f65..48dfc16 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -321,7 +321,8 @@ static void mtk_drm_unbind(struct device *dev)
 {
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 
-	drm_put_dev(private->drm);
+	drm_dev_unregister(private->drm);
+	drm_dev_unref(private->drm);
 	private->drm = NULL;
 }
 
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 51cc57b..f2c0a0e 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -494,7 +494,8 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 	if (!dp->dp_display.connector)
 		return 0;
 
-	rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
+	rc = dp->panel->read_sink_caps(dp->panel,
+		dp->dp_display.connector, dp->usbpd->multi_func);
 	if (rc) {
 		if (rc == -ETIMEDOUT) {
 			pr_err("Sink cap read failed, skip notification\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index b5dd9bc..0401760 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -104,7 +104,7 @@ static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109};
 static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103,
 	111, 110, 0, 0, 0, 0, 0, 0};
 
-static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func)
 {
 	int rlen, rc = 0;
 	struct dp_panel_private *panel;
@@ -176,6 +176,10 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
 	link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
 				DP_MAX_LANE_COUNT_MASK;
 
+	if (multi_func)
+		link_info->num_lanes = min_t(unsigned int,
+			link_info->num_lanes, 2);
+
 	pr_debug("lane_count=%d\n", link_info->num_lanes);
 
 	if (drm_dp_enhanced_frame_cap(dpcd))
@@ -303,7 +307,7 @@ static int dp_panel_read_edid(struct dp_panel *dp_panel,
 }
 
 static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
-	struct drm_connector *connector)
+	struct drm_connector *connector, bool multi_func)
 {
 	int rc = 0;
 	struct dp_panel_private *panel;
@@ -315,7 +319,7 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	rc = dp_panel_read_dpcd(dp_panel);
+	rc = dp_panel_read_dpcd(dp_panel, multi_func);
 	if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code(
 		dp_panel->link_info.rate)) || !is_lane_count_valid(
 		dp_panel->link_info.num_lanes) ||
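
The dp_panel change clamps the sink-advertised lane count to two when the port is in multi-function mode (DisplayPort sharing the connector with USB), since only two lanes are then typically routed to DP. A hedged userspace sketch of the clamp; min_u32 stands in for the kernel's min_t:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* clamp the sink-reported lane count when the connector is shared with USB */
static uint32_t dp_effective_lanes(uint32_t sink_lanes, int multi_func)
{
	return multi_func ? min_u32(sink_lanes, 2) : sink_lanes;
}

int main(void)
{
	printf("%u\n", dp_effective_lanes(4, 0));	/* 4: DP owns all lanes */
	printf("%u\n", dp_effective_lanes(4, 1));	/* 2: two lanes left for USB */
	return 0;
}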
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 2583f61..6c2e186 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -80,7 +80,7 @@ struct dp_panel {
 	int (*deinit)(struct dp_panel *dp_panel);
 	int (*timing_cfg)(struct dp_panel *dp_panel);
 	int (*read_sink_caps)(struct dp_panel *dp_panel,
-		struct drm_connector *connector);
+		struct drm_connector *connector, bool multi_func);
 	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
 	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
 			u32 mode_pclk_khz);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 77df585..ca58896 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -247,7 +247,7 @@ struct dsi_ctrl {
 	u32 cmd_buffer_iova;
 	u32 cmd_len;
 	void *vaddr;
-	u32 secure_mode;
+	bool secure_mode;
 
 	/* Debug Information */
 	struct dentry *debugfs_root;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 3d99172..c8edb09 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -262,6 +262,7 @@ static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach)
 		display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
 		display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
 		display_ctrl->ctrl->vaddr = display->vaddr;
+		display_ctrl->ctrl->secure_mode = is_detach;
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index fd50256..a1e4685 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -320,9 +320,11 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 
 		cur_mode = crtc_state->crtc->mode;
 
+		/* No DMS/VRR when drm pipeline is changing */
 		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
-			(!(dsi_mode.dsi_mode_flags &
-				DSI_MODE_FLAG_VRR)))
+			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+			(!crtc_state->active_changed ||
+			 display->is_cont_splash_enabled))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 7671496..0ffece3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2143,7 +2143,7 @@ static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode,
 		priv_info->phy_timing_len = len;
 	};
 
-	mode->pixel_clk_khz = (DSI_H_TOTAL(&mode->timing) *
+	mode->pixel_clk_khz = (mode->timing.h_active *
 			DSI_V_TOTAL(&mode->timing) *
 			mode->timing.refresh_rate) / 1000;
 	return rc;
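
The dsi_panel hunk recomputes the mode pixel clock from the active horizontal width rather than the full horizontal total. A small userspace sketch of that arithmetic; the field names mirror the ones used above, and the 1080x2250@60 timing numbers are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* pixel clock in kHz = h_active * v_total * refresh_rate / 1000 */
static uint32_t pixel_clk_khz(uint32_t h_active, uint32_t v_total,
			      uint32_t refresh_rate)
{
	return (uint64_t)h_active * v_total * refresh_rate / 1000;
}

int main(void)
{
	/* illustrative FHD+ timing: 1080 active columns, 2250 total lines, 60 Hz */
	printf("%u kHz\n", pixel_clk_khz(1080, 2250, 60));	/* 145800 kHz */
	return 0;
}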
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 5635346..c1ea8dd 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -111,13 +111,13 @@ static int dsi_pwr_parse_supply_node(struct device_node *root,
 			regs->vregs[i].post_off_sleep = tmp;
 		}
 
-		++i;
 		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
 			 regs->vregs[i].vreg_name,
 			 regs->vregs[i].min_voltage,
 			 regs->vregs[i].max_voltage,
 			 regs->vregs[i].enable_load,
 			 regs->vregs[i].disable_load);
+		++i;
 	}
 
 error:
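
The dsi_pwr fix moves the index increment below the debug print so the log describes the regulator entry that was just parsed rather than the next, not-yet-populated slot. A compact illustration of the corrected ordering (the vreg contents are made up for the example):

#include <stdio.h>

struct vreg { const char *name; int min_uv; };

int main(void)
{
	struct vreg vregs[2] = { { "vdda", 1200000 }, { "vddio", 1800000 } };
	int i = 0, count = 2;

	while (i < count) {
		/* ... parse and populate vregs[i] here ... */

		/* log the entry just parsed, then advance the index */
		printf("[%s] minv=%d\n", vregs[i].name, vregs[i].min_uv);
		++i;
	}
	return 0;
}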
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 00cf225..c1a670d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -1436,42 +1436,6 @@ static int msm_release(struct inode *inode, struct file *filp)
 }
 
 /**
- * msm_drv_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- */
-void msm_drv_framebuffer_remove(struct drm_framebuffer *fb)
-{
-	struct drm_device *dev;
-
-	if (!fb)
-		return;
-
-	dev = fb->dev;
-
-	WARN_ON(!list_empty(&fb->filp_head));
-
-	drm_framebuffer_unreference(fb);
-}
-
-struct msm_drv_rmfb2_work {
-	struct work_struct work;
-	struct list_head fbs;
-};
-
-static void msm_drv_rmfb2_work_fn(struct work_struct *w)
-{
-	struct msm_drv_rmfb2_work *arg = container_of(w, typeof(*arg), work);
-
-	while (!list_empty(&arg->fbs)) {
-		struct drm_framebuffer *fb =
-			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		msm_drv_framebuffer_remove(fb);
-	}
-}
-
-/**
  * msm_ioctl_rmfb2 - remove an FB from the configuration
  * @dev: drm device for the ioctl
  * @data: data pointer for the ioctl
@@ -1514,25 +1478,7 @@ int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
 	list_del_init(&fb->filp_head);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	/*
-	 * we now own the reference that was stored in the fbs list
-	 *
-	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
-	 * so run this in a separate stack as there's no way to correctly
-	 * handle this after the fb is already removed from the lookup table.
-	 */
-	if (drm_framebuffer_read_refcount(fb) > 1) {
-		struct msm_drv_rmfb2_work arg;
-
-		INIT_WORK_ONSTACK(&arg.work, msm_drv_rmfb2_work_fn);
-		INIT_LIST_HEAD(&arg.fbs);
-		list_add_tail(&fb->filp_head, &arg.fbs);
-
-		schedule_work(&arg.work);
-		flush_work(&arg.work);
-		destroy_work_on_stack(&arg.work);
-	} else
-		drm_framebuffer_unreference(fb);
+	drm_framebuffer_unreference(fb);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 5a646e9..06f004c 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -58,7 +58,7 @@ enum ad_property {
  * enum ad_intr_resp_property - ad4 interrupt response enum
  */
 enum ad_intr_resp_property {
-	AD4_BACKLIGHT,
+	AD4_IN_OUT_BACKLIGHT,
 	AD4_RESPMAX,
 };
 
@@ -92,8 +92,10 @@ void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
  * sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
  * @dspp: pointer to dspp object
  * @event: event for which response is needed
- * @resp: value of event requested
+ * @resp_in: read ad4 input value of event requested
+ * @resp_out: read ad4 output value of event requested
  */
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp);
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+			u32 *resp_in, u32 *resp_out);
 
 #endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 07b5536..42aea7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -24,6 +24,7 @@
 #include "sde_ad4.h"
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
+#include "dsi_panel.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -1575,7 +1576,8 @@ static void sde_cp_ad_interrupt_cb(void *arg, int irq_idx)
 
 static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 {
-	uint32_t bl = 0;
+	uint32_t input_bl = 0, output_bl = 0;
+	uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
 	struct sde_hw_mixer *hw_lm = NULL;
 	struct sde_hw_dspp *hw_dspp = NULL;
 	u32 num_mixers;
@@ -1598,11 +1600,17 @@ static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 	if (!hw_dspp)
 		return;
 
-	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
+	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_IN_OUT_BACKLIGHT,
+			&input_bl, &output_bl);
+
+	if (!input_bl || input_bl < output_bl)
+		return;
+
+	scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
 	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
-			&event, (u8 *)&bl);
+			&event, (u8 *)&scale);
 }
 
 int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
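
The notify path above now converts the AD4 input/output backlight pair into a scale factor out of MAX_AD_BL_SCALE_LEVEL before sending the event to user space. A small sketch of that computation with the same guard against a zero or smaller-than-output input value; the real MAX_AD_BL_SCALE_LEVEL comes from dsi_panel.h, and 1024 here is only for the example:

#include <stdio.h>
#include <stdint.h>

/* illustrative value only; the driver takes this from dsi_panel.h */
#define MAX_AD_BL_SCALE_LEVEL 1024

static int ad_backlight_scale(uint32_t input_bl, uint32_t output_bl,
			      uint32_t *scale)
{
	*scale = MAX_AD_BL_SCALE_LEVEL;

	/* same guard as the driver: need a non-zero input >= output */
	if (!input_bl || input_bl < output_bl)
		return -1;

	*scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
	return 0;
}

int main(void)
{
	uint32_t scale;

	if (!ad_backlight_scale(255, 200, &scale))
		printf("scale = %u\n", scale);	/* 200 * 1024 / 255 = 803 */
	return 0;
}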
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 43e6aaa..222aee9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1403,8 +1403,32 @@ static void _sde_crtc_program_lm_output_roi(struct drm_crtc *crtc)
 	}
 }
 
+/**
+ * _sde_crtc_calc_inline_prefill - calculate rotator start prefill
+ * @crtc: Pointer to drm crtc
+ * return: prefill time in lines
+ */
+static u32 _sde_crtc_calc_inline_prefill(struct drm_crtc *crtc)
+{
+	struct sde_kms *sde_kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return 0;
+	}
+
+	return sde_kms->catalog->sbuf_prefill + sde_kms->catalog->sbuf_headroom;
+}
+
 static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
-	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+		struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
+		struct sde_crtc_mixer *mixer)
 {
 	struct drm_plane *plane;
 	struct drm_framebuffer *fb;
@@ -1417,14 +1441,13 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	struct sde_hw_stage_cfg *stage_cfg;
 	struct sde_rect plane_crtc_roi;
 
-	u32 flush_mask, flush_sbuf;
+	u32 flush_mask, flush_sbuf, prefill;
 	uint32_t stage_idx, lm_idx;
 	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
 	int i;
 	bool bg_alpha_enable = false;
-	u32 prefill = 0;
 
-	if (!sde_crtc || !mixer) {
+	if (!sde_crtc || !crtc->state || !mixer) {
 		SDE_ERROR("invalid sde_crtc or mixer\n");
 		return;
 	}
@@ -1434,8 +1457,10 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	stage_cfg = &sde_crtc->stage_cfg;
 	cstate = to_sde_crtc_state(crtc->state);
 
-	cstate->sbuf_prefill_line = 0;
-	sde_crtc->sbuf_flush_mask = 0x0;
+	cstate->sbuf_prefill_line = _sde_crtc_calc_inline_prefill(crtc);
+	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask_all;
+	sde_crtc->sbuf_flush_mask_all = 0x0;
+	sde_crtc->sbuf_flush_mask_delta = 0x0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		state = plane->state;
@@ -1450,14 +1475,18 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 		pstate = to_sde_plane_state(state);
 		fb = state->fb;
 
-		prefill = sde_plane_rot_calc_prefill(plane);
-		if (prefill > cstate->sbuf_prefill_line)
+		/* assume all rotated planes report the same prefill amount */
+		prefill = sde_plane_rot_get_prefill(plane);
+		if (prefill)
 			cstate->sbuf_prefill_line = prefill;
 
 		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
 
 		/* save sbuf flush value for later */
-		sde_crtc->sbuf_flush_mask |= flush_sbuf;
+		if (old_state && drm_atomic_get_existing_plane_state(
+					old_state->state, plane))
+			sde_crtc->sbuf_flush_mask_delta |= flush_sbuf;
+		sde_crtc->sbuf_flush_mask_all |= flush_sbuf;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
@@ -1584,8 +1613,11 @@ static void _sde_crtc_swap_mixers_for_right_partial_update(
 /**
  * _sde_crtc_blend_setup - configure crtc mixers
  * @crtc: Pointer to drm crtc structure
+ * @old_state: Pointer to old crtc state
+ * @add_planes: Whether or not to add planes to mixers
  */
-static void _sde_crtc_blend_setup(struct drm_crtc *crtc, bool add_planes)
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state, bool add_planes)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_crtc_state;
@@ -1632,7 +1664,7 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc, bool add_planes)
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
 
 	if (add_planes)
-		_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+		_sde_crtc_blend_setup_mixer(crtc, old_state, sde_crtc, mixer);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
@@ -3198,7 +3230,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 	if (unlikely(!sde_crtc->num_mixers))
 		return;
 
-	_sde_crtc_blend_setup(crtc, true);
+	_sde_crtc_blend_setup(crtc, old_state, true);
 	_sde_crtc_dest_scaler_setup(crtc);
 
 	/* cancel the idle notify delayed work */
@@ -3422,23 +3454,29 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
 	sde_crtc = to_sde_crtc(crtc);
 
 	/*
-	 * Update sbuf configuration and flush bits if a flush
-	 * mask has been defined for either the current or
-	 * previous commit.
+	 * Update sbuf configuration and flush bits if either the rot_op_mode
+	 * is different or a rotator commit was performed.
 	 *
-	 * Updates are also required for the first commit after
-	 * sbuf_flush_mask becomes 0x0, to properly transition
-	 * the hardware out of sbuf mode.
+	 * In the case where the rot_op_mode has changed, further require that
+	 * the transition is either to or from offline mode unless
+	 * sbuf_flush_mask_delta is also non-zero (i.e., a corresponding plane
+	 * update was provided to the current commit).
 	 */
-	if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
-		return 0;
+	flush_mask = sde_crtc->sbuf_flush_mask_delta;
+	if ((sde_crtc->sbuf_op_mode_old != cstate->sbuf_cfg.rot_op_mode) &&
+		(sde_crtc->sbuf_op_mode_old == SDE_CTL_ROT_OP_MODE_OFFLINE ||
+		cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE))
+		flush_mask |= sde_crtc->sbuf_flush_mask_all |
+			sde_crtc->sbuf_flush_mask_old;
 
-	flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
-	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
+	if (!flush_mask &&
+		cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+		return 0;
 
 	SDE_ATRACE_BEGIN("crtc_kickoff_rot");
 
-	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
+	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE &&
+			sde_crtc->sbuf_flush_mask_delta) {
 		drm_atomic_crtc_for_each_plane(plane, crtc) {
 			rc = sde_plane_kickoff_rot(plane);
 			if (rc) {
@@ -3474,12 +3512,16 @@ static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
 		/* explicitly trigger rotator for async modes */
 		if (cstate->sbuf_cfg.rot_op_mode ==
 				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
-				master_ctl->ops.trigger_rot_start) {
+				master_ctl->ops.trigger_rot_start)
 			master_ctl->ops.trigger_rot_start(master_ctl);
-			SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0);
-		}
+		SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0,
+				sde_crtc->sbuf_flush_mask_all,
+				sde_crtc->sbuf_flush_mask_delta);
 	}
 
+	/* save this in sde_crtc for next commit cycle */
+	sde_crtc->sbuf_op_mode_old = cstate->sbuf_cfg.rot_op_mode;
+
 	SDE_ATRACE_END("crtc_kickoff_rot");
 	return rc;
 }
@@ -3492,13 +3534,14 @@ static void _sde_crtc_remove_pipe_flush(struct sde_crtc *sde_crtc)
 {
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *ctl;
-	u32 i, flush_mask;
+	u32 i, n, flush_mask;
 
 	if (!sde_crtc)
 		return;
 
 	mixer = sde_crtc->mixers;
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
+	n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+	for (i = 0; i < n; i++) {
 		ctl = mixer[i].hw_ctl;
 		if (!ctl || !ctl->ops.get_pending_flush ||
 				!ctl->ops.clear_pending_flush ||
@@ -3524,16 +3567,19 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
 {
 	struct drm_plane *plane_halt[MAX_PLANES];
 	struct drm_plane *plane;
+	struct drm_encoder *encoder;
 	const struct drm_plane_state *pstate;
 	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
 	struct sde_hw_ctl *ctl;
 	enum sde_ctl_rot_op_mode old_rot_op_mode;
-	signed int i, plane_count;
+	signed int i, n, plane_count;
 	int rc;
 
-	if (!crtc || !old_state)
+	if (!crtc || !crtc->dev || !old_state || !crtc->state)
 		return -EINVAL;
 	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
 
 	old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode;
 	SDE_EVT32(DRMID(crtc), old_rot_op_mode,
@@ -3545,7 +3591,8 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
 	/* optionally generate a panic instead of performing a h/w reset */
 	SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic");
 
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.reset)
 			continue;
@@ -3570,14 +3617,13 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
 	 * depending on the rotation mode; don't handle this for now
 	 * and just force a hard reset in those cases.
 	 */
-	if (i == sde_crtc->num_mixers &&
-			old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+	if (i == n && old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
 		return false;
 
 	SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
 
 	/* force all components in the system into reset at the same time */
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.hard_reset)
 			continue;
@@ -3613,11 +3659,26 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
 		sde_plane_reset_rot(plane, (struct drm_plane_state *)pstate);
 	}
 
+	/* provide safe "border color only" commit configuration for later */
+	cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+	_sde_crtc_commit_kickoff_rot(crtc, cstate);
+	_sde_crtc_remove_pipe_flush(sde_crtc);
+	_sde_crtc_blend_setup(crtc, old_state, false);
+
 	/* take h/w components out of reset */
 	for (i = plane_count - 1; i >= 0; --i)
 		sde_plane_halt_requests(plane_halt[i], false);
 
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	/* attempt to poll for start of frame cycle before reset release */
+	list_for_each_entry(encoder,
+			&crtc->dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+			sde_encoder_poll_line_counts(encoder);
+	}
+
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.hard_reset)
 			continue;
@@ -3625,6 +3686,15 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc,
 		ctl->ops.hard_reset(ctl, false);
 	}
 
+	list_for_each_entry(encoder,
+			&crtc->dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+			sde_encoder_kickoff(encoder, false);
+	}
+
 	return -EAGAIN;
 }
 
@@ -3649,7 +3719,7 @@ static bool _sde_crtc_prepare_for_kickoff_rot(struct drm_device *dev,
 	cstate = to_sde_crtc_state(crtc->state);
 
 	/* default to ASYNC mode for inline rotation */
-	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask_all ?
 		SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
 
 	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
@@ -3749,11 +3819,6 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
 		if (_sde_crtc_reset_hw(crtc, old_state,
 					!sde_crtc->reset_request))
 			is_error = true;
-
-		/* force offline rotation mode since the commit has no pipes */
-		if (is_error)
-			cstate->sbuf_cfg.rot_op_mode =
-				SDE_CTL_ROT_OP_MODE_OFFLINE;
 	}
 	sde_crtc->reset_request = reset_req;
 
@@ -3799,7 +3864,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc,
 
 	if (is_error) {
 		_sde_crtc_remove_pipe_flush(sde_crtc);
-		_sde_crtc_blend_setup(crtc, false);
+		_sde_crtc_blend_setup(crtc, old_state, false);
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -4612,7 +4677,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 
 		/* identify attached planes that are not in the delta state */
 		if (!drm_atomic_get_existing_plane_state(state->state, plane)) {
-			rc = sde_plane_confirm_hw_rsvps(plane, pstate);
+			rc = sde_plane_confirm_hw_rsvps(plane, pstate, state);
 			if (rc) {
 				SDE_ERROR("crtc%d confirmation hw failed %d\n",
 						crtc->base.id, rc);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 589a667..1de3675 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -215,8 +215,10 @@ struct sde_crtc_event {
  * @misr_enable   : boolean entry indicates misr enable/disable status.
  * @misr_frame_count  : misr frame count provided by client
  * @misr_data     : store misr data before turning off the clocks.
- * @sbuf_flush_mask: flush mask for inline rotator
+ * @sbuf_op_mode_old : inline rotator op mode for previous commit cycle
  * @sbuf_flush_mask_old: inline rotator flush mask for previous commit
+ * @sbuf_flush_mask_all: inline rotator flush mask for all attached planes
+ * @sbuf_flush_mask_delta: inline rotator flush mask for current delta state
  * @idle_notify_work: delayed worker to notify idle timeout to user space
  * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
@@ -284,8 +286,10 @@ struct sde_crtc {
 	u32 misr_frame_count;
 	u32 misr_data[CRTC_DUAL_MIXERS];
 
-	u32 sbuf_flush_mask;
+	u32 sbuf_op_mode_old;
 	u32 sbuf_flush_mask_old;
+	u32 sbuf_flush_mask_all;
+	u32 sbuf_flush_mask_delta;
 	struct kthread_delayed_work idle_notify_work;
 
 	struct sde_power_event *power_event;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 4008115..3b5e3f5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -121,6 +121,13 @@
  *	Event signals that there were no frame updates for
  *	IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
  *      and request RSC with IDLE state and change the resource state to IDLE.
+ * @SDE_ENC_RC_EVENT_EARLY_WAKEUP:
+ *	This event is triggered from the input event thread when touch event is
+ *	received from the input device. On receiving this event,
+ *      - If the device is in SDE_ENC_RC_STATE_IDLE state, it turns ON the
+ *        clocks and enables RSC.
+ *      - If the device is in SDE_ENC_RC_STATE_ON state, it resets the delayed
+ *        off work since a new commit is imminent.
  */
 enum sde_enc_rc_events {
 	SDE_ENC_RC_EVENT_KICKOFF = 1,
@@ -129,7 +136,8 @@ enum sde_enc_rc_events {
 	SDE_ENC_RC_EVENT_STOP,
 	SDE_ENC_RC_EVENT_PRE_MODESET,
 	SDE_ENC_RC_EVENT_POST_MODESET,
-	SDE_ENC_RC_EVENT_ENTER_IDLE
+	SDE_ENC_RC_EVENT_ENTER_IDLE,
+	SDE_ENC_RC_EVENT_EARLY_WAKEUP,
 };
 
 /*
@@ -194,6 +202,8 @@ enum sde_enc_rc_states {
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
  * @vsync_event_work:		worker to handle vsync event for autorefresh
+ * @input_event_work:		worker to handle input device touch events
+ * @input_handler:			handler for input device events
  * @topology:                   topology of the display
  * @vblank_enabled:		boolean to track userspace vblank vote
  * @rsc_config:			rsc configuration for display vtotal, fps, etc.
@@ -238,6 +248,8 @@ struct sde_encoder_virt {
 	enum sde_enc_rc_states rc_state;
 	struct kthread_delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
+	struct kthread_work input_event_work;
+	struct input_handler *input_handler;
 	struct msm_display_topology topology;
 	bool vblank_enabled;
 
@@ -709,6 +721,11 @@ void sde_encoder_destroy(struct drm_encoder *drm_enc)
 	drm_encoder_cleanup(drm_enc);
 	mutex_destroy(&sde_enc->enc_lock);
 
+	if (sde_enc->input_handler) {
+		input_unregister_handler(sde_enc->input_handler);
+		kfree(sde_enc->input_handler);
+	}
+
 	kfree(sde_enc);
 }
 
@@ -1816,6 +1833,45 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 	return 0;
 }
 
+static void sde_encoder_input_event_handler(struct input_handle *handle,
+	unsigned int type, unsigned int code, int value)
+{
+	struct drm_encoder *drm_enc = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct msm_drm_thread *disp_thread = NULL;
+	struct msm_drm_private *priv = NULL;
+
+	if (!handle || !handle->handler || !handle->handler->private) {
+		SDE_ERROR("invalid encoder for the input event\n");
+		return;
+	}
+
+	drm_enc = (struct drm_encoder *)handle->handler->private;
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->crtc || (sde_enc->crtc->index
+			>= ARRAY_SIZE(priv->disp_thread))) {
+		SDE_DEBUG_ENC(sde_enc,
+			"invalid cached CRTC: %d or crtc index: %d\n",
+			sde_enc->crtc == NULL,
+			sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
+		return;
+	}
+
+	SDE_EVT32_VERBOSE(DRMID(drm_enc));
+
+	disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+
+	kthread_queue_work(&disp_thread->worker,
+				&sde_enc->input_event_work);
+}
+
 static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
 		u32 sw_event)
 {
@@ -1967,7 +2023,7 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
 			idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
 
 		if (!autorefresh_enabled)
-			kthread_queue_delayed_work(
+			kthread_mod_delayed_work(
 				&disp_thread->worker,
 				&sde_enc->delayed_off_work,
 				msecs_to_jiffies(idle_pc_duration));
@@ -2177,7 +2233,57 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
 
 		mutex_unlock(&sde_enc->rc_lock);
 		break;
+	case SDE_ENC_RC_EVENT_EARLY_WAKEUP:
+		if (!sde_enc->crtc ||
+			sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+			SDE_DEBUG_ENC(sde_enc,
+				"invalid crtc:%d or crtc index:%d, sw_event:%u\n",
+				sde_enc->crtc == NULL,
+				sde_enc->crtc ? sde_enc->crtc->index : -EINVAL,
+				sw_event);
+			return -EINVAL;
+		}
 
+		disp_thread = &priv->disp_thread[sde_enc->crtc->index];
+
+		mutex_lock(&sde_enc->rc_lock);
+
+		if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
+			if (sde_enc->cur_master &&
+				sde_enc->cur_master->ops.is_autorefresh_enabled)
+				autorefresh_enabled =
+				sde_enc->cur_master->ops.is_autorefresh_enabled(
+							sde_enc->cur_master);
+			if (autorefresh_enabled) {
+				SDE_DEBUG_ENC(sde_enc,
+					"not handling early wakeup since auto refresh is enabled\n");
+				mutex_unlock(&sde_enc->rc_lock);
+				return 0;
+			}
+
+			if (!sde_crtc_frame_pending(sde_enc->crtc))
+				kthread_mod_delayed_work(&disp_thread->worker,
+						&sde_enc->delayed_off_work,
+						msecs_to_jiffies(
+						IDLE_POWERCOLLAPSE_DURATION));
+		} else if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
+			/* enable all the clks and resources */
+			_sde_encoder_resource_control_rsc_update(drm_enc, true);
+			_sde_encoder_resource_control_helper(drm_enc, true);
+
+			kthread_mod_delayed_work(&disp_thread->worker,
+						&sde_enc->delayed_off_work,
+						msecs_to_jiffies(
+						IDLE_POWERCOLLAPSE_DURATION));
+
+			sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
+		}
+
+		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
+				SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE8);
+
+		mutex_unlock(&sde_enc->rc_lock);
+		break;
 	default:
 		SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
 		SDE_ERROR("unexpected sw_event: %d\n", sw_event);
@@ -3402,6 +3508,116 @@ static void sde_encoder_vsync_event_handler(unsigned long data)
 				&sde_enc->vsync_event_work);
 }
 
+static void sde_encoder_input_event_work_handler(struct kthread_work *work)
+{
+	struct sde_encoder_virt *sde_enc = container_of(work,
+				struct sde_encoder_virt, input_event_work);
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	sde_encoder_resource_control(&sde_enc->base,
+			SDE_ENC_RC_EVENT_EARLY_WAKEUP);
+}
+
+static int _sde_encoder_input_connect(struct input_handler *handler,
+	struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int rc = 0;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	rc = input_register_handle(handle);
+	if (rc) {
+		pr_err("failed to register input handle\n");
+		goto error;
+	}
+
+	rc = input_open_device(handle);
+	if (rc) {
+		pr_err("failed to open input device\n");
+		goto error_unregister;
+	}
+
+	return 0;
+
+error_unregister:
+	input_unregister_handle(handle);
+
+error:
+	kfree(handle);
+
+	return rc;
+}
+
+static void _sde_encoder_input_disconnect(struct input_handle *handle)
+{
+	 input_close_device(handle);
+	 input_unregister_handle(handle);
+	 kfree(handle);
+}
+
+/**
+ * Structure for specifying event parameters on which to receive callbacks.
+ * This structure will trigger a callback in case of a touch event (specified by
+ * EV_ABS) where there is a change in X and Y coordinates.
+ */
+static const struct input_device_id sde_input_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+					BIT_MASK(ABS_MT_POSITION_X) |
+					BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	{ },
+};
+
+static int _sde_encoder_input_handler(
+		struct sde_encoder_virt *sde_enc)
+{
+	struct input_handler *input_handler = NULL;
+	int rc = 0;
+
+	if (sde_enc->input_handler) {
+		SDE_ERROR_ENC(sde_enc,
+				"input_handle is active. unexpected\n");
+		return -EINVAL;
+	}
+
+	input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
+	if (!input_handler)
+		return -ENOMEM;
+
+	input_handler->event = sde_encoder_input_event_handler;
+	input_handler->connect = _sde_encoder_input_connect;
+	input_handler->disconnect = _sde_encoder_input_disconnect;
+	input_handler->name = "sde";
+	input_handler->id_table = sde_input_ids;
+	input_handler->private = sde_enc;
+
+	rc = input_register_handler(input_handler);
+	if (rc) {
+		SDE_ERROR_ENC(sde_enc,
+			"input_register_handler failed, rc= %d\n", rc);
+		kfree(input_handler);
+		return rc;
+	}
+
+	sde_enc->input_handler = input_handler;
+
+	return rc;
+}
+
 static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 {
 	struct sde_encoder_virt *sde_enc = container_of(work,
@@ -3443,6 +3659,51 @@ static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
 	_sde_encoder_power_enable(sde_enc, false);
 }
 
+int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc)
+{
+	static const uint64_t timeout_us = 50000;
+	static const uint64_t sleep_us = 20;
+	struct sde_encoder_virt *sde_enc;
+	ktime_t cur_ktime, exp_ktime;
+	uint32_t line_count, tmp, i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master ||
+			!sde_enc->cur_master->ops.get_line_count) {
+		SDE_DEBUG_ENC(sde_enc, "can't get master line count\n");
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
+		return -EINVAL;
+	}
+
+	exp_ktime = ktime_add_ms(ktime_get(), timeout_us / 1000);
+
+	line_count = sde_enc->cur_master->ops.get_line_count(
+			sde_enc->cur_master);
+
+	for (i = 0; i < (timeout_us * 2 / sleep_us); ++i) {
+		tmp = line_count;
+		line_count = sde_enc->cur_master->ops.get_line_count(
+				sde_enc->cur_master);
+		if (line_count < tmp) {
+			SDE_EVT32(DRMID(drm_enc), line_count);
+			return 0;
+		}
+
+		cur_ktime = ktime_get();
+		if (ktime_compare_safe(exp_ktime, cur_ktime) <= 0)
+			break;
+
+		usleep_range(sleep_us / 2, sleep_us);
+	}
+
+	SDE_EVT32(DRMID(drm_enc), line_count, SDE_EVTLOG_ERROR);
+	return -ETIMEDOUT;
+}
+
 int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -3833,6 +4094,7 @@ static ssize_t _sde_encoder_misr_read(struct file *file,
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
 		if (!phys || !phys->ops.collect_misr)
 			continue;
 
@@ -4216,6 +4478,13 @@ struct drm_encoder *sde_encoder_init(
 		sde_enc->rsc_client = NULL;
 	}
 
+	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+		ret = _sde_encoder_input_handler(sde_enc);
+		if (ret)
+			SDE_ERROR(
+			"input handler registration failed, rc = %d\n", ret);
+	}
+
 	mutex_init(&sde_enc->rc_lock);
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
@@ -4224,6 +4493,9 @@ struct drm_encoder *sde_encoder_init(
 	kthread_init_work(&sde_enc->vsync_event_work,
 			sde_encoder_vsync_event_work_handler);
 
+	kthread_init_work(&sde_enc->input_event_work,
+			sde_encoder_input_event_work_handler);
+
 	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
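
sde_encoder_poll_line_counts() added above waits for the start of a frame by sampling the interface line counter until the new reading is lower than the previous one (i.e. the counter wrapped) or a timeout expires. A hedged userspace sketch of that wrap-detection loop, with a fake counter standing in for the hardware register read:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* fake line counter: counts up and wraps at the total line count,
 * loosely mimicking a scanout position register */
static uint32_t read_line_count(void)
{
	static uint32_t t;
	return (t += 97) % 1125;
}

/* poll until the counter decreases (frame boundary) or we give up */
static bool poll_frame_start(unsigned int max_polls)
{
	uint32_t prev = read_line_count(), cur;
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		cur = read_line_count();
		if (cur < prev)		/* counter wrapped: new frame started */
			return true;
		prev = cur;
	}
	return false;			/* timed out */
}

int main(void)
{
	printf(poll_frame_start(5000) ? "frame start seen\n" : "timeout\n");
	return 0;
}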
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 937bd18..8038eb6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -108,6 +108,13 @@ void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
 struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_poll_line_counts - poll encoder line counts for start of frame
+ * @encoder:	encoder pointer
+ * @Returns:	zero on success
+ */
+int sde_encoder_poll_line_counts(struct drm_encoder *encoder);
+
+/**
  * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
  *	path (i.e. ctl flush and start) at next appropriate time.
  *	Immediately: if no previous commit is outstanding.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index ad27b7f..4a15e6f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -299,6 +299,8 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
 		rot_fetch_start_vsync_counter);
 
 	if (!phys_enc->sde_kms->splash_data.cont_splash_en) {
+		SDE_EVT32(DRMID(phys_enc->parent), f.enable, f.fetch_start);
+
 		phys_enc->hw_ctl->ops.get_bitmask_intf(
 				phys_enc->hw_ctl, &flush_mask,
 				vid_enc->hw_intf->idx);
@@ -308,10 +310,10 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
 		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
 		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-	}
 
-	vid_enc->rot_fetch = f;
-	vid_enc->rot_fetch_valid = true;
+		vid_enc->rot_fetch = f;
+		vid_enc->rot_fetch_valid = true;
+	}
 }
 
 static bool sde_encoder_phys_vid_mode_fixup(
@@ -402,6 +404,7 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	struct sde_hw_ctl *hw_ctl;
 	unsigned long lock_flags;
 	u32 flush_register = 0;
+	u32 reset_status = 0;
 	int new_cnt = -1, old_cnt = -1;
 	u32 event = 0;
 
@@ -444,10 +447,15 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	if (flush_register == 0)
 		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
 				-1, 0);
+
+	if (hw_ctl && hw_ctl->ops.get_reset)
+		reset_status = hw_ctl->ops.get_reset(hw_ctl);
+
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
-			old_cnt, new_cnt, flush_register, event);
+			old_cnt, new_cnt, reset_status ? SDE_EVTLOG_ERROR : 0,
+			flush_register, event);
 
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index bf48271..994bf3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -1248,16 +1248,20 @@ static int ad4_backlight_setup_ipcr(struct sde_hw_dspp *dspp,
 	return 0;
 }
 
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp)
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+		u32 *resp_in, u32 *resp_out)
 {
-	if (!dspp || !resp) {
-		DRM_ERROR("invalid params dspp %pK resp %pK\n", dspp, resp);
+	if (!dspp || !resp_in || !resp_out) {
+		DRM_ERROR("invalid params dspp %pK resp_in %pK resp_out %pK\n",
+				dspp, resp_in, resp_out);
 		return;
 	}
 
 	switch (event) {
-	case AD4_BACKLIGHT:
-		*resp = SDE_REG_READ(&dspp->hw,
+	case AD4_IN_OUT_BACKLIGHT:
+		*resp_in = SDE_REG_READ(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x2c);
+		*resp_out = SDE_REG_READ(&dspp->hw,
 				dspp->cap->sblk->ad.base + 0x48);
 		break;
 	default:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index c23afc5..545ed65 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -95,6 +95,7 @@
 #define PROP_BITVALUE_ACCESS(p, i, j, k)	((p + i)->bit_value[j][k])
 
 #define DEFAULT_SBUF_HEADROOM		(20)
+#define DEFAULT_SBUF_PREFILL		(128)
 
 /*
  * Default parameter values
@@ -1953,6 +1954,7 @@ static int sde_rot_parse_dt(struct device_node *np,
 	if (sde_cfg->rot_count) {
 		sde_cfg->has_sbuf = true;
 		sde_cfg->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+		sde_cfg->sbuf_prefill = DEFAULT_SBUF_PREFILL;
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 1cd65ea..aa6c482 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -918,6 +918,7 @@ struct sde_perf_cfg {
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
  * @sbuf_headroom      stream buffer headroom in lines
+ * @sbuf_prefill       stream buffer prefill default in lines
  * @has_idle_pc        indicate if idle power collapse feature is supported
  * @has_hdr            HDR feature support
  * @dma_formats        Supported formats for dma pipe
@@ -944,6 +945,7 @@ struct sde_mdss_cfg {
 	u32 ubwc_version;
 	bool has_sbuf;
 	u32 sbuf_headroom;
+	u32 sbuf_prefill;
 	bool has_idle_pc;
 	u32 vbif_qos_nlvl;
 	u32 ts_prefill_rev;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index b8c790f..303d96e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -347,6 +347,13 @@ static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 timeout_us)
 	return status;
 }
 
+static u32 sde_hw_ctl_get_reset_status(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0;
+	return (u32)SDE_REG_READ(&ctx->hw, CTL_SW_RESET);
+}
+
 static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
@@ -632,6 +639,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
 	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
 	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
 	ops->reset = sde_hw_ctl_reset_control;
+	ops->get_reset = sde_hw_ctl_get_reset_status;
 	ops->hard_reset = sde_hw_ctl_hard_reset;
 	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
 	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 435fc21..9eb31f1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -153,6 +153,13 @@ struct sde_hw_ctl_ops {
 	int (*reset)(struct sde_hw_ctl *c);
 
 	/**
+	 * get_reset - check ctl reset status bit
+	 * @ctx    : ctl path ctx pointer
+	 * Returns: current value of ctl reset status
+	 */
+	u32 (*get_reset)(struct sde_hw_ctl *ctx);
+
+	/**
 	 * hard_reset - force reset on ctl_path
 	 * @ctx    : ctl path ctx pointer
 	 * @enable : whether to enable/disable hard reset
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 2b64165..2d2ac5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -159,10 +159,11 @@ struct sde_hw_dspp_ops {
 	/**
 	 * ad_read_intr_resp - function to get interrupt response for ad
 	 * @event: Event for which response needs to be read
-	 * @resp: Pointer to u32 where response value is dumped.
+	 * @resp_in: Pointer to u32 where resp ad4 input value is dumped.
+	 * @resp_out: Pointer to u32 where resp ad4 output value is dumped.
 	 */
 	void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
-			u32 *resp);
+			u32 *resp_in, u32 *resp_out);
 
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 8d386a8..facec3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -552,6 +552,28 @@ static void sde_hw_rot_to_v4l2_buffer(u32 drm_pixfmt, u64 drm_modifier,
 }
 
 /**
+ * sde_hw_rot_adjust_prefill_bw - update prefill bw based on pipe config
+ * @hw: Pointer to rotator hardware driver
+ * @data: Pointer to command descriptor
+ * @prefill_bw: adjusted prefill bw (output)
+ * return: 0 if success; error code otherwise
+ */
+static int sde_hw_rot_adjust_prefill_bw(struct sde_hw_rot *hw,
+		struct sde_hw_rot_cmd *data, u64 *prefill_bw)
+{
+	if (!hw || !data || !prefill_bw) {
+		SDE_ERROR("invalid parameter(s)\n");
+		return -EINVAL;
+	}
+
+	/* adjust bw for scaling */
+	if (data->dst_rect_h)
+		*prefill_bw = mult_frac(data->prefill_bw, data->crtc_h,
+				data->dst_rect_h);
+	return 0;
+}
+
+/**
  * sde_hw_rot_commit - commit/execute given rotator command
  * @hw: Pointer to rotator hardware driver
  * @data: Pointer to command descriptor
@@ -683,6 +705,8 @@ static int sde_hw_rot_commit(struct sde_hw_rot *hw, struct sde_hw_rot_cmd *data,
 				&rot_cmd.dst_planes);
 	}
 
+	sde_hw_rot_adjust_prefill_bw(hw, data, &rot_cmd.prefill_bw);
+
 	/* only process any command if client is master or for validation */
 	if (data->master || hw_cmd == SDE_HW_ROT_CMD_VALIDATE) {
 		SDE_DEBUG("dispatch seq:%d cmd:%d\n", data->sequence_id,
@@ -918,6 +942,7 @@ struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
 	/* Assign ops */
 	c->idx = idx;
 	c->caps = cfg;
+	c->catalog = m;
 	_setup_rot_ops(&c->ops, c->caps->features);
 	snprintf(c->name, ARRAY_SIZE(c->name), "sde_rot_%d", idx - ROT_0);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index ea88d05..59f30ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -73,6 +73,7 @@ enum sde_hw_rot_cmd_type {
  * @dst_rect_y: destination rectangle y coordinate
  * @dst_rect_w: destination rectangle width
  * @dst_rect_h: destination rectangle height
+ * @crtc_h: sspp output height
  * @priv_handle: private handle of rotator driver (output)
  */
 struct sde_hw_rot_cmd {
@@ -110,6 +111,7 @@ struct sde_hw_rot_cmd {
 	u32 dst_rect_y;
 	u32 dst_rect_w;
 	u32 dst_rect_h;
+	u32 crtc_h;
 	void *priv_handle;
 };
 
@@ -133,6 +135,7 @@ struct sde_hw_rot_ops {
  * @hw: hardware address map
  * @idx: instance index
  * @caps: capabilities bitmask
+ * @catalog: pointer to hardware catalog
  * @ops: operation table
  * @rot_ctx: pointer to private rotator context
  * @format_caps: pointer to pixel format capability  array
@@ -144,6 +147,7 @@ struct sde_hw_rot {
 	char name[SDE_HW_ROT_NAME_SIZE];
 	int idx;
 	const struct sde_rot_cfg *caps;
+	struct sde_mdss_cfg *catalog;
 	struct sde_hw_rot_ops ops;
 	void *rot_ctx;
 	struct sde_format_extended *format_caps;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index d4d6998..2b0aa37 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -2999,12 +2999,19 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
 	}
 
-	if (sde_kms->splash_data.cont_splash_en)
+	if (sde_kms->splash_data.cont_splash_en) {
 		SDE_DEBUG("Skipping MDP Resources disable\n");
-	else
+	} else {
+		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
+			sde_power_data_bus_set_quota(&priv->phandle,
+				sde_kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, i,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+
 		sde_power_resource_enable(&priv->phandle,
 						sde_kms->core_client, false);
-
+	}
 	return 0;
 
 genpd_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f2f870f..54a0c50 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -182,7 +182,7 @@ static struct drm_crtc_state *_sde_plane_get_crtc_state(
 	return cstate;
 }
 
-static bool sde_plane_enabled(struct drm_plane_state *state)
+static bool sde_plane_enabled(const struct drm_plane_state *state)
 {
 	return state && state->fb && state->crtc;
 }
@@ -1558,50 +1558,38 @@ static struct sde_crtc_res_ops fbo_res_ops = {
 	.get = _sde_plane_fbo_get,
 };
 
-/**
- * sde_plane_rot_calc_prefill - calculate rotator start prefill
- * @plane: Pointer to drm plane
- * return: prefill time in line
- */
-u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
+u32 sde_plane_rot_get_prefill(struct drm_plane *plane)
 {
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_kms *sde_kms;
-	u32 blocksize = 128;
-	u32 prefill_line = 0;
+	u32 blocksize = 0;
 
 	if (!plane || !plane->state || !plane->state->fb) {
 		SDE_ERROR("invalid parameters\n");
 		return 0;
 	}
 
-	sde_kms = _sde_plane_get_kms(plane);
 	state = plane->state;
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
+	if (!rstate->out_fb_format)
+		return 0;
+
+	sde_kms = _sde_plane_get_kms(plane);
 	if (!sde_kms || !sde_kms->catalog) {
 		SDE_ERROR("invalid kms\n");
 		return 0;
 	}
 
-	if (rstate->out_fb_format)
-		sde_format_get_block_size(rstate->out_fb_format,
-				&blocksize, &blocksize);
+	/* return zero if out_fb_format isn't valid */
+	if (sde_format_get_block_size(rstate->out_fb_format,
+			&blocksize, &blocksize))
+		return 0;
 
-	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
-	prefill_line = mult_frac(prefill_line, rstate->out_src_h >> 16,
-			state->crtc_h);
-	SDE_DEBUG(
-		"plane%d.%d blk:%u head:%u vdst/vsrc:%u/%u prefill:%u\n",
-			plane->base.id, rstate->sequence_id,
-			blocksize, sde_kms->catalog->sbuf_headroom,
-			state->crtc_h, rstate->out_src_h >> 16,
-			prefill_line);
-
-	return prefill_line;
+	return blocksize + sde_kms->catalog->sbuf_headroom;
 }
 
 /**
@@ -1937,6 +1925,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane,
 	rot_cmd->dst_rect_y = 0;
 	rot_cmd->dst_rect_w = drm_rect_width(&rstate->out_rot_rect) >> 16;
 	rot_cmd->dst_rect_h = drm_rect_height(&rstate->out_rot_rect) >> 16;
+	rot_cmd->crtc_h = state->crtc_h;
 
 	if (hw_cmd == SDE_HW_ROT_CMD_COMMIT) {
 		struct sde_hw_fmt_layout layout;
@@ -2966,30 +2955,22 @@ int sde_plane_validate_multirect_v2(struct sde_multirect_plane_states *plane)
 }
 
 int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
-		const struct drm_plane_state *state)
+		const struct drm_plane_state *state,
+		struct drm_crtc_state *cstate)
 {
-	struct drm_crtc_state *cstate;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_hw_blk *hw_blk;
 
-	if (!plane || !state) {
-		SDE_ERROR("invalid plane/state\n");
+	if (!plane || !state || !cstate) {
+		SDE_ERROR("invalid parameters\n");
 		return -EINVAL;
 	}
 
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
-	/* cstate will be null if crtc is disconnected from plane */
-	cstate = _sde_plane_get_crtc_state((struct drm_plane_state *)state);
-	if (IS_ERR_OR_NULL(cstate)) {
-		SDE_ERROR("invalid crtc state\n");
-		return -EINVAL;
-	}
-
-	if (sde_plane_enabled((struct drm_plane_state *)state) &&
-			rstate->out_sbuf) {
+	if (sde_plane_enabled(state) && rstate->out_sbuf) {
 		SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
 				plane->base.id, rstate->sequence_id,
 				state->fb ? state->fb->base.id : -1);
@@ -3005,7 +2986,15 @@ int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
 					SDE_EVTLOG_ERROR);
 			return -EINVAL;
 		}
+
+		_sde_plane_rot_get_fb(plane, cstate, rstate);
+
+		SDE_EVT32(DRMID(plane), rstate->sequence_id,
+				state->fb ? state->fb->base.id : -1,
+				rstate->out_fb ? rstate->out_fb->base.id : -1,
+				hw_blk->id);
 	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index e8b621c..ad58097 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -204,10 +204,12 @@ bool is_sde_plane_virtual(struct drm_plane *plane);
  * sde_plane_confirm_hw_rsvps - reserve an sbuf resource, if needed
  * @plane: Pointer to DRM plane object
  * @state: Pointer to plane state
+ * @cstate: Pointer to crtc state containing the resource pool
  * Returns: Zero on success
  */
 int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
-		const struct drm_plane_state *state);
+		const struct drm_plane_state *state,
+		struct drm_crtc_state *cstate);
 
 /**
  * sde_plane_get_ctl_flush - get control flush mask
@@ -220,11 +222,11 @@ void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
 		u32 *flush_sspp, u32 *flush_rot);
 
 /**
- * sde_plane_rot_calc_prefill - calculate rotator start prefill
+ * sde_plane_rot_get_prefill - calculate rotator start prefill
  * @plane: Pointer to drm plane
- * return: prefill time in line
+ * return: prefill time in lines
  */
-u32 sde_plane_rot_calc_prefill(struct drm_plane *plane);
+u32 sde_plane_rot_get_prefill(struct drm_plane *plane);
 
 /**
  * sde_plane_restore - restore hw state if previously power collapsed
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index c012f9d..2f900ec 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -251,21 +251,17 @@ static int sde_hdcp_1x_count_one(u8 *array, u8 len)
 static int sde_hdcp_1x_load_keys(void *input)
 {
 	int rc = 0;
-	bool use_sw_keys = false;
-	u32 reg_val;
-	u32 ksv_lsb_addr, ksv_msb_addr;
 	u32 aksv_lsb, aksv_msb;
 	u8 aksv[5];
 	struct dss_io_data *dp_ahb;
 	struct dss_io_data *dp_aux;
 	struct dss_io_data *dp_link;
-	struct dss_io_data *qfprom_io;
 	struct sde_hdcp_1x *hdcp = input;
 	struct sde_hdcp_reg_set *reg_set;
 
 	if (!hdcp || !hdcp->init_data.dp_ahb ||
 		!hdcp->init_data.dp_aux ||
-		!hdcp->init_data.qfprom_io) {
+		!hdcp->init_data.dp_link) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto end;
@@ -282,38 +278,12 @@ static int sde_hdcp_1x_load_keys(void *input)
 	dp_ahb = hdcp->init_data.dp_ahb;
 	dp_aux = hdcp->init_data.dp_aux;
 	dp_link = hdcp->init_data.dp_link;
-	qfprom_io = hdcp->init_data.qfprom_io;
 	reg_set = &hdcp->reg_set;
 
-	/* On compatible hardware, use SW keys */
-	reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
-	if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
-		reg_val = DSS_REG_R(qfprom_io,
-			QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
-			QFPROM_RAW_VERSION_4);
-
-		if (!(reg_val & BIT(23)))
-			use_sw_keys = true;
-	}
-
-	if (use_sw_keys) {
-		if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
-			pr_err("setting hdcp SW keys failed\n");
-			rc = -EINVAL;
-			goto end;
-		}
-	} else {
-		/* Fetch aksv from QFPROM, this info should be public. */
-		ksv_lsb_addr = HDCP_KSV_LSB;
-		ksv_msb_addr = HDCP_KSV_MSB;
-
-		if (hdcp->init_data.sec_access) {
-			ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
-			ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
-		}
-
-		aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
-		aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+	if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+		pr_err("setting hdcp SW keys failed\n");
+		rc = -EINVAL;
+		goto end;
 	}
 
 	pr_debug("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c3..ee5883f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 	struct drm_gem_object *obj = buffer->priv;
 	int ret = 0;
 
-	if (WARN_ON(!obj->filp))
-		return -EINVAL;
-
 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 27cb424..6f65846 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
 	drm_panel_remove(&panel->base);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
@@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 }
 
 static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 432cb46..fd7682b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
 
 /***** radeon AUX functions *****/
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
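
The swap helper above now stages whole dwords through small bounce buffers, which is why callers must provide ALIGN(num_bytes, 4) bytes on the padded side. A minimal user-space sketch of the same idea follows; it assumes a GCC/Clang toolchain for __builtin_bswap32, and the buffer names and test values are illustrative rather than taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3U) & ~3U)

/* Copy num_bytes while byte-swapping each 32-bit word.  Whole dwords are
 * staged through bounce buffers, so the source must have ALIGN(num_bytes, 4)
 * readable bytes; only num_bytes are written back to dst. */
static void copy_swap32(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
	uint32_t src_tmp[5], dst_tmp[5];	/* up to 20 bytes, as in the driver */
	uint8_t aligned = ALIGN4(num_bytes);
	unsigned int i;

	memcpy(src_tmp, src, aligned);
	for (i = 0; i < aligned / 4; i++)
		dst_tmp[i] = __builtin_bswap32(src_tmp[i]);
	memcpy(dst, dst_tmp, num_bytes);
}

int main(void)
{
	uint8_t src[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };	/* padded to 8 */
	uint8_t dst[8] = { 0 };
	unsigned int i;

	copy_swap32(dst, src, 6);
	for (i = 0; i < 6; i++)
		printf("%02x ", dst[i]);
	printf("\n");	/* prints "44 33 22 11 00 00" on a little-endian host */
	return 0;
}
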
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0daad44..af84705 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -252,7 +252,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	}
 
 	info->par = rfbdev;
-	info->skip_vt_switch = true;
 
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 13ba73f..8bd9e6c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3029,6 +3029,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 			max_sclk = 75000;
 			max_mclk = 80000;
 		}
+	} else if (rdev->family == CHIP_OLAND) {
+		if ((rdev->pdev->revision == 0xC7) ||
+		    (rdev->pdev->revision == 0x80) ||
+		    (rdev->pdev->revision == 0x81) ||
+		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0x87) ||
+		    (rdev->pdev->device == 0x6604) ||
+		    (rdev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
 	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 6e6c59a..223944a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -172,7 +172,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
 	ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Invalid format\n");
-		return val;
+		return ret;
 	}
 
 	regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bf6e216..7d22f98 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -473,6 +473,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	mutex_init(&fbo->wu_mutex);
 	fbo->moving = NULL;
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 5bfed6f..fef45ec 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -941,6 +941,8 @@
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H	0x1F84D
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L	0x1F84E
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H	0x1F84F
+#define A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L	0x1F888
+#define A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H	0x1F889
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 8d18fc2..7d11007 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -946,6 +946,8 @@ static struct {
 			"qcom,gpu-quirk-lmloadkill-disable" },
 	{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
 	{ ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
+	{ ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW,
+			"qcom,gpu-quirk-limit-uche-gbif-rw" },
 };
 
 static int adreno_of_get_power(struct adreno_device *adreno_dev,
@@ -1084,6 +1086,33 @@ static void adreno_cx_dbgc_probe(struct kgsl_device *device)
 		KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
 }
 
+static bool adreno_is_gpu_disabled(struct adreno_device *adreno_dev)
+{
+	unsigned int row0;
+	unsigned int pte_row0_msb[3];
+	int ret;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+	if (of_property_read_u32_array(device->pdev->dev.of_node,
+		"qcom,gpu-disable-fuse", pte_row0_msb, 3))
+		return false;
+	/*
+	 * Read the fuse value and disable the GPU driver if the fuse
+	 * is blown. By default (fuse value 0) the GPU is enabled.
+	 */
+	if (adreno_efuse_map(adreno_dev))
+		return false;
+
+	ret = adreno_efuse_read_u32(adreno_dev, pte_row0_msb[0], &row0);
+	adreno_efuse_unmap(adreno_dev);
+
+	if (ret)
+		return false;
+
+	return (row0 >> pte_row0_msb[2]) &
+			pte_row0_msb[1] ? true : false;
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
 	struct kgsl_device *device;
@@ -1100,6 +1129,11 @@ static int adreno_probe(struct platform_device *pdev)
 	device = KGSL_DEVICE(adreno_dev);
 	device->pdev = pdev;
 
+	if (adreno_is_gpu_disabled(adreno_dev)) {
+		pr_err("adreno: GPU is disabled on this device\n");
+		return -ENODEV;
+	}
+
 	/* Get the chip ID from the DT and set up target specific parameters */
 	adreno_identify_gpu(adreno_dev);
 
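
adreno_is_gpu_disabled() above treats the qcom,gpu-disable-fuse property as an <offset mask shift> triple: read the fuse row at the given offset, shift right, mask, and report the GPU as disabled when the field is non-zero. A small stand-alone sketch of that decode is below; the row value, mask and shift are made-up illustrations, not values from any real fuse map.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode an <offset mask shift> style fuse field: shift the raw row right,
 * then mask.  A non-zero field means the GPU is fused off; 0 (the unblown
 * default) leaves it enabled. */
static bool fuse_gpu_disabled(uint32_t row, uint32_t mask, uint32_t shift)
{
	return ((row >> shift) & mask) != 0;
}

int main(void)
{
	/* Hypothetical layout: the disable bit sits at bit 21 of the row. */
	uint32_t mask = 0x1, shift = 21;

	printf("blown:   %d\n", fuse_gpu_disabled(1u << 21, mask, shift));	/* 1 */
	printf("unblown: %d\n", fuse_gpu_disabled(0, mask, shift));		/* 0 */
	return 0;
}
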
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index bb173421..269c3a9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -144,6 +144,12 @@
 #define ADRENO_QUIRK_HFI_USE_REG BIT(6)
 /* Only set protected SECVID registers once */
 #define ADRENO_QUIRK_SECVID_SET_ONCE BIT(7)
+/*
+ * Limit number of read and write transactions from
+ * UCHE block to GBIF to avoid possible deadlock
+ * between GBIF, SMMU and MEMNOC.
+ */
+#define ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW BIT(8)
 
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE             0
@@ -1926,13 +1932,15 @@ static inline int adreno_vbif_clear_pending_transactions(
 		 * Need to release CX Halt explicitly in case of SW_RESET.
 		 * GX Halt release will be taken care by SW_RESET internally.
 		 */
-		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
-				GBIF_HALT_REQUEST);
-		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
-				VBIF_RESET_ACK_MASK);
-		if (ret)
-			return ret;
+		if (gpudev->gx_is_on(adreno_dev)) {
+			adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
+					GBIF_HALT_REQUEST);
+			ret = adreno_wait_for_vbif_halt_ack(device,
+					ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
+					VBIF_RESET_ACK_MASK);
+			if (ret)
+				return ret;
+		}
 
 		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
 		ret = adreno_wait_for_vbif_halt_ack(device,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b682abe..6275c19 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -55,7 +55,6 @@ static const struct adreno_vbif_data a630_vbif[] = {
 
 static const struct adreno_vbif_data a615_gbif[] = {
 	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
-	{A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9},
 	{0, 0},
 };
 
@@ -378,6 +377,21 @@ static void _update_always_on_regs(struct adreno_device *adreno_dev)
 		A6XX_CP_ALWAYS_ON_COUNTER_HI;
 }
 
+static uint64_t read_AO_counter(struct kgsl_device *device)
+{
+	unsigned int l, h, h1;
+
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h);
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1);
+
+	if (h == h1)
+		return (uint64_t) l | ((uint64_t) h << 32);
+
+	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	return (uint64_t) l | ((uint64_t) h1 << 32);
+}
+
 static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -670,6 +684,9 @@ static void a6xx_start(struct adreno_device *adreno_dev)
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
 
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
+		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);
+
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
 
@@ -1548,7 +1565,7 @@ static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
 #define SP_CLK_OFF		BIT(4)
 #define GX_GDSC_POWER_OFF	BIT(6)
 #define GX_CLK_OFF		BIT(7)
-
+#define is_on(val)		(!(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)))
 /*
  * a6xx_gx_is_on() - Check if GX is on using pwr status register
  * @adreno_dev - Pointer to adreno_device
@@ -1564,7 +1581,7 @@ static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
 		return true;
 
 	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
-	return !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
+	return is_on(val);
 }
 
 /*
@@ -1940,48 +1957,65 @@ static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
 	return true;
 }
 
+static bool idle_transition_complete(unsigned int idle_level,
+	unsigned int gmu_power_reg,
+	unsigned int sptprac_clk_reg)
+{
+	if (idle_level != gmu_power_reg)
+		return false;
+
+	switch (idle_level) {
+	case GPU_HW_IFPC:
+		if (is_on(sptprac_clk_reg))
+			return false;
+		break;
+	/* other GMU idle levels can be added here */
+	case GPU_HW_ACTIVE:
+	default:
+		break;
+	}
+	return true;
+}
+
 static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int reg;
+	unsigned int reg, reg1;
 	unsigned long t;
+	uint64_t ts1, ts2, ts3;
 
 	if (!kgsl_gmu_isenabled(device))
 		return 0;
 
+	ts1 = read_AO_counter(device);
+
 	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
-	while (!time_after(jiffies, t)) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+	do {
+		kgsl_gmu_regread(device,
+			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+		kgsl_gmu_regread(device,
+			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
 
-		/* SPTPRAC PC has the same idle level as IFPC */
-		if ((reg == gmu->idle_level) ||
-				(gmu->idle_level == GPU_HW_SPTP_PC &&
-				reg == GPU_HW_IFPC)) {
-			/* IFPC is not complete until GX is off */
-			if (gmu->idle_level != GPU_HW_IFPC ||
-					!gpudev->gx_is_on(adreno_dev))
-				return 0;
-		}
-
+		if (idle_transition_complete(gmu->idle_level, reg, reg1))
+			return 0;
 		/* Wait 100us to reduce unnecessary AHB bus traffic */
 		usleep_range(10, 100);
-	}
+	} while (!time_after(jiffies, t));
 
+	ts2 = read_AO_counter(device);
 	/* Check one last time */
-	adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-	if ((reg == gmu->idle_level) ||
-			(gmu->idle_level == GPU_HW_SPTP_PC &&
-			reg == GPU_HW_IFPC)) {
-		if (gmu->idle_level != GPU_HW_IFPC ||
-				!gpudev->gx_is_on(adreno_dev))
-			return 0;
-	}
 
-	WARN(1, "Timeout waiting for lowest idle level: %d\n", reg);
+	kgsl_gmu_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
+	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
+
+	if (idle_transition_complete(gmu->idle_level, reg, reg1))
+		return 0;
+
+	ts3 = read_AO_counter(device);
+	WARN(1, "Timeout waiting for lowest idle: %08x %llx %llx %llx %x\n",
+		reg, ts1, ts2, ts3, reg1);
+
 	return -ETIMEDOUT;
 }
 
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index cda7a5b..00ddddb 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,12 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
 static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
 				unsigned int state);
 static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+				const char *name);
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+				struct clk *clk, const char *name);
 
 /**
  * _record_pwrevent() - Record the history of the new event
@@ -260,7 +266,8 @@ int kgsl_clk_set_rate(struct kgsl_device *device,
 		clear_bit(GMU_DCVS_REPLAY, &gmu->flags);
 	} else
 		/* Linux clock driver scales GPU freq */
-		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
+		ret = kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
+			pl->gpu_freq, clocks[0]);
 
 	if (ret)
 		KGSL_PWR_ERR(device, "GPU clk freq set failure: %d\n", ret);
@@ -477,9 +484,12 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
 	if (pwr->gpu_bimc_int_clk) {
 		if (pwr->active_pwrlevel == 0 &&
 				!pwr->gpu_bimc_interface_enabled) {
-			clk_set_rate(pwr->gpu_bimc_int_clk,
-					pwr->gpu_bimc_int_clk_freq);
-			clk_prepare_enable(pwr->gpu_bimc_int_clk);
+			kgsl_pwrctrl_clk_set_rate(pwr->gpu_bimc_int_clk,
+					pwr->gpu_bimc_int_clk_freq,
+					"bimc_gpu_clk");
+			_bimc_clk_prepare_enable(device,
+					pwr->gpu_bimc_int_clk,
+					"bimc_gpu_clk");
 			pwr->gpu_bimc_interface_enabled = 1;
 		} else if (pwr->previous_pwrlevel == 0
 				&& pwr->gpu_bimc_interface_enabled) {
@@ -1740,24 +1750,23 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
-
-				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-					clk_prepare(pwr->grp_clks[i]);
 			}
-			/*
-			 * as last step, enable grp_clk
-			 * this is to let GPU interrupt to come
-			 */
+
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				clk_enable(pwr->grp_clks[i]);
+				_gpu_clk_prepare_enable(device,
+						pwr->grp_clks[i], clocks[i]);
+
 			/* Enable the gpu-bimc-interface clocks */
 			if (pwr->gpu_bimc_int_clk) {
 				if (pwr->active_pwrlevel == 0 &&
 					!pwr->gpu_bimc_interface_enabled) {
-					clk_set_rate(pwr->gpu_bimc_int_clk,
-						pwr->gpu_bimc_int_clk_freq);
-					clk_prepare_enable(
-						pwr->gpu_bimc_int_clk);
+					kgsl_pwrctrl_clk_set_rate(
+						pwr->gpu_bimc_int_clk,
+						pwr->gpu_bimc_int_clk_freq,
+						"bimc_gpu_clk");
+					_bimc_clk_prepare_enable(device,
+						pwr->gpu_bimc_int_clk,
+						"bimc_gpu_clk");
 					pwr->gpu_bimc_interface_enabled = 1;
 				}
 			}
@@ -2085,7 +2094,54 @@ static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level)
 	rate = clk_round_rate(pwr->grp_clks[pwr->isense_clk_indx],
 		level > pwr->isense_clk_on_level ?
 		KGSL_XO_CLK_FREQ : KGSL_ISENSE_CLK_FREQ);
-	return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate);
+	return kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx],
+			rate, clocks[pwr->isense_clk_indx]);
+}
+
+/*
+ * _gpu_clk_prepare_enable - Enable the specified GPU clock
+ * Try once to enable it and then BUG() for debug
+ */
+static void _gpu_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret;
+
+	if (device->state == KGSL_STATE_NAP) {
+		ret = clk_enable(clk);
+		if (ret)
+			goto err;
+		return;
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (!ret)
+		return;
+err:
+	/* Failure is fatal so BUG() to facilitate debug */
+	KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+/*
+ * _bimc_clk_prepare_enable - Enable the specified BIMC clock
+ * Try once to enable it and then BUG() for debug
+ */
+static void _bimc_clk_prepare_enable(struct kgsl_device *device,
+		struct clk *clk, const char *name)
+{
+	int ret = clk_prepare_enable(clk);
+	/* Failure is fatal so BUG() to facilitate debug */
+	if (ret)
+		KGSL_DRV_FATAL(device, "KGSL:%s enable error:%d\n", name, ret);
+}
+
+static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
+		const char *name)
+{
+	int ret = clk_set_rate(grp_clk, freq);
+
+	WARN(ret, "KGSL:%s set freq %d failed:%d\n", name, freq, ret);
+	return ret;
 }
 
 static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
@@ -2224,8 +2280,11 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
 
 	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
-	clk_set_rate(pwr->grp_clks[6],
-		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
+	if (pwr->grp_clks[6] != NULL)
+		kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
+			clk_round_rate(pwr->grp_clks[6],
+			KGSL_RBBMTIMER_CLK_FREQ),
+			clocks[6]);
 
 	_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);
 
@@ -2732,9 +2791,11 @@ _aware(struct kgsl_device *device)
 				 * GPU will not be powered on
 				 */
 				WARN_ONCE(1, "Failed to recover GMU\n");
-				device->snapshot->recovered = false;
+				if (device->snapshot)
+					device->snapshot->recovered = false;
 			} else {
-				device->snapshot->recovered = true;
+				if (device->snapshot)
+					device->snapshot->recovered = true;
 			}
 
 			clear_bit(GMU_FAULT, &gmu->flags);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c..8eed456 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,11 +175,11 @@
 	Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-	tristate "Chicony Tactical pad"
+	tristate "Chicony devices"
 	depends on HID
 	default !EXPERT
 	---help---
-	Support for Chicony Tactical pad.
+	Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
 	tristate "Corsair devices"
@@ -190,6 +190,7 @@
 
 	Supported devices:
 	- Vengeance K90
+	- Scimitar PRO RGB
 
 config HID_PRODIKEYS
 	tristate "Prodikeys PC-MIDI Keyboard support"
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index bc3cec1..f04ed9a 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d42ace8..f2a7483 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1874,6 +1874,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1908,6 +1909,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -2106,6 +2108,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index c0303f6..9ba5d98 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -3,8 +3,10 @@
  *
  * Supported devices:
  *  - Vengeance K90 Keyboard
+ *  - Scimitar PRO RGB Gaming Mouse
  *
  * Copyright (c) 2015 Clement Vuchener
+ * Copyright (c) 2017 Oscar Campos
  */
 
 /*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
 	return 0;
 }
 
+/*
+ * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is
+ * not parseable because it defines two consecutive Logical Minimum items
+ * for the Usage Page (Consumer) in rdesc bytes 75 and 77; byte 77 is 0x16
+ * but should obviously be 0x26, a 16-bit Logical Maximum. This prevents
+ * proper parsing of the report descriptor because the Logical Minimum
+ * ends up larger than the Logical Maximum.
+ *
+ * This driver fixes the report descriptor for:
+ * - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
+ */
+
+static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+        unsigned int *rsize)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+		/*
+		 * The Corsair Scimitar RGB Pro report descriptor is broken:
+		 * it defines two consecutive Logical Minimum items for the
+		 * Consumer Application. Byte 77 should be 0x26, declaring a
+		 * 16-bit Logical Maximum, but it is 0x16 (Logical Minimum)
+		 * instead.
+		 */
+		switch (hdev->product) {
+		case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
+			if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
+			&& rdesc[78] == 0xff && rdesc[79] == 0x0f) {
+				hid_info(hdev, "Fixing up report descriptor\n");
+				rdesc[77] = 0x26;
+			}
+			break;
+		}
+
+	}
+	return rdesc;
+}
+
 static const struct hid_device_id corsair_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
 		.driver_data = CORSAIR_USE_K90_MACRO |
 			       CORSAIR_USE_K90_BACKLIGHT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
+            USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
 	{}
 };
 
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
 	.event = corsair_event,
 	.remove = corsair_remove,
 	.input_mapping = corsair_input_mapping,
+	.report_fixup = corsair_mouse_report_fixup,
 };
 
 module_hid_driver(corsair_driver);
 
 MODULE_LICENSE("GPL");
+/* Original K90 driver author */
 MODULE_AUTHOR("Clement Vuchener");
+/* Scimitar PRO RGB driver author */
+MODULE_AUTHOR("Oscar Campos");
 MODULE_DESCRIPTION("HID driver for Corsair devices");
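
The single-byte fixup above follows directly from the HID short-item encoding: bits 1:0 of the prefix byte give the data size, bits 3:2 the item type and bits 7:4 the tag, so 0x16 is a Global Logical Minimum with two data bytes while 0x26 is a Global Logical Maximum with two data bytes. A tiny stand-alone decoder for those two prefixes (the helper name is just for illustration):

#include <stdint.h>
#include <stdio.h>

/* Decode a HID short-item prefix byte: size in bits 1:0 (3 encodes 4 bytes),
 * type in bits 3:2 (0 Main, 1 Global, 2 Local), tag in bits 7:4. */
static void decode_prefix(uint8_t p)
{
	static const uint8_t sizes[4] = { 0, 1, 2, 4 };
	uint8_t size = sizes[p & 0x3];
	uint8_t type = (p >> 2) & 0x3;
	uint8_t tag  = (p >> 4) & 0xf;	/* Global tag 1 = Logical Min, 2 = Logical Max */

	printf("0x%02x: tag=%u type=%u size=%u\n", p, tag, type, size);
}

int main(void)
{
	decode_prefix(0x16);	/* the broken byte: Logical Minimum, 2 data bytes */
	decode_prefix(0x26);	/* the fixed-up byte: Logical Maximum, 2 data bytes */
	return 0;
}
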
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index e06c134..7af7781 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -188,6 +188,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 				 HID_REQ_GET_REPORT);
 	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
 		hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+		if (ret >= 0)
+			ret = -EIO;
 		goto exit;
 	}
 
@@ -197,8 +199,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
 				 HID_REQ_SET_REPORT);
-	if (ret < 0) {
+	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
 		hid_err(hdev, "error setting GPIO config: %d\n", ret);
+		if (ret >= 0)
+			ret = -EIO;
 		goto exit;
 	}
 
@@ -206,7 +210,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
 exit:
 	mutex_unlock(&dev->lock);
-	return ret < 0 ? ret : -EIO;
+	return ret;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 08fd3f8..244b97c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -277,6 +277,9 @@
 #define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
 #define USB_DEVICE_ID_CORSAIR_STRAFE    0x1b15
 #define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
+#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE  0x1b38
+#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE  0x1b39
+#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB  0x1b3e
 
 #define USB_VENDOR_ID_CREATIVELABS	0x041e
 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
@@ -558,6 +561,7 @@
 
 #define USB_VENDOR_ID_JESS		0x0c45
 #define USB_DEVICE_ID_JESS_YUREX	0x1010
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD	0x5112
 
 #define USB_VENDOR_ID_JESS2		0x0f30
 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
@@ -1076,6 +1080,7 @@
 
 #define USB_VENDOR_ID_XIN_MO			0x16c0
 #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE	0x05e1
+#define USB_DEVICE_ID_THT_2P_ARCADE		0x75e1
 
 #define USB_VENDOR_ID_XIROKU		0x1477
 #define USB_DEVICE_ID_XIROKU_SPX	0x1006
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227..9ad7731 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
 
 static const struct hid_device_id xinmo_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
 	{ }
 };
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2b16207..1916f80 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 75126e4..4442007 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -61,7 +61,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
 static const char fcopy_devname[] = "vmbus/hv_fcopy";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * This state maintains the version number registered by the daemon.
  */
@@ -322,7 +321,6 @@ static void fcopy_on_reset(void)
 
 	if (cancel_delayed_work_sync(&fcopy_timeout_work))
 		fcopy_respond_to_host(HV_E_FAIL);
-	complete(&release_event);
 }
 
 int hv_fcopy_init(struct hv_util_service *srv)
@@ -330,7 +328,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
 	recv_buffer = srv->recv_buffer;
 	fcopy_transaction.recv_channel = srv->channel;
 
-	init_completion(&release_event);
 	/*
 	 * When this driver loads, the user level daemon that
 	 * processes the host requests may not yet be running.
@@ -352,5 +349,4 @@ void hv_fcopy_deinit(void)
 	fcopy_transaction.state = HVUTIL_DEVICE_DYING;
 	cancel_delayed_work_sync(&fcopy_timeout_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 3abfc59..5e1fdc8 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -88,7 +88,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 static const char kvp_devname[] = "vmbus/hv_kvp";
 static u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 /*
  * Register the kernel component with the user-level daemon.
  * As part of this registration, pass the LIC version number.
@@ -717,7 +716,6 @@ static void kvp_on_reset(void)
 	if (cancel_delayed_work_sync(&kvp_timeout_work))
 		kvp_respond_to_host(NULL, HV_E_FAIL);
 	kvp_transaction.state = HVUTIL_DEVICE_INIT;
-	complete(&release_event);
 }
 
 int
@@ -726,7 +724,6 @@ hv_kvp_init(struct hv_util_service *srv)
 	recv_buffer = srv->recv_buffer;
 	kvp_transaction.recv_channel = srv->channel;
 
-	init_completion(&release_event);
 	/*
 	 * When this driver loads, the user level daemon that
 	 * processes the host requests may not yet be running.
@@ -750,5 +747,4 @@ void hv_kvp_deinit(void)
 	cancel_delayed_work_sync(&kvp_timeout_work);
 	cancel_work_sync(&kvp_sendkey_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a76e3db..a670713 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -66,7 +66,6 @@ static int dm_reg_value;
 static const char vss_devname[] = "vmbus/hv_vss";
 static __u8 *recv_buffer;
 static struct hvutil_transport *hvt;
-static struct completion release_event;
 
 static void vss_timeout_func(struct work_struct *dummy);
 static void vss_handle_request(struct work_struct *dummy);
@@ -331,13 +330,11 @@ static void vss_on_reset(void)
 	if (cancel_delayed_work_sync(&vss_timeout_work))
 		vss_respond_to_host(HV_E_FAIL);
 	vss_transaction.state = HVUTIL_DEVICE_INIT;
-	complete(&release_event);
 }
 
 int
 hv_vss_init(struct hv_util_service *srv)
 {
-	init_completion(&release_event);
 	if (vmbus_proto_version < VERSION_WIN8_1) {
 		pr_warn("Integration service 'Backup (volume snapshot)'"
 			" not supported on this host version.\n");
@@ -368,5 +365,4 @@ void hv_vss_deinit(void)
 	cancel_delayed_work_sync(&vss_timeout_work);
 	cancel_work_sync(&vss_handle_request_work);
 	hvutil_transport_destroy(hvt);
-	wait_for_completion(&release_event);
 }
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a95..4402a71 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
 	 * connects back.
 	 */
 	hvt_reset(hvt);
-	mutex_unlock(&hvt->lock);
 
 	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
-		hvt_transport_free(hvt);
+		complete(&hvt->release);
+
+	mutex_unlock(&hvt->lock);
 
 	return 0;
 }
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
 
 	init_waitqueue_head(&hvt->outmsg_q);
 	mutex_init(&hvt->lock);
+	init_completion(&hvt->release);
 
 	spin_lock(&hvt_list_lock);
 	list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
 	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
 		cn_del_callback(&hvt->cn_id);
 
-	if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
-		hvt_transport_free(hvt);
+	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+		wait_for_completion(&hvt->release);
+
+	hvt_transport_free(hvt);
 }
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f522..79afb62 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@ struct hvutil_transport {
 	int outmsg_len;                     /* its length */
 	wait_queue_head_t outmsg_q;         /* poll/read wait queue */
 	struct mutex lock;                  /* protects struct members */
+	struct completion release;          /* synchronize with fd release */
 };
 
 struct hvutil_transport *hvutil_transport_init(const char *name,
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87..975c43d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
 		else
 			err = atk_read_value_new(sensor, value);
 
+		if (err)
+			return err;
+
 		sensor->is_valid = true;
 		sensor->last_updated = jiffies;
 		sensor->cached_value = *value;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 1bf22ef..0f1f642 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
 #define JC42_REG_TEMP		0x05
 #define JC42_REG_MANID		0x06
 #define JC42_REG_DEVICEID	0x07
+#define JC42_REG_SMBUS		0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT	15
@@ -73,6 +75,9 @@ static const unsigned short normal_i2c[] = {
 #define ONS_MANID		0x1b09  /* ON Semiconductor */
 #define STM_MANID		0x104a  /* ST Microelectronics */
 
+/* SMBUS register */
+#define SMBUS_STMOUT		BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -476,6 +481,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	data->extended = !!(cap & JC42_CAP_RANGE);
 
+	if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+		int smbus;
+
+		/*
+		 * Not all chips support this register, but from a
+		 * quick read of various datasheets no chip appears
+		 * incompatible with the below attempt to disable
+		 * the timeout. And the whole thing is opt-in...
+		 */
+		smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+		if (smbus < 0)
+			return smbus;
+		i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+					     smbus | SMBUS_STMOUT);
+	}
+
 	config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
 	if (config < 0)
 		return config;
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275..281491c 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
 		data->pwm[channel] = val << 8;
 		err = i2c_smbus_write_word_swapped(client,
 						   MAX31790_REG_PWMOUT(channel),
-						   val);
+						   data->pwm[channel]);
 		break;
 	case hwmon_pwm_enable:
 		fan_config = data->fan_config[channel];
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 4b5e206..6fde46e 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2618,6 +2618,7 @@ static int32_t qpnp_vadc_init_thermal(struct qpnp_vadc_chip *vadc,
 				&qpnp_vadc_thermal_ops);
 			if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
 				pr_err("thermal device register failed.\n");
+				rc = PTR_ERR(vadc->vadc_therm_chan[i].tz_dev);
 				goto thermal_err_sens;
 			}
 		}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 36e3db2..145af90 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -20,6 +20,7 @@
 #include <linux/bitmap.h>
 #include <linux/of.h>
 #include <linux/coresight.h>
+#include <soc/qcom/memory_dump.h>
 
 #include "coresight-priv.h"
 
@@ -225,6 +226,7 @@ struct dsb_dataset {
 struct mcmb_dataset {
 	uint8_t		mcmb_trig_lane;
 	uint8_t		mcmb_lane_select;
+	uint32_t		*mcmb_msr_dump_ptr;
 };
 
 struct cmb_dataset {
@@ -609,6 +611,11 @@ static void __tpdm_enable_mcmb(struct tpdm_drvdata *drvdata)
 	val = val & ~BM(10, 17);
 	val = val | (BMVAL(mcmb->mcmb_lane_select, 0, 7) << 10);
 
+	if (mcmb->mcmb_msr_dump_ptr) {
+		for (i = 0; i < TPDM_CMB_MAX_MSR; i++)
+			mcmb->mcmb_msr_dump_ptr[i] = drvdata->cmb->msr[i];
+	}
+
 	tpdm_writel(drvdata, val, TPDM_CMB_CR);
 	/* Set the enable bit */
 	val = val | BIT(0);
@@ -3910,6 +3917,12 @@ static int tpdm_datasets_alloc(struct tpdm_drvdata *drvdata)
 						  GFP_KERNEL);
 		if (!drvdata->cmb->mcmb)
 			return -ENOMEM;
+
+		if (of_property_read_bool(drvdata->dev->of_node,
+						    "qcom,dump-enable"))
+			drvdata->cmb->mcmb->mcmb_msr_dump_ptr =
+				(uint32_t *)get_msm_dump_ptr(
+						MSM_DUMP_DATA_TPDM_SWAO_MCMB);
 	}
 	return 0;
 }
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 63b5db4..e0f3244 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -95,6 +95,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Gemini Lake */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 6869712..45d6771 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 		goto err_clk_dis;
 	}
 
-	ret = i2c_add_adapter(&id->adap);
-	if (ret < 0)
-		goto err_clk_dis;
-
 	/*
 	 * Cadence I2C controller has a bug wherein it generates
 	 * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
 	 */
 	cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
 
+	ret = i2c_add_adapter(&id->adap);
+	if (ret < 0)
+		goto err_clk_dis;
+
 	dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
 		 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index eb3627f..e6fe21a 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1592,6 +1592,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Default timeout in interrupt mode: 200 ms */
 	priv->adapter.timeout = HZ / 5;
 
+	if (dev->irq == IRQ_NOTCONNECTED)
+		priv->features &= ~FEATURE_IRQ;
+
 	if (priv->features & FEATURE_IRQ) {
 		u16 pcictl, pcists;
 
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d34..c811af4 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
 	}
 
 	if (riic->is_last || riic->err) {
-		riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+		riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
 		writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+	} else {
+		/* Transfer is complete, but do not send STOP */
+		riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+		complete(&riic->msg_done);
 	}
 
 	return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 472641f..af05e20 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -269,6 +269,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
 
 		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
 		conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+		conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
 		usleep_range(conv_time, conv_time + 1);
 		data->conv_invalid = false;
 	}
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index fe89b68..263e972 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
 	if (ret < 0)
 		dev_err(&chip->client->dev, "Error writing reg_cmd\n");
 
-	return 0;
+	return ret;
 }
 
 /**
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 809a028..a09d6ee 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1482,7 +1482,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
 	return id_priv;
 }
 
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
 {
 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
 }
@@ -1877,7 +1877,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id = NULL;
 	struct rdma_cm_event event;
 	struct net_device *net_dev;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
 	if (IS_ERR(listen_id))
@@ -3309,7 +3310,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	struct ib_cm_sidr_req_param req;
 	struct ib_cm_id	*id;
 	void *private_data;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
@@ -3366,7 +3368,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	struct rdma_route *route;
 	void *private_data;
 	struct ib_cm_id	*id;
-	int offset, ret;
+	u8 offset;
+	int ret;
 
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a754fc7..ff12b8d 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -196,7 +196,7 @@ void ib_free_cq(struct ib_cq *cq)
 		irq_poll_disable(&cq->iop);
 		break;
 	case IB_POLL_WORKQUEUE:
-		flush_work(&cq->work);
+		cancel_work_sync(&cq->work);
 		break;
 	default:
 		WARN_ON_ONCE(1);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381a..b55adf5 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -171,7 +171,7 @@ struct t4_cqe {
 			__be32 msn;
 		} rcqe;
 		struct {
-			u32 stag;
+			__be32 stag;
 			u16 nada2;
 			u16 cidx;
 		} scqe;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 010c709..58c531d 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
 	__u16  wrid;
 	__u8   r1[3];
 	__u8   len16;
-	__u32  r2;
-	__u32  stag;
+	__be32  r2;
+	__be32  stag;
 	struct fw_ri_tpte tpte;
 	__u64  pbl[2];
 };
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 24d0820..4682909 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9769,7 +9769,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
 		goto unimplemented;
 
 	case HFI1_IB_CFG_OP_VLS:
-		val = ppd->vls_operational;
+		val = ppd->actual_vls_operational;
 		break;
 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 6fd043b..7db2001 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -159,6 +159,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 		return NOTIFY_DONE;
 
 	iwdev = &hdl->device;
+	if (iwdev->init_state < INET_NOTIFIER)
+		return NOTIFY_DONE;
+
 	netdev = iwdev->ldev->netdev;
 	upper_dev = netdev_master_upper_dev_get(netdev);
 	if (netdev != event_netdev)
@@ -231,6 +234,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
 		return NOTIFY_DONE;
 
 	iwdev = &hdl->device;
+	if (iwdev->init_state < INET_NOTIFIER)
+		return NOTIFY_DONE;
+
 	netdev = iwdev->ldev->netdev;
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;
@@ -280,6 +286,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
 		if (!iwhdl)
 			return NOTIFY_DONE;
 		iwdev = &iwhdl->device;
+		if (iwdev->init_state < INET_NOTIFIER)
+			return NOTIFY_DONE;
 		p = (__be32 *)neigh->primary_key;
 		i40iw_copy_ip_ntohl(local_ipaddr, p);
 		if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c224543..709d649 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1669,7 +1669,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
 					      ilog2(dev->dev->caps.max_gso_sz);
 		else
-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			pr_err("path MTU (%u) is invalid\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 786f640..a2120ff 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2514,6 +2514,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	qp->real_qp    = qp;
 	qp->uobject    = NULL;
 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
+	qp->send_cq    = init_attr->send_cq;
+	qp->recv_cq    = init_attr->recv_cq;
 
 	attr->qp_state = IB_QPS_INIT;
 	attr->port_num = 1;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890eb..eccf703 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -404,9 +404,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
 	}
 
 	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-	else
 		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
 	packet->roce_mode = roce_mode;
 	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 4ba019e..35d5b89 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1653,7 +1653,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 	int status = 0;
 
 	if (new_state == qp->state)
-		return 1;
+		return 0;
 
 	switch (qp->state) {
 	case QED_ROCE_QP_STATE_RESET:
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b81..6b712ee 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
 
 	spin_lock_irq(&rdi->mmap_offset_lock);
 	if (rdi->mmap_offset == 0)
-		rdi->mmap_offset = PAGE_SIZE;
+		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
 	ip->offset = rdi->mmap_offset;
-	rdi->mmap_offset += size;
+	rdi->mmap_offset += ALIGN(size, SHMLBA);
 	spin_unlock_irq(&rdi->mmap_offset_lock);
 
 	INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c..bd812e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
 	spin_lock_bh(&rxe->mmap_offset_lock);
 
 	if (rxe->mmap_offset == 0)
-		rxe->mmap_offset = PAGE_SIZE;
+		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
 
 	ip->info.offset = rxe->mmap_offset;
-	rxe->mmap_offset += size;
+	rxe->mmap_offset += ALIGN(size, SHMLBA);
 
 	spin_unlock_bh(&rxe->mmap_offset_lock);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ee26a1b..1c4e5b2 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -412,6 +412,8 @@ void *rxe_alloc(struct rxe_pool *pool)
 	elem = kmem_cache_zalloc(pool_cache(pool),
 				 (pool->flags & RXE_POOL_ATOMIC) ?
 				 GFP_ATOMIC : GFP_KERNEL);
+	if (!elem)
+		return NULL;
 
 	elem->pool = pool;
 	kref_init(&elem->ref_cnt);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d08478..5b0ca35 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -726,11 +726,11 @@ int rxe_requester(void *arg)
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
 	if (ret) {
 		qp->need_req_skb = 1;
-		kfree_skb(skb);
 
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (ret == -EAGAIN) {
+			kfree_skb(skb);
 			rxe_run_task(&qp->req.task, 1);
 			goto exit;
 		}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 39101b1..39e31b2 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -799,18 +799,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 		/* Unreachable */
 		WARN_ON(1);
 
-	/* We successfully processed this new request. */
-	qp->resp.msn++;
-
 	/* next expected psn, read handles this separately */
 	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
 
 	qp->resp.opcode = pkt->opcode;
 	qp->resp.status = IB_WC_SUCCESS;
 
-	if (pkt->mask & RXE_COMP_MASK)
+	if (pkt->mask & RXE_COMP_MASK) {
+		/* We successfully processed this new request. */
+		qp->resp.msn++;
 		return RESPST_COMPLETE;
-	else if (qp_type(qp) == IB_QPT_RC)
+	} else if (qp_type(qp) == IB_QPT_RC)
 		return RESPST_ACKNOWLEDGE;
 	else
 		return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 830fecb..335bd2c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1177,10 +1177,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 		ipoib_ib_dev_down(dev);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
+		rtnl_lock();
 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 			ipoib_ib_dev_stop(dev);
-		if (ipoib_ib_dev_open(dev) != 0)
+
+		result = ipoib_ib_dev_open(dev);
+		rtnl_unlock();
+		if (result)
 			return;
+
 		if (netif_queue_stopped(dev))
 			netif_start_queue(dev);
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c..cb48e22 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@ struct iser_fr_desc {
 	struct list_head		  list;
 	struct iser_reg_resources	  rsc;
 	struct iser_pi_context		 *pi_ctx;
+	struct list_head                  all_list;
 };
 
 /**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
 	struct list_head        list;
 	spinlock_t              lock;
 	int                     size;
+	struct list_head        all_list;
 };
 
 /**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a4b791d..bc6f5bb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
 	int i, ret;
 
 	INIT_LIST_HEAD(&fr_pool->list);
+	INIT_LIST_HEAD(&fr_pool->all_list);
 	spin_lock_init(&fr_pool->lock);
 	fr_pool->size = 0;
 	for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
 		}
 
 		list_add_tail(&desc->list, &fr_pool->list);
+		list_add_tail(&desc->all_list, &fr_pool->all_list);
 		fr_pool->size++;
 	}
 
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
 	struct iser_fr_desc *desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&fr_pool->list))
+	if (list_empty(&fr_pool->all_list))
 		return;
 
 	iser_info("freeing conn %p fr pool\n", ib_conn);
 
-	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
-		list_del(&desc->list);
+	list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+		list_del(&desc->all_list);
 		iser_free_reg_res(&desc->rsc);
 		if (desc->pi_ctx)
 			iser_free_pi_ctx(desc->pi_ctx);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1eee8f7..84f9185 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -648,12 +648,19 @@ static void srp_path_rec_completion(int status,
 static int srp_lookup_path(struct srp_rdma_ch *ch)
 {
 	struct srp_target_port *target = ch->target;
-	int ret;
+	int ret = -ENODEV;
 
 	ch->path.numb_path = 1;
 
 	init_completion(&ch->done);
 
+	/*
+	 * Prevent the SCSI host from being removed by srp_remove_target()
+	 * before srp_path_rec_completion() is called.
+	 */
+	if (!scsi_host_get(target->scsi_host))
+		goto out;
+
 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
 					       target->srp_host->srp_dev->dev,
 					       target->srp_host->port,
@@ -667,18 +674,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
 					       GFP_KERNEL,
 					       srp_path_rec_completion,
 					       ch, &ch->path_query);
-	if (ch->path_query_id < 0)
-		return ch->path_query_id;
+	ret = ch->path_query_id;
+	if (ret < 0)
+		goto put;
 
 	ret = wait_for_completion_interruptible(&ch->done);
 	if (ret < 0)
-		return ret;
+		goto put;
 
-	if (ch->status < 0)
+	ret = ch->status;
+	if (ret < 0)
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "Path record query failed\n");
 
-	return ch->status;
+put:
+	scsi_host_put(target->scsi_host);
+
+out:
+	return ret;
 }
 
 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b1f69e..b974897 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2750,7 +2750,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
 {
 	const char *p;
 	unsigned len, count, leading_zero_bytes;
-	int ret, rc;
+	int ret;
 
 	p = name;
 	if (strncasecmp(p, "0x", 2) == 0)
@@ -2762,10 +2762,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
 	count = min(len / 2, 16U);
 	leading_zero_bytes = 16 - count;
 	memset(i_port_id, 0, leading_zero_bytes);
-	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
-	if (rc < 0)
-		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
-	ret = 0;
+	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+	if (ret < 0)
+		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
 out:
 	return ret;
 }
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 108ed032..90493b1 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -1181,12 +1181,6 @@ static int hbtp_dsi_panel_suspend(struct hbtp_data *ts)
 			pr_err("%s: failed to disable GPIO pins\n", __func__);
 			goto err_pin_disable;
 		}
-
-		rc = hbtp_pdev_power_on(ts, false);
-		if (rc) {
-			pr_err("%s: failed to disable power\n", __func__);
-			goto err_power_disable;
-		}
 		ts->power_suspended = true;
 		if (ts->input_dev) {
 			kobject_uevent_env(&ts->input_dev->dev.kobj,
@@ -1214,8 +1208,7 @@ static int hbtp_dsi_panel_suspend(struct hbtp_data *ts)
 	}
 	mutex_unlock(&hbtp->mutex);
 	return 0;
-err_power_disable:
-	hbtp_pinctrl_enable(ts, true);
+
 err_pin_disable:
 	mutex_unlock(&hbtp->mutex);
 	return rc;
@@ -1234,12 +1227,6 @@ static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts)
 			mutex_unlock(&hbtp->mutex);
 			return 0;
 		}
-		rc = hbtp_pdev_power_on(ts, true);
-		if (rc) {
-			pr_err("%s: failed to enable panel power\n", __func__);
-			goto err_power_on;
-		}
-
 		rc = hbtp_pinctrl_enable(ts, true);
 
 		if (rc) {
@@ -1287,8 +1274,6 @@ static int hbtp_dsi_panel_early_resume(struct hbtp_data *ts)
 
 err_pin_enable:
 	hbtp_pdev_power_on(ts, false);
-err_power_on:
-	mutex_unlock(&hbtp->mutex);
 	return rc;
 }
 
@@ -1359,6 +1344,12 @@ static int hbtp_pdev_probe(struct platform_device *pdev)
 		hbtp->vcc_dig = vcc_dig;
 	}
 
+	error = hbtp_pdev_power_on(hbtp, true);
+	if (error) {
+		pr_err("%s: failed to power on\n", __func__);
+		return error;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index 339f94c..febcd9c 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2029,7 +2029,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Couldn't find reg in node = %s rc = %d\n",
 			pdev->dev.of_node->full_name, rc);
-		return rc;
+		goto err_out;
 	}
 	pon->base = base;
 
@@ -2041,7 +2041,8 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 			pon->num_pon_config++;
 		} else {
 			pr_err("Unknown sub-node\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto err_out;
 		}
 	}
 
@@ -2053,7 +2054,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 	if (rc) {
 		dev_err(&pdev->dev, "Error in pon_regulator_init rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (!pon->num_pon_config)
@@ -2072,7 +2073,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pon->pdev->dev,
 			"Unable to read PON_PERPH_SUBTYPE register rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 	pon->subtype = temp;
 
@@ -2083,7 +2084,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pon->pdev->dev,
 			"Unable to read addr=%x, rc(%d)\n",
 			QPNP_PON_REVISION2(pon), rc);
-		return rc;
+		goto err_out;
 	}
 
 	pon->pon_ver = temp;
@@ -2100,7 +2101,8 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pon->pdev->dev,
 			"Invalid PON_PERPH_SUBTYPE value %x\n",
 			pon->subtype);
-		return -EINVAL;
+		rc = -EINVAL;
+		goto err_out;
 	}
 
 	pr_debug("%s: pon_subtype=%x, pon_version=%x\n", __func__,
@@ -2111,7 +2112,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pon->pdev->dev,
 			"Unable to store/clear WARM_RESET_REASONx registers rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	/* PON reason */
@@ -2120,7 +2121,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		dev_err(&pon->pdev->dev,
 			"Unable to read PON_RESASON1 reg rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (sys_reset)
@@ -2147,14 +2148,14 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		rc = read_gen2_pon_off_reason(pon, &poff_sts,
 						&reason_index_offset);
 		if (rc)
-			return rc;
+			goto err_out;
 	} else {
 		rc = regmap_bulk_read(pon->regmap, QPNP_POFF_REASON1(pon),
 			buf, 2);
 		if (rc) {
 			dev_err(&pon->pdev->dev, "Unable to read POFF_REASON regs rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		poff_sts = buf[0] | (buf[1] << 8);
 	}
@@ -2186,7 +2187,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 			dev_err(&pon->pdev->dev,
 				"Unable to read s3 timer rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 	} else {
 		if (s3_debounce > QPNP_PON_S3_TIMER_SECS_MAX) {
@@ -2205,7 +2206,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (rc) {
 			dev_err(&pdev->dev, "Unable to do SEC_ACCESS rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 
 		rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_DBC_CTL(pon),
@@ -2214,7 +2215,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev,
 				"Unable to set S3 debounce rc:%d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 	}
 
@@ -2225,7 +2226,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 	if (rc && rc != -EINVAL) {
 		dev_err(&pon->pdev->dev, "Unable to read s3 timer rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (!strcmp(s3_src, "kpdpwr"))
@@ -2247,7 +2248,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to program s3 source rc: %d\n",
 			rc);
-		return rc;
+		goto err_out;
 	}
 
 	dev_set_drvdata(&pdev->dev, pon);
@@ -2259,7 +2260,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 	if (rc) {
 		dev_err(&pdev->dev,
 			"Unable to initialize PON configurations rc: %d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	rc = of_property_read_u32(pon->pdev->dev.of_node,
@@ -2268,21 +2269,21 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev,
 				"Unable to read debounce delay rc: %d\n", rc);
-			return rc;
+			goto err_out;
 		}
 	} else {
 		rc = qpnp_pon_set_dbc(pon, delay);
 		if (rc) {
 			dev_err(&pdev->dev,
 				"Unable to set PON debounce delay rc=%d\n", rc);
-			return rc;
+			goto err_out;
 		}
 	}
 	rc = qpnp_pon_get_dbc(pon, &pon->dbc_time_us);
 	if (rc) {
 		dev_err(&pdev->dev,
 			"Unable to get PON debounce delay rc=%d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	pon->kpdpwr_dbc_enable = of_property_read_bool(pon->pdev->dev.of_node,
@@ -2295,7 +2296,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read warm reset poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->warm_reset_poff_type = -EINVAL;
 	} else if (pon->warm_reset_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2311,7 +2312,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read hard reset poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->hard_reset_poff_type = -EINVAL;
 	} else if (pon->hard_reset_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2327,7 +2328,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (rc != -EINVAL) {
 			dev_err(&pdev->dev, "Unable to read shutdown poweroff type rc: %d\n",
 				rc);
-			return rc;
+			goto err_out;
 		}
 		pon->shutdown_poff_type = -EINVAL;
 	} else if (pon->shutdown_poff_type <= PON_POWER_OFF_RESERVED ||
@@ -2339,7 +2340,7 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 	rc = device_create_file(&pdev->dev, &dev_attr_debounce_us);
 	if (rc) {
 		dev_err(&pdev->dev, "sys file creation failed rc: %d\n", rc);
-		return rc;
+		goto err_out;
 	}
 
 	if (of_property_read_bool(pdev->dev.of_node,
@@ -2347,7 +2348,8 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 		if (sys_reset) {
 			dev_err(&pdev->dev,
 				"qcom,system-reset property shouldn't be used along with qcom,secondary-pon-reset property\n");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto err_out;
 		}
 		spin_lock_irqsave(&spon_list_slock, flags);
 		list_add(&pon->list, &spon_dev_list);
@@ -2361,6 +2363,10 @@ static int qpnp_pon_probe(struct platform_device *pdev)
 
 	qpnp_pon_debugfs_init(pdev);
 	return 0;
+
+err_out:
+	sys_reset_dev = NULL;
+	return rc;
 }
 
 static int qpnp_pon_remove(struct platform_device *pdev)
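
The probe conversion above funnels every failure through a single err_out label so that sys_reset_dev is always cleared when probing fails, instead of being left pointing at a device that never finished initializing. The same single-exit idiom reduced to a minimal standalone sketch (names made up for illustration):

	#include <stdio.h>

	static int *global_handle;	/* stands in for sys_reset_dev */

	static int probe(int fail_step)
	{
		static int resource;
		int rc = 0;

		global_handle = &resource;	/* published early, before setup is done */

		if (fail_step == 1) {
			rc = -1;
			goto err_out;
		}
		if (fail_step == 2) {
			rc = -2;
			goto err_out;
		}
		return 0;

	err_out:
		global_handle = NULL;		/* undo the early publication on any error */
		return rc;
	}

	int main(void)
	{
		printf("ok=%d fail1=%d fail2=%d\n", probe(0), probe(1), probe(2));
		return 0;
	}
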
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index dbf0983..d1051e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -520,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
 		},
 	},
+	{
+		/* TUXEDO BU1406 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+		},
+	},
 	{ }
 };
 
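
The entry added above is consulted by substring-matching each DMI_MATCH string against the firmware-reported DMI fields; when an entry matches, i8042 skips enabling the active multiplexing mode on that machine. A rough standalone model of that table walk (hypothetical helper, not the real DMI code):

	#include <stdio.h>
	#include <string.h>

	struct quirk {
		const char *sys_vendor;		/* substring expected in DMI_SYS_VENDOR */
		const char *product_name;	/* substring expected in DMI_PRODUCT_NAME */
	};

	static const struct quirk nomux_table[] = {
		{ "Notebook", "N24_25BU" },	/* TUXEDO BU1406 */
		{ NULL, NULL }			/* terminator, like the trailing { } */
	};

	static int needs_nomux(const char *sys_vendor, const char *product_name)
	{
		const struct quirk *q;

		for (q = nomux_table; q->sys_vendor; q++)
			if (strstr(sys_vendor, q->sys_vendor) &&
			    strstr(product_name, q->product_name))
				return 1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", needs_nomux("Notebook", "N24_25BU"));	/* 1 */
		printf("%d\n", needs_nomux("SomeVendor", "SomeBoard"));	/* 0 */
		return 0;
	}
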
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index efca013..36777b3 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1214,4 +1214,14 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called bu21023_ts.
 
+config TOUCHSCREEN_SYNAPTICS_DSX
+	bool "Synaptics Touchscreen Driver"
+	depends on I2C
+	help
+	  Say Y here if you have a Synaptics touchscreen.
+
+	  If unsure, say N.
+
+source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 81b8645..0caab59 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -71,6 +71,7 @@
 obj-$(CONFIG_TOUCHSCREEN_SUN4I)		+= sun4i-ts.o
 obj-$(CONFIG_TOUCHSCREEN_SUR40)		+= sur40.o
 obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI)	+= surface3_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
 obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC)	+= ti_am335x_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)	+= touchit213.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)	+= touchright.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/Kconfig b/drivers/input/touchscreen/synaptics_dsx/Kconfig
new file mode 100644
index 0000000..b2fa115
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Kconfig
@@ -0,0 +1,128 @@
+#
+# Synaptics DSX touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_DSX
+	bool "Synaptics DSX touchscreen"
+	default y
+	help
+	  Say Y here if you have a Synaptics DSX touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_DSX
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	prompt "Synaptics DSX bus interface"
+config TOUCHSCREEN_SYNAPTICS_DSX_I2C
+	bool "RMI over I2C"
+	depends on I2C
+config TOUCHSCREEN_SYNAPTICS_DSX_SPI
+	bool "RMI over SPI"
+	depends on SPI_MASTER
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C
+	bool "HID over I2C"
+	depends on I2C
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	tristate "Synaptics DSX core driver module"
+	depends on I2C || SPI_MASTER
+	help
+	  Say Y here to enable basic touch reporting functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_core.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV
+	tristate "Synaptics DSX RMI device module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for direct RMI register access.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_rmi_dev.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+	tristate "Synaptics DSX firmware update module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for performing firmware updates.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_fw_update.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING
+	tristate "Synaptics DSX test reporting module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for retrieving production test reports.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_test_reporting.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY
+	tristate "Synaptics DSX proximity module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for proximity functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_proximity.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN
+	tristate "Synaptics DSX active pen module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for active pen functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_active_pen.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_GESTURE
+	tristate "Synaptics DSX user defined gesture module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for user defined gesture functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_gesture.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_VIDEO
+	tristate "Synaptics DSX video module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for video communication functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_video.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_DEBUG
+	tristate "Synaptics DSX debug module"
+	depends on TOUCHSCREEN_SYNAPTICS_DSX_CORE
+	help
+	  Say Y here to enable support for firmware debug functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_dsx_debug.
+
+endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/Makefile b/drivers/input/touchscreen/synaptics_dsx/Makefile
new file mode 100644
index 0000000..191dcdc
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the Synaptics DSX touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C) += synaptics_dsx_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI) += synaptics_dsx_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_HID_I2C) += synaptics_dsx_rmi_hid_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE) += synaptics_dsx_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV) += synaptics_dsx_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE) += synaptics_dsx_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_TEST_REPORTING) += synaptics_dsx_test_reporting.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_PROXIMITY) += synaptics_dsx_proximity.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_ACTIVE_PEN) += synaptics_dsx_active_pen.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_GESTURE) += synaptics_dsx_gesture.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_VIDEO) += synaptics_dsx_video.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_DEBUG) += synaptics_dsx_debug.o
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
new file mode 100644
index 0000000..3666e87
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_active_pen.c
@@ -0,0 +1,609 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define APEN_PHYS_NAME "synaptics_dsx/active_pen"
+
+#define ACTIVE_PEN_MAX_PRESSURE_16BIT 65535
+#define ACTIVE_PEN_MAX_PRESSURE_8BIT 255
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+struct apen_data_8b_pressure {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[11];
+	};
+};
+
+struct apen_data {
+	union {
+		struct {
+			unsigned char status_pen:1;
+			unsigned char status_invert:1;
+			unsigned char status_barrel:1;
+			unsigned char status_reserved:5;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char pressure_lsb;
+			unsigned char pressure_msb;
+			unsigned char battery_state;
+			unsigned char pen_id_0_7;
+			unsigned char pen_id_8_15;
+			unsigned char pen_id_16_23;
+			unsigned char pen_id_24_31;
+		} __packed;
+		unsigned char data[12];
+	};
+};
+
+struct synaptics_rmi4_apen_handle {
+	bool apen_present;
+	unsigned char intr_mask;
+	unsigned char battery_state;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short apen_data_addr;
+	unsigned short max_pressure;
+	unsigned int pen_id;
+	struct input_dev *apen_dev;
+	struct apen_data *apen_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_apen_handle *apen;
+
+DECLARE_COMPLETION(apen_remove_complete);
+
+static void apen_lift(void)
+{
+	input_report_key(apen->apen_dev, BTN_TOUCH, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_PEN, 0);
+	input_report_key(apen->apen_dev, BTN_TOOL_RUBBER, 0);
+	input_sync(apen->apen_dev);
+	apen->apen_present = false;
+}
+
+static void apen_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int pressure;
+	static int invert = -1;
+	struct apen_data_8b_pressure *apen_data_8b;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->apen_data_addr,
+			apen->apen_data->data,
+			sizeof(apen->apen_data->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read active pen data\n",
+				__func__);
+		return;
+	}
+
+	if (apen->apen_data->status_pen == 0) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: No active pen data\n",
+				__func__);
+
+		return;
+	}
+
+	x = (apen->apen_data->x_msb << 8) | (apen->apen_data->x_lsb);
+	y = (apen->apen_data->y_msb << 8) | (apen->apen_data->y_lsb);
+
+	if ((x == -1) && (y == -1)) {
+		if (apen->apen_present)
+			apen_lift();
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Active pen in range but no valid x & y\n",
+				__func__);
+
+		return;
+	}
+
+	if (!apen->apen_present)
+		invert = -1;
+
+	if (invert != -1 && invert != apen->apen_data->status_invert)
+		apen_lift();
+
+	invert = apen->apen_data->status_invert;
+
+	if (apen->max_pressure == ACTIVE_PEN_MAX_PRESSURE_16BIT) {
+		pressure = (apen->apen_data->pressure_msb << 8) |
+				apen->apen_data->pressure_lsb;
+		apen->battery_state = apen->apen_data->battery_state;
+		apen->pen_id = (apen->apen_data->pen_id_24_31 << 24) |
+				(apen->apen_data->pen_id_16_23 << 16) |
+				(apen->apen_data->pen_id_8_15 << 8) |
+				apen->apen_data->pen_id_0_7;
+	} else {
+		apen_data_8b = (struct apen_data_8b_pressure *)apen->apen_data;
+		pressure = apen_data_8b->pressure_msb;
+		apen->battery_state = apen_data_8b->battery_state;
+		apen->pen_id = (apen_data_8b->pen_id_24_31 << 24) |
+				(apen_data_8b->pen_id_16_23 << 16) |
+				(apen_data_8b->pen_id_8_15 << 8) |
+				apen_data_8b->pen_id_0_7;
+	}
+
+	input_report_key(apen->apen_dev, BTN_TOUCH, pressure > 0 ? 1 : 0);
+	input_report_key(apen->apen_dev,
+			apen->apen_data->status_invert > 0 ?
+			BTN_TOOL_RUBBER : BTN_TOOL_PEN, 1);
+	input_report_key(apen->apen_dev,
+			BTN_STYLUS, apen->apen_data->status_barrel > 0 ?
+			1 : 0);
+	input_report_abs(apen->apen_dev, ABS_X, x);
+	input_report_abs(apen->apen_dev, ABS_Y, y);
+	input_report_abs(apen->apen_dev, ABS_PRESSURE, pressure);
+
+	input_sync(apen->apen_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Active pen: status = %d, invert = %d, barrel = %d, x = %d, y = %d, pressure = %d\n",
+			__func__,
+			apen->apen_data->status_pen,
+			apen->apen_data->status_invert,
+			apen->apen_data->status_barrel,
+			x, y, pressure);
+
+	apen->apen_present = true;
+}
+
+static void apen_set_params(void)
+{
+	input_set_abs_params(apen->apen_dev, ABS_X, 0,
+			apen->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_Y, 0,
+			apen->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(apen->apen_dev, ABS_PRESSURE, 0,
+			apen->max_pressure, 0, 0);
+
+	return;
+}
+
+static int apen_pressure(struct synaptics_rmi4_f12_query_8 *query_8)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_reg_presence;
+	unsigned char size_of_query_9;
+	unsigned char *query_9;
+	unsigned char *data_desc;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	data_reg_presence = query_8->data[1];
+
+	size_of_query_9 = query_8->size_of_query9;
+	query_9 = kmalloc(size_of_query_9, GFP_KERNEL);
+	if (!query_9)
+		return -ENOMEM;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 9,
+			query_9,
+			size_of_query_9);
+	if (retval < 0)
+		goto exit;
+
+	data_desc = query_9;
+
+	for (ii = 0; ii < 6; ii++) {
+		if (!(data_reg_presence & (1 << ii)))
+			continue; /* The data register is not present */
+		data_desc++; /* Jump over the size entry */
+		while (*data_desc & (1 << 7))
+			data_desc++;
+		data_desc++; /* Go to the next descriptor */
+	}
+
+	data_desc++; /* Jump over the size entry */
+	/* Check for the presence of subpackets 1 and 2 */
+	if ((*data_desc & (3 << 1)) == (3 << 1))
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_16BIT;
+	else
+		apen->max_pressure = ACTIVE_PEN_MAX_PRESSURE_8BIT;
+
+exit:
+	kfree(query_9);
+
+	return retval;
+}
+
+static int apen_reg_init(void)
+{
+	int retval;
+	unsigned char data_offset;
+	unsigned char size_of_query8;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			apen->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	if ((size_of_query8 >= 2) && (query_8.data6_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present +
+				query_8.data4_is_present +
+				query_8.data5_is_present;
+		apen->apen_data_addr = apen->data_base_addr + data_offset;
+		retval = apen_pressure(&query_8);
+		if (retval < 0)
+			return retval;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Active pen support unavailable\n",
+				__func__);
+		retval = -ENODEV;
+	}
+
+	return retval;
+}
+
+static int apen_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = apen->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+				}
+			} else {
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	apen->query_base_addr = fd.query_base_addr | (page << 8);
+	apen->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	apen->data_base_addr = fd.data_base_addr | (page << 8);
+	apen->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = apen_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize active pen registers\n",
+				__func__);
+		return retval;
+	}
+
+	apen->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		apen->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= apen->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_apen_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!apen)
+		return;
+
+	if (apen->intr_mask & intr_mask)
+		apen_report();
+
+	return;
+}
+
+static int synaptics_rmi4_apen_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (apen) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	apen = kzalloc(sizeof(*apen), GFP_KERNEL);
+	if (!apen) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	apen->apen_data = kzalloc(sizeof(*(apen->apen_data)), GFP_KERNEL);
+	if (!apen->apen_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for apen_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen;
+	}
+
+	apen->rmi4_data = rmi4_data;
+
+	retval = apen_scan_pdt();
+	if (retval < 0)
+		goto exit_free_apen_data;
+
+	apen->apen_dev = input_allocate_device();
+	if (apen->apen_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate active pen device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_apen_data;
+	}
+
+	apen->apen_dev->name = ACTIVE_PEN_DRIVER_NAME;
+	apen->apen_dev->phys = APEN_PHYS_NAME;
+	apen->apen_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	apen->apen_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	apen->apen_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(apen->apen_dev, rmi4_data);
+
+	set_bit(EV_KEY, apen->apen_dev->evbit);
+	set_bit(EV_ABS, apen->apen_dev->evbit);
+	set_bit(BTN_TOUCH, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_PEN, apen->apen_dev->keybit);
+	set_bit(BTN_TOOL_RUBBER, apen->apen_dev->keybit);
+	set_bit(BTN_STYLUS, apen->apen_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, apen->apen_dev->propbit);
+#endif
+
+	apen_set_params();
+
+	retval = input_register_device(apen->apen_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register active pen device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	return 0;
+
+exit_free_input_device:
+	input_free_device(apen->apen_dev);
+
+exit_free_apen_data:
+	kfree(apen->apen_data);
+
+exit_free_apen:
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_apen_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		goto exit;
+
+	input_unregister_device(apen->apen_dev);
+	kfree(apen->apen_data);
+	kfree(apen);
+	apen = NULL;
+
+exit:
+	complete(&apen_remove_complete);
+}
+
+static void synaptics_rmi4_apen_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen) {
+		synaptics_rmi4_apen_init(rmi4_data);
+		return;
+	}
+
+	apen_lift();
+
+	apen_scan_pdt();
+}
+
+static void synaptics_rmi4_apen_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static void synaptics_rmi4_apen_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static void synaptics_rmi4_apen_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!apen)
+		return;
+
+	apen_lift();
+}
+
+static struct synaptics_rmi4_exp_fn active_pen_module = {
+	.fn_type = RMI_ACTIVE_PEN,
+	.init = synaptics_rmi4_apen_init,
+	.remove = synaptics_rmi4_apen_remove,
+	.reset = synaptics_rmi4_apen_reset,
+	.reinit = synaptics_rmi4_apen_reinit,
+	.early_suspend = synaptics_rmi4_apen_e_suspend,
+	.suspend = synaptics_rmi4_apen_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_apen_attn,
+};
+
+static int __init rmi4_active_pen_module_init(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_active_pen_module_exit(void)
+{
+	synaptics_rmi4_new_function(&active_pen_module, false);
+
+	wait_for_completion(&apen_remove_complete);
+}
+
+module_init(rmi4_active_pen_module_init);
+module_exit(rmi4_active_pen_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Active Pen Module");
+MODULE_LICENSE("GPL v2");
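
apen_report() above reassembles the 16-bit pressure and the 32-bit pen id from the little-endian byte fields of the register image. The same arithmetic in isolation, with made-up sample bytes:

	#include <stdio.h>

	int main(void)
	{
		unsigned char pressure_lsb = 0x34, pressure_msb = 0x12;
		unsigned char id[4] = { 0x78, 0x56, 0x34, 0x12 };	/* pen_id_0_7 .. pen_id_24_31 */
		unsigned int pressure, pen_id;

		pressure = (pressure_msb << 8) | pressure_lsb;		/* 0x1234 */
		pen_id = ((unsigned int)id[3] << 24) | (id[2] << 16) |
			 (id[1] << 8) | id[0];				/* 0x12345678 */

		printf("pressure=0x%x pen_id=0x%x\n", pressure, pen_id);
		return 0;
	}
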
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
new file mode 100644
index 0000000..9ce3026
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -0,0 +1,4879 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#include <linux/msm_drm_notify.h>
+
+#define INPUT_PHYS_NAME "synaptics_dsx/touch_input"
+#define STYLUS_PHYS_NAME "synaptics_dsx/stylus"
+
+#define VIRTUAL_KEY_MAP_FILE_NAME "virtualkeys." PLATFORM_DRIVER_NAME
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+/*
+#define USE_DATA_SERVER
+*/
+
+#define WAKEUP_GESTURE false
+
+#define NO_0D_WHILE_2D
+#define REPORT_2D_Z
+#define REPORT_2D_W
+/*
+#define REPORT_2D_PRESSURE
+*/
+
+#define F12_DATA_15_WORKAROUND
+
+#define IGNORE_FN_INIT_FAILURE
+#define FB_READY_RESET
+#define FB_READY_WAIT_MS 100
+#define FB_READY_TIMEOUT_S 30
+#ifdef SYNA_TDDI
+#define TDDI_LPWG_WAIT_US 10
+#endif
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define REBUILD_WORK_DELAY_MS 500 /* ms */
+
+#define EXP_FN_WORK_DELAY_MS 500 /* ms */
+#define MAX_F11_TOUCH_WIDTH 15
+#define MAX_F12_TOUCH_WIDTH 255
+
+#define CHECK_STATUS_TIMEOUT_MS 100
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+
+#define STATUS_NO_ERROR 0x00
+#define STATUS_RESET_OCCURRED 0x01
+#define STATUS_INVALID_CONFIG 0x02
+#define STATUS_DEVICE_FAILURE 0x03
+#define STATUS_CONFIG_CRC_FAILURE 0x04
+#define STATUS_FIRMWARE_CRC_FAILURE 0x05
+#define STATUS_CRC_IN_PROGRESS 0x06
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+#define CONFIGURED (1 << 7)
+
+#define F11_CONTINUOUS_MODE 0x00
+#define F11_WAKEUP_GESTURE_MODE 0x04
+#define F12_CONTINUOUS_MODE 0x00
+#define F12_WAKEUP_GESTURE_MODE 0x02
+#define F12_UDG_DETECT 0x0f
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode);
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data);
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild);
+#ifdef CONFIG_FB
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data);
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#ifndef CONFIG_FB
+#define USE_EARLYSUSPEND
+#endif
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static int synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h);
+#endif
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef USE_DATA_SERVER
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+#endif
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf);
+
+struct synaptics_rmi4_f01_device_status {
+	union {
+		struct {
+			unsigned char status_code:4;
+			unsigned char reserved:2;
+			unsigned char flash_prog:1;
+			unsigned char unconfigured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_0_5 {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char f11_query0_b0__2:3;
+			unsigned char has_query_9:1;
+			unsigned char has_query_11:1;
+			unsigned char has_query_12:1;
+			unsigned char has_query_27:1;
+			unsigned char has_query_28:1;
+
+			/* query 1 */
+			unsigned char num_of_fingers:3;
+			unsigned char has_rel:1;
+			unsigned char has_abs:1;
+			unsigned char has_gestures:1;
+			unsigned char has_sensitibity_adjust:1;
+			unsigned char f11_query1_b7:1;
+
+			/* query 2 */
+			unsigned char num_of_x_electrodes;
+
+			/* query 3 */
+			unsigned char num_of_y_electrodes;
+
+			/* query 4 */
+			unsigned char max_electrodes:7;
+			unsigned char f11_query4_b7:1;
+
+			/* query 5 */
+			unsigned char abs_data_size:2;
+			unsigned char has_anchored_finger:1;
+			unsigned char has_adj_hyst:1;
+			unsigned char has_dribble:1;
+			unsigned char has_bending_correction:1;
+			unsigned char has_large_object_suppression:1;
+			unsigned char has_jitter_filter:1;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+struct synaptics_rmi4_f11_query_7_8 {
+	union {
+		struct {
+			/* query 7 */
+			unsigned char has_single_tap:1;
+			unsigned char has_tap_and_hold:1;
+			unsigned char has_double_tap:1;
+			unsigned char has_early_tap:1;
+			unsigned char has_flick:1;
+			unsigned char has_press:1;
+			unsigned char has_pinch:1;
+			unsigned char has_chiral_scroll:1;
+
+			/* query 8 */
+			unsigned char has_palm_detect:1;
+			unsigned char has_rotate:1;
+			unsigned char has_touch_shapes:1;
+			unsigned char has_scroll_zones:1;
+			unsigned char individual_scroll_zones:1;
+			unsigned char has_multi_finger_scroll:1;
+			unsigned char has_multi_finger_scroll_edge_motion:1;
+			unsigned char has_multi_finger_scroll_inertia:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+struct synaptics_rmi4_f11_query_9 {
+	union {
+		struct {
+			unsigned char has_pen:1;
+			unsigned char has_proximity:1;
+			unsigned char has_large_object_sensitivity:1;
+			unsigned char has_suppress_on_large_object_detect:1;
+			unsigned char has_two_pen_thresholds:1;
+			unsigned char has_contact_geometry:1;
+			unsigned char has_pen_hover_discrimination:1;
+			unsigned char has_pen_hover_and_edge_filters:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_12 {
+	union {
+		struct {
+			unsigned char has_small_object_detection:1;
+			unsigned char has_small_object_detection_tuning:1;
+			unsigned char has_8bit_w:1;
+			unsigned char has_2d_adjustable_mapping:1;
+			unsigned char has_general_information_2:1;
+			unsigned char has_physical_properties:1;
+			unsigned char has_finger_limit:1;
+			unsigned char has_linear_cofficient_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_query_27 {
+	union {
+		struct {
+			unsigned char f11_query27_b0:1;
+			unsigned char has_pen_position_correction:1;
+			unsigned char has_pen_jitter_filter_coefficient:1;
+			unsigned char has_group_decomposition:1;
+			unsigned char has_wakeup_gesture:1;
+			unsigned char has_small_finger_correction:1;
+			unsigned char has_data_37:1;
+			unsigned char f11_query27_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f11_ctrl_6_9 {
+	union {
+		struct {
+			unsigned char sensor_max_x_pos_7_0;
+			unsigned char sensor_max_x_pos_11_8:4;
+			unsigned char f11_ctrl7_b4__7:4;
+			unsigned char sensor_max_y_pos_7_0;
+			unsigned char sensor_max_y_pos_11_8:4;
+			unsigned char f11_ctrl9_b4__7:4;
+		} __packed;
+		unsigned char data[4];
+	};
+};
+
+struct synaptics_rmi4_f11_data_1_5 {
+	union {
+		struct {
+			unsigned char x_position_11_4;
+			unsigned char y_position_11_4;
+			unsigned char x_position_3_0:4;
+			unsigned char y_position_3_0:4;
+			unsigned char wx:4;
+			unsigned char wy:4;
+			unsigned char z;
+		} __packed;
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl48_is_present:1;
+				unsigned char ctrl49_is_present:1;
+				unsigned char ctrl50_is_present:1;
+				unsigned char ctrl51_is_present:1;
+				unsigned char ctrl52_is_present:1;
+				unsigned char ctrl53_is_present:1;
+				unsigned char ctrl54_is_present:1;
+				unsigned char ctrl55_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl56_is_present:1;
+				unsigned char ctrl57_is_present:1;
+				unsigned char ctrl58_is_present:1;
+				unsigned char ctrl59_is_present:1;
+				unsigned char ctrl60_is_present:1;
+				unsigned char ctrl61_is_present:1;
+				unsigned char ctrl62_is_present:1;
+				unsigned char ctrl63_is_present:1;
+			} __packed;
+		};
+		unsigned char data[9];
+	};
+};
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data24_is_present:1;
+				unsigned char data25_is_present:1;
+				unsigned char data26_is_present:1;
+				unsigned char data27_is_present:1;
+				unsigned char data28_is_present:1;
+				unsigned char data29_is_present:1;
+				unsigned char data30_is_present:1;
+				unsigned char data31_is_present:1;
+			} __packed;
+		};
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_8 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char tx_pitch_lsb;
+			unsigned char tx_pitch_msb;
+			unsigned char low_rx_clip;
+			unsigned char high_rx_clip;
+			unsigned char low_tx_clip;
+			unsigned char high_tx_clip;
+			unsigned char num_of_rx;
+			unsigned char num_of_tx;
+		};
+		unsigned char data[14];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_23 {
+	union {
+		struct {
+			unsigned char finger_enable:1;
+			unsigned char active_stylus_enable:1;
+			unsigned char palm_enable:1;
+			unsigned char unclassified_object_enable:1;
+			unsigned char hovering_finger_enable:1;
+			unsigned char gloved_finger_enable:1;
+			unsigned char f12_ctr23_00_b6__7:2;
+			unsigned char max_reported_objects;
+			unsigned char f12_ctr23_02_b0:1;
+			unsigned char report_active_stylus_as_finger:1;
+			unsigned char report_palm_as_finger:1;
+			unsigned char report_unclassified_object_as_finger:1;
+			unsigned char report_hovering_finger_as_finger:1;
+			unsigned char report_gloved_finger_as_finger:1;
+			unsigned char report_narrow_object_swipe_as_finger:1;
+			unsigned char report_handedge_as_finger:1;
+			unsigned char cover_enable:1;
+			unsigned char stylus_enable:1;
+			unsigned char eraser_enable:1;
+			unsigned char small_object_enable:1;
+			unsigned char f12_ctr23_03_b4__7:4;
+			unsigned char report_cover_as_finger:1;
+			unsigned char report_stylus_as_finger:1;
+			unsigned char report_eraser_as_finger:1;
+			unsigned char report_small_object_as_finger:1;
+			unsigned char f12_ctr23_04_b4__7:4;
+		};
+		unsigned char data[5];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_31 {
+	union {
+		struct {
+			unsigned char max_x_coord_lsb;
+			unsigned char max_x_coord_msb;
+			unsigned char max_y_coord_lsb;
+			unsigned char max_y_coord_msb;
+			unsigned char rx_pitch_lsb;
+			unsigned char rx_pitch_msb;
+			unsigned char rx_clip_low;
+			unsigned char rx_clip_high;
+			unsigned char wedge_clip_low;
+			unsigned char wedge_clip_high;
+			unsigned char num_of_p;
+			unsigned char num_of_q;
+		};
+		unsigned char data[12];
+	};
+};
+
+struct synaptics_rmi4_f12_ctrl_58 {
+	union {
+		struct {
+			unsigned char reporting_format;
+			unsigned char f12_ctr58_00_reserved;
+			unsigned char min_force_lsb;
+			unsigned char min_force_msb;
+			unsigned char max_force_lsb;
+			unsigned char max_force_msb;
+			unsigned char light_press_threshold_lsb;
+			unsigned char light_press_threshold_msb;
+			unsigned char light_press_hysteresis_lsb;
+			unsigned char light_press_hysteresis_msb;
+			unsigned char hard_press_threshold_lsb;
+			unsigned char hard_press_threshold_msb;
+			unsigned char hard_press_hysteresis_lsb;
+			unsigned char hard_press_hysteresis_msb;
+		};
+		unsigned char data[14];
+	};
+};
+
+struct synaptics_rmi4_f12_finger_data {
+	unsigned char object_type_and_status;
+	unsigned char x_lsb;
+	unsigned char x_msb;
+	unsigned char y_lsb;
+	unsigned char y_msb;
+#ifdef REPORT_2D_Z
+	unsigned char z;
+#endif
+#ifdef REPORT_2D_W
+	unsigned char wx;
+	unsigned char wy;
+#endif
+};
+
+struct synaptics_rmi4_f1a_query {
+	union {
+		struct {
+			unsigned char max_button_count:3;
+			unsigned char f1a_query0_b3__4:2;
+			unsigned char has_query4:1;
+			unsigned char has_query3:1;
+			unsigned char has_query2:1;
+			unsigned char has_general_control:1;
+			unsigned char has_interrupt_enable:1;
+			unsigned char has_multibutton_select:1;
+			unsigned char has_tx_rx_map:1;
+			unsigned char has_perbutton_threshold:1;
+			unsigned char has_release_threshold:1;
+			unsigned char has_strongestbtn_hysteresis:1;
+			unsigned char has_filter_strength:1;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+struct synaptics_rmi4_f1a_query_4 {
+	union {
+		struct {
+			unsigned char has_ctrl19:1;
+			unsigned char f1a_query4_b1__4:4;
+			unsigned char has_ctrl24:1;
+			unsigned char f1a_query4_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f1a_control_0 {
+	union {
+		struct {
+			unsigned char multibutton_report:2;
+			unsigned char filter_mode:2;
+			unsigned char reserved:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_f1a_control {
+	struct synaptics_rmi4_f1a_control_0 general_control;
+	unsigned char button_int_enable;
+	unsigned char multi_button;
+	unsigned char *txrx_map;
+	unsigned char *button_threshold;
+	unsigned char button_release_threshold;
+	unsigned char strongest_button_hysteresis;
+	unsigned char filter_strength;
+};
+
+struct synaptics_rmi4_f1a_handle {
+	int button_bitmask_size;
+	unsigned char max_count;
+	unsigned char valid_button_count;
+	unsigned char *button_data_buffer;
+	unsigned char *button_map;
+	struct synaptics_rmi4_f1a_query button_query;
+	struct synaptics_rmi4_f1a_control button_control;
+};
+
+struct synaptics_rmi4_exp_fhandler {
+	struct synaptics_rmi4_exp_fn *exp_fn;
+	bool insert;
+	bool remove;
+	struct list_head link;
+};
+
+struct synaptics_rmi4_exp_fn_data {
+	bool initialized;
+	bool queue_work;
+	struct mutex mutex;
+	struct list_head list;
+	struct delayed_work work;
+	struct workqueue_struct *workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_exp_fn_data exp_data;
+
+static struct synaptics_dsx_button_map *vir_button_map;
+
+#ifdef USE_DATA_SERVER
+static pid_t synad_pid;
+static struct task_struct *synad_task;
+static struct siginfo interrupt_signal;
+#endif
+
+static struct device_attribute attrs[] = {
+	__ATTR(reset, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_f01_reset_store),
+	__ATTR(productinfo, 0444,
+			synaptics_rmi4_f01_productinfo_show,
+			synaptics_rmi4_store_error),
+	__ATTR(buildid, 0444,
+			synaptics_rmi4_f01_buildid_show,
+			synaptics_rmi4_store_error),
+	__ATTR(flashprog, 0444,
+			synaptics_rmi4_f01_flashprog_show,
+			synaptics_rmi4_store_error),
+	__ATTR(0dbutton, 0664,
+			synaptics_rmi4_0dbutton_show,
+			synaptics_rmi4_0dbutton_store),
+	__ATTR(suspend, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_suspend_store),
+	__ATTR(wake_gesture, 0664,
+			synaptics_rmi4_wake_gesture_show,
+			synaptics_rmi4_wake_gesture_store),
+#ifdef USE_DATA_SERVER
+	__ATTR(synad_pid, 0220,
+			synaptics_rmi4_show_error,
+			synaptics_rmi4_synad_pid_store),
+#endif
+};
+
+static struct kobj_attribute virtual_key_map_attr = {
+	.attr = {
+		.name = VIRTUAL_KEY_MAP_FILE_NAME,
+		.mode = 0444,
+	},
+	.show = synaptics_rmi4_virtual_key_map_show,
+};
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int reset;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 10, &reset) < 0)
+		return -EINVAL;
+
+	if (reset != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return count;
+}
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+			(rmi4_data->rmi4_mod_info.product_info[0]),
+			(rmi4_data->rmi4_mod_info.product_info[1]));
+}
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->firmware_id);
+}
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct synaptics_rmi4_f01_device_status device_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			device_status.data,
+			sizeof(device_status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device status, error = %d\n",
+				__func__, retval);
+		return retval;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			device_status.flash_prog);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->button_0d_enabled);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	unsigned char ii;
+	unsigned char intr_enable;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->button_0d_enabled == input)
+		return count;
+
+	if (list_empty(&rmi->support_fn_list))
+		return -ENODEV;
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+			ii = fhandler->intr_reg_num;
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+
+			if (input == 1)
+				intr_enable |= fhandler->intr_mask;
+			else
+				intr_enable &= ~fhandler->intr_mask;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr + 1 + ii,
+					&intr_enable,
+					sizeof(intr_enable));
+			if (retval < 0)
+				return retval;
+		}
+	}
+
+	rmi4_data->button_0d_enabled = input;
+
+	return count;
+}
+
+static ssize_t synaptics_rmi4_suspend_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input) < 0)
+		return -EINVAL;
+
+	if (input == 1)
+		synaptics_rmi4_suspend(dev);
+	else if (input == 0)
+		synaptics_rmi4_resume(dev);
+	else
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t synaptics_rmi4_wake_gesture_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			rmi4_data->enable_wakeup_gesture);
+}
+
+static ssize_t synaptics_rmi4_wake_gesture_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	input = input > 0 ? 1 : 0;
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = input;
+
+	return count;
+}
+
+#ifdef USE_DATA_SERVER
+static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	synad_pid = input;
+
+	if (synad_pid) {
+		synad_task = pid_task(find_vpid(synad_pid), PIDTYPE_PID);
+		if (!synad_task)
+			return -EINVAL;
+	}
+
+	return count;
+}
+#endif
+
+static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ii;
+	int cnt;
+	int count = 0;
+
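+	/*
+	 * One virtual key per line, in the Android virtual key map format:
+	 * 0x01:<keycode>:<center x>:<center y>:<width>:<height>
+	 */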
+	for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+		cnt = snprintf(buf, PAGE_SIZE - count, "0x01:%d:%d:%d:%d:%d\n",
+				vir_button_map->map[ii * 5 + 0],
+				vir_button_map->map[ii * 5 + 1],
+				vir_button_map->map[ii * 5 + 2],
+				vir_button_map->map[ii * 5 + 3],
+				vir_button_map->map[ii * 5 + 4]);
+		buf += cnt;
+		count += cnt;
+	}
+
+	return count;
+}
+
+static int synaptics_rmi4_f11_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char reporting_control;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F11)
+			break;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
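+	/* Reporting mode lives in the lower three bits of F11 CTRL0 */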
+	reporting_control = (reporting_control & ~MASK_3BIT);
+	if (enable)
+		reporting_control |= F11_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control |= F11_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base,
+			&reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_f12_wg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char offset;
+	unsigned char reporting_control[3];
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+		if (fhandler->fn_number == SYNAPTICS_RMI4_F12)
+			break;
+	}
+
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	offset = extra_data->ctrl20_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	if (enable)
+		reporting_control[rmi4_data->set_wakeup_gesture] =
+				F12_WAKEUP_GESTURE_MODE;
+	else
+		reporting_control[rmi4_data->set_wakeup_gesture] =
+				F12_CONTINUOUS_MODE;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			fhandler->full_addr.ctrl_base + offset,
+			reporting_control,
+			sizeof(reporting_control));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change reporting mode\n",
+				__func__);
+		return retval;
+	}
+
+	return retval;
+}
+
+static void synaptics_rmi4_wakeup_gesture(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	if (rmi4_data->f11_wakeup_gesture)
+		synaptics_rmi4_f11_wg(rmi4_data, enable);
+	else if (rmi4_data->f12_wakeup_gesture)
+		synaptics_rmi4_f12_wg(rmi4_data, enable);
+}
+
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char reg_index;
+	unsigned char finger;
+	unsigned char fingers_supported;
+	unsigned char num_of_finger_status_regs;
+	unsigned char finger_shift;
+	unsigned char finger_status;
+	unsigned char finger_status_reg[3];
+	unsigned char detected_gestures;
+	unsigned short data_addr;
+	unsigned short data_offset;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+	struct synaptics_rmi4_f11_data_1_5 data;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+
+	/*
+	 * The number of finger status registers is determined by the
+	 * maximum number of fingers supported - 2 bits per finger. So
+	 * the number of finger status registers to read is:
+	 * register_count = ceil(max_num_of_fingers / 4)
+	 */
+	fingers_supported = fhandler->num_of_data_points;
+	num_of_finger_status_regs = (fingers_supported + 3) / 4;
+	data_addr = fhandler->full_addr.data_base;
+
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data38_offset,
+				&detected_gestures,
+				sizeof(detected_gestures));
+		if (retval < 0)
+			return 0;
+
+		if (detected_gestures) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			rmi4_data->suspend = false;
+		}
+/*		synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			finger_status_reg,
+			num_of_finger_status_regs);
+	if (retval < 0)
+		return 0;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_supported; finger++) {
+		reg_index = finger / 4;
+		finger_shift = (finger % 4) * 2;
+		finger_status = (finger_status_reg[reg_index] >> finger_shift)
+				& MASK_2BIT;
+
+		/*
+		 * Each 2-bit finger status field represents the following:
+		 * 00 = finger not present
+		 * 01 = finger present and data accurate
+		 * 10 = finger present but data may be inaccurate
+		 * 11 = reserved
+		 */
+#ifdef TYPE_B_PROTOCOL
+		input_mt_slot(rmi4_data->input_dev, finger);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, finger_status);
+#endif
+
+		if (finger_status) {
+			data_offset = data_addr +
+					num_of_finger_status_regs +
+					(finger * sizeof(data.data));
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					data_offset,
+					data.data,
+					sizeof(data.data));
+			if (retval < 0) {
+				touch_count = 0;
+				goto exit;
+			}
+
+			x = (data.x_position_11_4 << 4) | data.x_position_3_0;
+			y = (data.y_position_11_4 << 4) | data.y_position_3_0;
+			wx = data.wx;
+			wy = data.wy;
+
+			if (rmi4_data->hw_if->board_data->swap_axes) {
+				temp = x;
+				x = y;
+				y = temp;
+				temp = wx;
+				wx = wy;
+				wy = temp;
+			}
+
+			if (rmi4_data->hw_if->board_data->x_flip)
+				x = rmi4_data->sensor_max_x - x;
+			if (rmi4_data->hw_if->board_data->y_flip)
+				y = rmi4_data->sensor_max_y - y;
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MAJOR, max(wx, wy));
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			touch_count++;
+		}
+	}
+
+	if (touch_count == 0) {
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0; /* number of touch points */
+	unsigned char index;
+	unsigned char finger;
+	unsigned char fingers_to_process;
+	unsigned char finger_status;
+	unsigned char size_of_2d_data;
+	unsigned char gesture_type;
+	unsigned short data_addr;
+	int x;
+	int y;
+	int wx;
+	int wy;
+	int temp;
+#if defined(REPORT_2D_PRESSURE) || defined(F51_DISCRETE_FORCE)
+	int pressure;
+#endif
+#ifdef REPORT_2D_PRESSURE
+	unsigned char f_fingers;
+	unsigned char f_lsb;
+	unsigned char f_msb;
+	unsigned char *f_data;
+#endif
+#ifdef F51_DISCRETE_FORCE
+	unsigned char force_level;
+#endif
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_finger_data *data;
+	struct synaptics_rmi4_f12_finger_data *finger_data;
+	static unsigned char finger_presence;
+	static unsigned char stylus_presence;
+#ifdef F12_DATA_15_WORKAROUND
+	static unsigned char objects_already_present;
+#endif
+
+	fingers_to_process = fhandler->num_of_data_points;
+	data_addr = fhandler->full_addr.data_base;
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	if (rmi4_data->suspend && rmi4_data->enable_wakeup_gesture) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data4_offset,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return 0;
+
+		gesture_type = rmi4_data->gesture_detection[0];
+
+		if (gesture_type && gesture_type != F12_UDG_DETECT) {
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 1);
+			input_sync(rmi4_data->input_dev);
+			input_report_key(rmi4_data->input_dev, KEY_WAKEUP, 0);
+			input_sync(rmi4_data->input_dev);
+			/* synaptics_rmi4_wakeup_gesture(rmi4_data, false); */
+			/* rmi4_data->suspend = false; */
+		}
+
+		return 0;
+	}
+
+	/* Determine the total number of fingers to process */
+	if (extra_data->data15_size) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data15_offset,
+				extra_data->data15_data,
+				extra_data->data15_size);
+		if (retval < 0)
+			return 0;
+
+		/* Start checking from the highest bit */
+		index = extra_data->data15_size - 1; /* Highest byte */
+		finger = (fingers_to_process - 1) % 8; /* Highest bit */
+		do {
+			if (extra_data->data15_data[index] & (1 << finger))
+				break;
+
+			if (finger) {
+				finger--;
+			} else if (index > 0) {
+				index--; /* Move to the next lower byte */
+				finger = 7;
+			}
+
+			fingers_to_process--;
+		} while (fingers_to_process);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of fingers to process = %d\n",
+			__func__, fingers_to_process);
+	}
+
+#ifdef F12_DATA_15_WORKAROUND
+	fingers_to_process = max(fingers_to_process, objects_already_present);
+#endif
+
+	if (!fingers_to_process) {
+		synaptics_rmi4_free_fingers(rmi4_data);
+		finger_presence = 0;
+		stylus_presence = 0;
+		return 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr + extra_data->data1_offset,
+			(unsigned char *)fhandler->data,
+			fingers_to_process * size_of_2d_data);
+	if (retval < 0)
+		return 0;
+
+	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_addr + extra_data->data29_offset,
+				extra_data->data29_data,
+				extra_data->data29_size);
+		if (retval < 0)
+			return 0;
+	}
+#endif
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+	for (finger = 0; finger < fingers_to_process; finger++) {
+		finger_data = data + finger;
+		finger_status = finger_data->object_type_and_status;
+
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = finger + 1;
+#endif
+
+		x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
+		y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
+#ifdef REPORT_2D_W
+		wx = finger_data->wx;
+		wy = finger_data->wy;
+#endif
+
+		if (rmi4_data->hw_if->board_data->swap_axes) {
+			temp = x;
+			x = y;
+			y = temp;
+			temp = wx;
+			wx = wy;
+			wy = temp;
+		}
+
+		if (rmi4_data->hw_if->board_data->x_flip)
+			x = rmi4_data->sensor_max_x - x;
+		if (rmi4_data->hw_if->board_data->y_flip)
+			y = rmi4_data->sensor_max_y - y;
+
+		switch (finger_status) {
+		case F12_FINGER_STATUS:
+		case F12_GLOVED_FINGER_STATUS:
+			/* Stylus has priority over fingers */
+			if (stylus_presence)
+				break;
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 1);
+#endif
+
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(rmi4_data->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifdef REPORT_2D_W
+			if (rmi4_data->wedge_sensor) {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR, wx);
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR, wx);
+			} else {
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MAJOR,
+						max(wx, wy));
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_TOUCH_MINOR,
+						min(wx, wy));
+			}
+#endif
+#ifdef REPORT_2D_PRESSURE
+			if (rmi4_data->report_pressure) {
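+				/* Two force bytes (LSB, MSB) per finger */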
+				f_fingers = extra_data->data29_size / 2;
+				f_data = extra_data->data29_data;
+				if (finger + 1 > f_fingers) {
+					pressure = 1;
+				} else {
+					f_lsb = finger * 2;
+					f_msb = finger * 2 + 1;
+					pressure = (int)f_data[f_lsb] << 0 |
+							(int)f_data[f_msb] << 8;
+				}
+				pressure = pressure > 0 ? pressure : 1;
+				if (pressure > rmi4_data->force_max)
+					pressure = rmi4_data->force_max;
+				input_report_abs(rmi4_data->input_dev,
+						ABS_MT_PRESSURE, pressure);
+			}
+#elif defined(F51_DISCRETE_FORCE)
+			if (finger == 0) {
+				retval = synaptics_rmi4_reg_read(rmi4_data,
+						FORCE_LEVEL_ADDR,
+						&force_level,
+						sizeof(force_level));
+				if (retval < 0) {
+					mutex_unlock(
+						&(rmi4_data->rmi4_report_mutex));
+					return 0;
+				}
+				pressure = force_level > 0 ? force_level : 1;
+			} else {
+				pressure = 1;
+			}
+			input_report_abs(rmi4_data->input_dev,
+					ABS_MT_PRESSURE, pressure);
+#endif
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(rmi4_data->input_dev);
+#endif
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: status = 0x%02x, x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					finger_status,
+					x, y, wx, wy);
+
+			finger_presence = 1;
+			touch_count++;
+			break;
+		case F12_PALM_STATUS:
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Finger %d: x = %d, y = %d, wx = %d, wy = %d\n",
+					__func__, finger,
+					x, y, wx, wy);
+			break;
+		case F12_STYLUS_STATUS:
+		case F12_ERASER_STATUS:
+			if (!rmi4_data->stylus_enable)
+				break;
+			/* Stylus has priority over fingers */
+			if (finger_presence) {
+				mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+				synaptics_rmi4_free_fingers(rmi4_data);
+				mutex_lock(&(rmi4_data->rmi4_report_mutex));
+				finger_presence = 0;
+			}
+			if (stylus_presence) { /* Allow one stylus at a time */
+				if (finger + 1 != stylus_presence)
+					break;
+			}
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 1);
+			if (finger_status == F12_STYLUS_STATUS) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_PEN, 1);
+			} else {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 1);
+			}
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_X, x);
+			input_report_abs(rmi4_data->stylus_dev,
+					ABS_Y, y);
+			input_sync(rmi4_data->stylus_dev);
+
+			stylus_presence = finger + 1;
+			touch_count++;
+			break;
+		default:
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(rmi4_data->input_dev, finger);
+			input_mt_report_slot_state(rmi4_data->input_dev,
+					MT_TOOL_FINGER, 0);
+#endif
+			break;
+		}
+	}
+
+	if (touch_count == 0) {
+		finger_presence = 0;
+#ifdef F12_DATA_15_WORKAROUND
+		objects_already_present = 0;
+#endif
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(rmi4_data->input_dev);
+#endif
+
+		if (rmi4_data->stylus_enable) {
+			stylus_presence = 0;
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOUCH, 0);
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_PEN, 0);
+			if (rmi4_data->eraser_enable) {
+				input_report_key(rmi4_data->stylus_dev,
+						BTN_TOOL_RUBBER, 0);
+			}
+			input_sync(rmi4_data->stylus_dev);
+		}
+	}
+
+	input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return touch_count;
+}
+
+static int synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char touch_count = 0;
+	unsigned char button;
+	unsigned char index;
+	unsigned char shift;
+	unsigned char status;
+	unsigned char *data;
+	unsigned short data_addr = fhandler->full_addr.data_base;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	static unsigned char do_once = 1;
+	static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+	if (do_once) {
+		memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+		memset(before_2d_status, 0, sizeof(before_2d_status));
+		memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+		do_once = 0;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_addr,
+			f1a->button_data_buffer,
+			f1a->button_bitmask_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read button data registers\n",
+				__func__);
+		return retval;
+	}
+
+	data = f1a->button_data_buffer;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
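+	/* One status bit per button, eight buttons per data register */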
+	for (button = 0; button < f1a->valid_button_count; button++) {
+		index = button / 8;
+		shift = button % 8;
+		status = ((data[index] >> shift) & MASK_1BIT);
+
+		if (current_status[button] == status)
+			continue;
+		else
+			current_status[button] = status;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Button %d (code %d) ->%d\n",
+				__func__, button,
+				f1a->button_map[button],
+				status);
+#ifdef NO_0D_WHILE_2D
+		if (rmi4_data->fingers_on_2d == false) {
+			if (status == 1) {
+				before_2d_status[button] = 1;
+			} else {
+				if (while_2d_status[button] == 1) {
+					while_2d_status[button] = 0;
+					continue;
+				} else {
+					before_2d_status[button] = 0;
+				}
+			}
+			touch_count++;
+			input_report_key(rmi4_data->input_dev,
+					f1a->button_map[button],
+					status);
+		} else {
+			if (before_2d_status[button] == 1) {
+				before_2d_status[button] = 0;
+				touch_count++;
+				input_report_key(rmi4_data->input_dev,
+						f1a->button_map[button],
+						status);
+			} else {
+				if (status == 1)
+					while_2d_status[button] = 1;
+				else
+					while_2d_status[button] = 0;
+			}
+		}
+#else
+		touch_count++;
+		input_report_key(rmi4_data->input_dev,
+				f1a->button_map[button],
+				status);
+#endif
+	}
+
+	if (touch_count)
+		input_sync(rmi4_data->input_dev);
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	return retval;
+}
+
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	unsigned char touch_count_2d;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x reporting\n",
+			__func__, fhandler->fn_number);
+
+	switch (fhandler->fn_number) {
+	case SYNAPTICS_RMI4_F11:
+		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+				fhandler);
+
+		if (touch_count_2d)
+			rmi4_data->fingers_on_2d = true;
+		else
+			rmi4_data->fingers_on_2d = false;
+		break;
+	case SYNAPTICS_RMI4_F12:
+		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
+				fhandler);
+
+		if (touch_count_2d)
+			rmi4_data->fingers_on_2d = true;
+		else
+			rmi4_data->fingers_on_2d = false;
+		break;
+	case SYNAPTICS_RMI4_F1A:
+		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+		break;
+#ifdef USE_DATA_SERVER
+	case SYNAPTICS_RMI4_F21:
+		if (synad_pid)
+			send_sig_info(SIGIO, &interrupt_signal, synad_task);
+		break;
+#endif
+	default:
+		break;
+	}
+}
+
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
+		bool report)
+{
+	int retval;
+	unsigned char data[MAX_INTR_REGISTERS + 1];
+	unsigned char *intr = &data[1];
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_f01_device_status status;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	/*
+	 * Get interrupt status information from F01 Data1 register to
+	 * determine the source(s) that are flagging the interrupt.
+	 */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			data,
+			rmi4_data->num_of_intr_regs + 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read interrupt status\n",
+				__func__);
+		return retval;
+	}
+
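+	/*
+	 * data[0] holds the F01 device status register; the remaining bytes
+	 * hold the interrupt status registers pointed to by intr.
+	 */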
+	status.data[0] = data[0];
+	if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		retval = synaptics_rmi4_check_status(rmi4_data,
+				&was_in_bl_mode);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to check status\n",
+					__func__);
+			return retval;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device status\n",
+					__func__);
+			return retval;
+		}
+	}
+	if (status.unconfigured && !status.flash_prog) {
+		pr_notice("%s: spontaneous reset detected\n", __func__);
+		retval = synaptics_rmi4_reinit_device(rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to reinit device\n",
+					__func__);
+		}
+	}
+
+	if (!report)
+		return retval;
+
+	/*
+	 * Traverse the function handler list and service the source(s)
+	 * of the interrupt accordingly.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask &
+						intr[fhandler->intr_reg_num]) {
+					synaptics_rmi4_report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (!exp_fhandler->insert &&
+					!exp_fhandler->remove &&
+					(exp_fhandler->exp_fn->attn != NULL))
+				exp_fhandler->exp_fn->attn(rmi4_data, intr[0]);
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	return retval;
+}
+
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state)
+		goto exit;
+
+	synaptics_rmi4_sensor_report(rmi4_data, true);
+
+exit:
+	return IRQ_HANDLED;
+}
+
+static int synaptics_rmi4_int_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short intr_addr;
+
+	intr_mask = rmi4_data->intr_mask;
+
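+	/*
+	 * The interrupt enable registers follow the F01 control base
+	 * register, one register per block of eight interrupt sources.
+	 */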
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (enable) {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						intr_addr,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			} else {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						intr_addr,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable, bool attn_only)
+{
+	int retval = 0;
+	unsigned char data[MAX_INTR_REGISTERS];
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (attn_only) {
+		retval = synaptics_rmi4_int_enable(rmi4_data, enable);
+		goto exit;
+	}
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, false);
+		if (retval < 0)
+			goto exit;
+
+		/* Clear interrupts */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				data,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read interrupt status\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				synaptics_rmi4_irq, bdata->irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		retval = synaptics_rmi4_int_enable(rmi4_data, true);
+		if (retval < 0)
+			goto exit;
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+static void synaptics_rmi4_set_intr_mask(struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	unsigned char ii;
+	unsigned char intr_offset;
+
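+	/* Interrupt status register that carries this function's sources */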
+	fhandler->intr_reg_num = (intr_count + 7) / 8;
+	if (fhandler->intr_reg_num != 0)
+		fhandler->intr_reg_num -= 1;
+
+	/* Set an enable bit for each data source */
+	intr_offset = intr_count % 8;
+	fhandler->intr_mask = 0;
+	for (ii = intr_offset;
+			ii < (fd->intr_src_count + intr_offset);
+			ii++)
+		fhandler->intr_mask |= 1 << ii;
+}
+
+static int synaptics_rmi4_f01_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->data = NULL;
+	fhandler->extra = NULL;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	rmi4_data->f01_query_base_addr = fd->query_base_addr;
+	rmi4_data->f01_ctrl_base_addr = fd->ctrl_base_addr;
+	rmi4_data->f01_data_base_addr = fd->data_base_addr;
+	rmi4_data->f01_cmd_base_addr = fd->cmd_base_addr;
+
+	return 0;
+}
+
+static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+	int temp;
+	unsigned char offset;
+	unsigned char fingers_supported;
+	struct synaptics_rmi4_f11_extra_data *extra_data;
+	struct synaptics_rmi4_f11_query_0_5 query_0_5;
+	struct synaptics_rmi4_f11_query_7_8 query_7_8;
+	struct synaptics_rmi4_f11_query_9 query_9;
+	struct synaptics_rmi4_f11_query_12 query_12;
+	struct synaptics_rmi4_f11_query_27 query_27;
+	struct synaptics_rmi4_f11_ctrl_6_9 control_6_9;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f11_extra_data *)fhandler->extra;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			query_0_5.data,
+			sizeof(query_0_5.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum number of fingers supported */
+	if (query_0_5.num_of_fingers <= 4)
+		fhandler->num_of_data_points = query_0_5.num_of_fingers + 1;
+	else if (query_0_5.num_of_fingers == 5)
+		fhandler->num_of_data_points = 10;
+
+	rmi4_data->num_of_fingers = fhandler->num_of_data_points;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + 6,
+			control_6_9.data,
+			sizeof(control_6_9.data));
+	if (retval < 0)
+		return retval;
+
+	/* Maximum x and y */
+	rmi4_data->sensor_max_x = control_6_9.sensor_max_x_pos_7_0 |
+			(control_6_9.sensor_max_x_pos_11_8 << 8);
+	rmi4_data->sensor_max_y = control_6_9.sensor_max_y_pos_7_0 |
+			(control_6_9.sensor_max_y_pos_11_8 << 8);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	fhandler->data = NULL;
+
+	offset = sizeof(query_0_5.data);
+
+	/* query 6 */
+	if (query_0_5.has_rel)
+		offset += 1;
+
+	/* queries 7 8 */
+	if (query_0_5.has_gestures) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_7_8.data,
+				sizeof(query_7_8.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_7_8.data);
+	}
+
+	/* query 9 */
+	if (query_0_5.has_query_9) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_9.data,
+				sizeof(query_9.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_9.data);
+	}
+
+	/* query 10 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += 1;
+
+	/* query 11 */
+	if (query_0_5.has_query_11)
+		offset += 1;
+
+	/* query 12 */
+	if (query_0_5.has_query_12) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_12.data,
+				sizeof(query_12.data));
+		if (retval < 0)
+			return retval;
+
+		offset += sizeof(query_12.data);
+	}
+
+	/* query 13 */
+	if (query_0_5.has_jitter_filter)
+		offset += 1;
+
+	/* query 14 */
+	if (query_0_5.has_query_12 && query_12.has_general_information_2)
+		offset += 1;
+
+	/* queries 15 16 17 18 19 20 21 22 23 24 25 26*/
+	if (query_0_5.has_query_12 && query_12.has_physical_properties)
+		offset += 12;
+
+	/* query 27 */
+	if (query_0_5.has_query_27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_27.data,
+				sizeof(query_27.data));
+		if (retval < 0)
+			return retval;
+
+		rmi4_data->f11_wakeup_gesture = query_27.has_wakeup_gesture;
+	}
+
+	if (!rmi4_data->f11_wakeup_gesture)
+		return retval;
+
+	/* data 0 */
+	fingers_supported = fhandler->num_of_data_points;
+	offset = (fingers_supported + 3) / 4;
+
+	/* data 1 2 3 4 5 */
+	offset += 5 * fingers_supported;
+
+	/* data 6 7 */
+	if (query_0_5.has_rel)
+		offset += 2 * fingers_supported;
+
+	/* data 8 */
+	if (query_0_5.has_gestures && query_7_8.data[0])
+		offset += 1;
+
+	/* data 9 */
+	if (query_0_5.has_gestures && (query_7_8.data[0] || query_7_8.data[1]))
+		offset += 1;
+
+	/* data 10 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_pinch || query_7_8.has_flick))
+		offset += 1;
+
+	/* data 11 12 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_flick || query_7_8.has_rotate))
+		offset += 2;
+
+	/* data 13 */
+	if (query_0_5.has_gestures && query_7_8.has_touch_shapes)
+		offset += (fingers_supported + 3) / 4;
+
+	/* data 14 15 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones ||
+			query_7_8.has_multi_finger_scroll ||
+			query_7_8.has_chiral_scroll))
+		offset += 2;
+
+	/* data 16 17 */
+	if (query_0_5.has_gestures &&
+			(query_7_8.has_scroll_zones &&
+			query_7_8.individual_scroll_zones))
+		offset += 2;
+
+	/* data 18 19 20 21 22 23 24 25 26 27 */
+	if (query_0_5.has_query_9 && query_9.has_contact_geometry)
+		offset += 10 * fingers_supported;
+
+	/* data 28 */
+	if (query_0_5.has_bending_correction ||
+			query_0_5.has_large_object_suppression)
+		offset += 1;
+
+	/* data 29 30 31 */
+	if (query_0_5.has_query_9 && query_9.has_pen_hover_discrimination)
+		offset += 3;
+
+	/* data 32 */
+	if (query_0_5.has_query_12 &&
+			query_12.has_small_object_detection_tuning)
+		offset += 1;
+
+	/* data 33 34 */
+	if (query_0_5.has_query_27 && query_27.f11_query27_b0)
+		offset += 2;
+
+	/* data 35 */
+	if (query_0_5.has_query_12 && query_12.has_8bit_w)
+		offset += fingers_supported;
+
+	/* data 36 */
+	if (query_0_5.has_bending_correction)
+		offset += 1;
+
+	/* data 37 */
+	if (query_0_5.has_query_27 && query_27.has_data_37)
+		offset += 1;
+
+	/* data 38 */
+	if (query_0_5.has_query_27 && query_27.has_wakeup_gesture)
+		extra_data->data38_offset = offset;
+
+	return retval;
+}
+
+static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short ctrl28)
+{
+	int retval;
+	static unsigned short ctrl_28_address;
+
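+	/* Remember the F12 CTRL28 address so later callers may pass 0 */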
+	if (ctrl28)
+		ctrl_28_address = ctrl28;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_28_address,
+			&rmi4_data->report_enable,
+			sizeof(rmi4_data->report_enable));
+	if (retval < 0)
+		return retval;
+
+	return retval;
+}
+
+static int synaptics_rmi4_f12_find_sub(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		unsigned char *presence, unsigned char presence_size,
+		unsigned char structure_offset, unsigned char reg,
+		unsigned char sub)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char p_index;
+	unsigned char s_index;
+	unsigned char offset;
+	unsigned char max_reg;
+	unsigned char *structure;
+
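+	/*
+	 * Walk the packet register presence map and the packed structure
+	 * descriptors to determine whether subpacket 'sub' of packet
+	 * register 'reg' is present.
+	 */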
+	max_reg = (presence_size - 1) * 8 - 1;
+
+	if (reg > max_reg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Register number (%d) over limit\n",
+				__func__, reg);
+		return -EINVAL;
+	}
+
+	p_index = reg / 8 + 1;
+	bitnum = reg % 8;
+	if ((presence[p_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Register %d is not present\n",
+				__func__, reg);
+		return -EINVAL;
+	}
+
+	structure = kmalloc(presence[0], GFP_KERNEL);
+	if (!structure) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for structure register\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + structure_offset,
+			structure,
+			presence[0]);
+	if (retval < 0)
+		goto exit;
+
+	s_index = 0;
+
+	for (regnum = 0; regnum < reg; regnum++) {
+		p_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((presence[p_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		if (structure[s_index] == 0x00)
+			s_index += 3;
+		else
+			s_index++;
+
+		while (structure[s_index] & ~MASK_7BIT)
+			s_index++;
+
+		s_index++;
+	}
+
+	cnt = 0;
+	s_index++;
+	offset = sub / 7;
+	bitnum = sub % 7;
+
+	do {
+		if (cnt == offset) {
+			if (structure[s_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (structure[s_index + cnt - 1] & ~MASK_7BIT);
+
+	retval = 0;
+
+exit:
+	kfree(structure);
+
+	return retval;
+}
+
+static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval = 0;
+	int temp;
+	unsigned char subpacket;
+	unsigned char ctrl_23_size;
+	unsigned char size_of_2d_data;
+	unsigned char size_of_query5;
+	unsigned char size_of_query8;
+	unsigned char ctrl_8_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_28_offset;
+	unsigned char ctrl_31_offset;
+	unsigned char ctrl_58_offset;
+	unsigned char num_of_fingers;
+	struct synaptics_rmi4_f12_extra_data *extra_data;
+	struct synaptics_rmi4_f12_query_5 *query_5 = NULL;
+	struct synaptics_rmi4_f12_query_8 *query_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_8 *ctrl_8 = NULL;
+	struct synaptics_rmi4_f12_ctrl_23 *ctrl_23 = NULL;
+	struct synaptics_rmi4_f12_ctrl_31 *ctrl_31 = NULL;
+	struct synaptics_rmi4_f12_ctrl_58 *ctrl_58 = NULL;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
+	if (!fhandler->extra) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->extra\n",
+				__func__);
+		return -ENOMEM;
+	}
+	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
+	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+
+	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	query_8 = kzalloc(sizeof(*query_8), GFP_KERNEL);
+	if (!query_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_8 = kzalloc(sizeof(*ctrl_8), GFP_KERNEL);
+	if (!ctrl_8) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_8\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_23 = kzalloc(sizeof(*ctrl_23), GFP_KERNEL);
+	if (!ctrl_23) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_23\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_31 = kzalloc(sizeof(*ctrl_31), GFP_KERNEL);
+	if (!ctrl_31) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_31\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	ctrl_58 = kzalloc(sizeof(*ctrl_58), GFP_KERNEL);
+	if (!ctrl_58) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_58\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 4,
+			&size_of_query5,
+			sizeof(size_of_query5));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query5 > sizeof(query_5->data))
+		size_of_query5 = sizeof(query_5->data);
+	memset(query_5->data, 0x00, sizeof(query_5->data));
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 5,
+			query_5->data,
+			size_of_query5);
+	if (retval < 0)
+		goto exit;
+
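+	/*
+	 * Each control register offset is the sum of the presence bits of
+	 * all lower-numbered control registers.
+	 */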
+	ctrl_8_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present +
+			query_5->ctrl4_is_present +
+			query_5->ctrl5_is_present +
+			query_5->ctrl6_is_present +
+			query_5->ctrl7_is_present;
+
+	ctrl_20_offset = ctrl_8_offset +
+			query_5->ctrl8_is_present +
+			query_5->ctrl9_is_present +
+			query_5->ctrl10_is_present +
+			query_5->ctrl11_is_present +
+			query_5->ctrl12_is_present +
+			query_5->ctrl13_is_present +
+			query_5->ctrl14_is_present +
+			query_5->ctrl15_is_present +
+			query_5->ctrl16_is_present +
+			query_5->ctrl17_is_present +
+			query_5->ctrl18_is_present +
+			query_5->ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5->ctrl20_is_present +
+			query_5->ctrl21_is_present +
+			query_5->ctrl22_is_present;
+
+	ctrl_28_offset = ctrl_23_offset +
+			query_5->ctrl23_is_present +
+			query_5->ctrl24_is_present +
+			query_5->ctrl25_is_present +
+			query_5->ctrl26_is_present +
+			query_5->ctrl27_is_present;
+
+	ctrl_31_offset = ctrl_28_offset +
+			query_5->ctrl28_is_present +
+			query_5->ctrl29_is_present +
+			query_5->ctrl30_is_present;
+
+	ctrl_58_offset = ctrl_31_offset +
+			query_5->ctrl31_is_present +
+			query_5->ctrl32_is_present +
+			query_5->ctrl33_is_present +
+			query_5->ctrl34_is_present +
+			query_5->ctrl35_is_present +
+			query_5->ctrl36_is_present +
+			query_5->ctrl37_is_present +
+			query_5->ctrl38_is_present +
+			query_5->ctrl39_is_present +
+			query_5->ctrl40_is_present +
+			query_5->ctrl41_is_present +
+			query_5->ctrl42_is_present +
+			query_5->ctrl43_is_present +
+			query_5->ctrl44_is_present +
+			query_5->ctrl45_is_present +
+			query_5->ctrl46_is_present +
+			query_5->ctrl47_is_present +
+			query_5->ctrl48_is_present +
+			query_5->ctrl49_is_present +
+			query_5->ctrl50_is_present +
+			query_5->ctrl51_is_present +
+			query_5->ctrl52_is_present +
+			query_5->ctrl53_is_present +
+			query_5->ctrl54_is_present +
+			query_5->ctrl55_is_present +
+			query_5->ctrl56_is_present +
+			query_5->ctrl57_is_present;
+
+	ctrl_23_size = 2;
+	for (subpacket = 2; subpacket <= 4; subpacket++) {
+		retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+				fhandler, query_5->data, sizeof(query_5->data),
+				6, 23, subpacket);
+		if (retval == 1)
+			ctrl_23_size++;
+		else if (retval < 0)
+			goto exit;
+
+	}
+
+	retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+			fhandler, query_5->data, sizeof(query_5->data),
+			6, 20, 0);
+	if (retval == 1)
+		rmi4_data->set_wakeup_gesture = 2;
+	else if (retval == 0)
+		rmi4_data->set_wakeup_gesture = 0;
+	else if (retval < 0)
+		goto exit;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_23_offset,
+			ctrl_23->data,
+			ctrl_23_size);
+	if (retval < 0)
+		goto exit;
+
+	/* Maximum number of fingers supported */
+	fhandler->num_of_data_points = min_t(unsigned char,
+			ctrl_23->max_reported_objects,
+			(unsigned char)F12_FINGERS_TO_SUPPORT);
+
+	num_of_fingers = fhandler->num_of_data_points;
+	rmi4_data->num_of_fingers = num_of_fingers;
+
+	rmi4_data->stylus_enable = ctrl_23->stylus_enable;
+	rmi4_data->eraser_enable = ctrl_23->eraser_enable;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 7,
+			&size_of_query8,
+			sizeof(size_of_query8));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query8 > sizeof(query_8->data))
+		size_of_query8 = sizeof(query_8->data);
+	memset(query_8->data, 0x00, sizeof(query_8->data));
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base + 8,
+			query_8->data,
+			size_of_query8);
+	if (retval < 0)
+		goto exit;
+
+	/* Determine the presence of the Data0 register */
+	extra_data->data1_offset = query_8->data0_is_present;
+
+	if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+		extra_data->data15_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present;
+		extra_data->data15_size = (num_of_fingers + 7) / 8;
+	} else {
+		extra_data->data15_size = 0;
+	}
+
+#ifdef REPORT_2D_PRESSURE
+	if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
+		extra_data->data29_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present +
+				query_8->data4_is_present +
+				query_8->data5_is_present +
+				query_8->data6_is_present +
+				query_8->data7_is_present +
+				query_8->data8_is_present +
+				query_8->data9_is_present +
+				query_8->data10_is_present +
+				query_8->data11_is_present +
+				query_8->data12_is_present +
+				query_8->data13_is_present +
+				query_8->data14_is_present +
+				query_8->data15_is_present +
+				query_8->data16_is_present +
+				query_8->data17_is_present +
+				query_8->data18_is_present +
+				query_8->data19_is_present +
+				query_8->data20_is_present +
+				query_8->data21_is_present +
+				query_8->data22_is_present +
+				query_8->data23_is_present +
+				query_8->data24_is_present +
+				query_8->data25_is_present +
+				query_8->data26_is_present +
+				query_8->data27_is_present +
+				query_8->data28_is_present;
+		extra_data->data29_size = 0;
+		for (subpacket = 0; subpacket <= num_of_fingers; subpacket++) {
+			retval = synaptics_rmi4_f12_find_sub(rmi4_data,
+					fhandler, query_8->data,
+					sizeof(query_8->data),
+					9, 29, subpacket);
+			if (retval == 1)
+				extra_data->data29_size += 2;
+			else if (retval < 0)
+				goto exit;
+		}
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_58_offset,
+				ctrl_58->data,
+				sizeof(ctrl_58->data));
+		if (retval < 0)
+			goto exit;
+		rmi4_data->force_min =
+				(int)(ctrl_58->min_force_lsb << 0) |
+				(int)(ctrl_58->min_force_msb << 8);
+		rmi4_data->force_max =
+				(int)(ctrl_58->max_force_lsb << 0) |
+				(int)(ctrl_58->max_force_msb << 8);
+		rmi4_data->report_pressure = true;
+	} else {
+		extra_data->data29_size = 0;
+		rmi4_data->report_pressure = false;
+	}
+#endif
+
+	rmi4_data->report_enable = RPT_DEFAULT;
+#ifdef REPORT_2D_Z
+	rmi4_data->report_enable |= RPT_Z;
+#endif
+#ifdef REPORT_2D_W
+	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
+#endif
+
+	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
+			fhandler->full_addr.ctrl_base + ctrl_28_offset);
+	if (retval < 0)
+		goto exit;
+
+	if (query_5->ctrl8_is_present) {
+		rmi4_data->wedge_sensor = false;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_8_offset,
+				ctrl_8->data,
+				sizeof(ctrl_8->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_8->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_8->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_8->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	} else {
+		rmi4_data->wedge_sensor = true;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + ctrl_31_offset,
+				ctrl_31->data,
+				sizeof(ctrl_31->data));
+		if (retval < 0)
+			goto exit;
+
+		/* Maximum x and y */
+		rmi4_data->sensor_max_x =
+				((unsigned int)ctrl_31->max_x_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_x_coord_msb << 8);
+		rmi4_data->sensor_max_y =
+				((unsigned int)ctrl_31->max_y_coord_lsb << 0) |
+				((unsigned int)ctrl_31->max_y_coord_msb << 8);
+
+		rmi4_data->max_touch_width = MAX_F12_TOUCH_WIDTH;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Function %02x max x = %d max y = %d\n",
+			__func__, fhandler->fn_number,
+			rmi4_data->sensor_max_x,
+			rmi4_data->sensor_max_y);
+
+	if (bdata->swap_axes) {
+		temp = rmi4_data->sensor_max_x;
+		rmi4_data->sensor_max_x = rmi4_data->sensor_max_y;
+		rmi4_data->sensor_max_y = temp;
+	}
+
+	rmi4_data->f12_wakeup_gesture = query_5->ctrl27_is_present;
+	if (rmi4_data->f12_wakeup_gesture) {
+		extra_data->ctrl20_offset = ctrl_20_offset;
+		extra_data->data4_offset = query_8->data0_is_present +
+				query_8->data1_is_present +
+				query_8->data2_is_present +
+				query_8->data3_is_present;
+	}
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	/* Allocate memory for finger data storage space */
+	fhandler->data_size = num_of_fingers * size_of_2d_data;
+	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
+	if (!fhandler->data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fhandler->data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+exit:
+	kfree(query_5);
+	kfree(query_8);
+	kfree(ctrl_8);
+	kfree(ctrl_23);
+	kfree(ctrl_31);
+	kfree(ctrl_58);
+
+	return retval;
+}
+
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	struct synaptics_rmi4_f1a_handle *f1a;
+
+	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+	if (!f1a) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for function handle\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	fhandler->data = (void *)f1a;
+	fhandler->extra = NULL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fhandler->full_addr.query_base,
+			f1a->button_query.data,
+			sizeof(f1a->button_query.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query registers\n",
+				__func__);
+		return retval;
+	}
+
+	f1a->max_count = f1a->button_query.max_button_count + 1;
+
+	f1a->button_control.txrx_map = kzalloc(f1a->max_count * 2, GFP_KERNEL);
+	if (!f1a->button_control.txrx_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for tx rx mapping\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	f1a->button_bitmask_size = (f1a->max_count + 7) / 8;
+
+	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+			sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+	if (!f1a->button_data_buffer) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for data buffer\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	f1a->button_map = kcalloc(f1a->max_count,
+			sizeof(*(f1a->button_map)), GFP_KERNEL);
+	if (!f1a->button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for button map\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int synaptics_rmi4_f1a_button_map(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char offset = 0;
+	struct synaptics_rmi4_f1a_query_4 query_4;
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	rmi4_data->valid_button_count = f1a->valid_button_count;
+
+	offset = f1a->button_query.has_general_control +
+			f1a->button_query.has_interrupt_enable +
+			f1a->button_query.has_multibutton_select;
+
+	if (f1a->button_query.has_tx_rx_map) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.ctrl_base + offset,
+				f1a->button_control.txrx_map,
+				f1a->max_count * 2);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tx rx mapping\n",
+					__func__);
+			return retval;
+		}
+
+		rmi4_data->button_txrx_mapping = f1a->button_control.txrx_map;
+	}
+
+	if (f1a->button_query.has_query4) {
+		offset = 2 + f1a->button_query.has_query2 +
+				f1a->button_query.has_query3;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				fhandler->full_addr.query_base + offset,
+				query_4.data,
+				sizeof(query_4.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read button features 4\n",
+					__func__);
+			return retval;
+		}
+
+		if (query_4.has_ctrl24)
+			rmi4_data->external_afe_buttons = true;
+		else
+			rmi4_data->external_afe_buttons = false;
+	}
+
+	if (!bdata->cap_button_map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: cap_button_map is NULL in board file\n",
+				__func__);
+		return -ENODEV;
+	} else if (!bdata->cap_button_map->map) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Button map is missing in board file\n",
+				__func__);
+		return -ENODEV;
+	} else {
+		if (bdata->cap_button_map->nbuttons != f1a->max_count) {
+			f1a->valid_button_count = min(f1a->max_count,
+					bdata->cap_button_map->nbuttons);
+		} else {
+			f1a->valid_button_count = f1a->max_count;
+		}
+
+		for (ii = 0; ii < f1a->valid_button_count; ii++)
+			f1a->button_map[ii] = bdata->cap_button_map->map[ii];
+
+		rmi4_data->valid_button_count = f1a->valid_button_count;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+	if (f1a) {
+		kfree(f1a->button_control.txrx_map);
+		kfree(f1a->button_data_buffer);
+		kfree(f1a->button_map);
+		kfree(f1a);
+		fhandler->data = NULL;
+	}
+}
+
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn *fhandler,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count)
+{
+	int retval;
+
+	fhandler->fn_number = fd->fn_number;
+	fhandler->num_of_data_sources = fd->intr_src_count;
+
+	synaptics_rmi4_set_intr_mask(fhandler, fd, intr_count);
+
+	retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	retval = synaptics_rmi4_f1a_button_map(rmi4_data, fhandler);
+	if (retval < 0)
+		goto error_exit;
+
+	rmi4_data->button_0d_enabled = 1;
+
+	return 0;
+
+error_exit:
+	synaptics_rmi4_f1a_kfree(fhandler);
+
+	return retval;
+}
+
+static void synaptics_rmi4_empty_fn_list(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_fn *fhandler_temp;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry_safe(fhandler,
+				fhandler_temp,
+				&rmi->support_fn_list,
+				link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+				synaptics_rmi4_f1a_kfree(fhandler);
+			} else {
+				kfree(fhandler->extra);
+				kfree(fhandler->data);
+			}
+			list_del(&fhandler->link);
+			kfree(fhandler);
+		}
+	}
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+}
+
+static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
+		bool *was_in_bl_mode)
+{
+	int retval;
+	int timeout = CHECK_STATUS_TIMEOUT_MS;
+	struct synaptics_rmi4_f01_device_status status;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_data_base_addr,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0)
+		return retval;
+
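+	/*
+	 * Poll every 20 ms until the firmware CRC check completes or the
+	 * timeout expires.
+	 */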
+	while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+		if (timeout > 0)
+			msleep(20);
+		else
+			return -EINVAL;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr,
+				status.data,
+				sizeof(status.data));
+		if (retval < 0)
+			return retval;
+
+		timeout -= 20;
+	}
+
+	if (timeout != CHECK_STATUS_TIMEOUT_MS)
+		*was_in_bl_mode = true;
+
+	if (status.flash_prog == 1) {
+		rmi4_data->flash_prog_mode = true;
+		pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+				__func__,
+				status.status_code);
+	} else {
+		rmi4_data->flash_prog_mode = false;
+	}
+
+	return 0;
+}
+
+static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char device_ctrl;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+		return retval;
+	}
+
+	rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
+	device_ctrl |= CONFIGURED;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set configured\n",
+				__func__);
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+		struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+	*fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL);
+	if (!(*fhandler))
+		return -ENOMEM;
+
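+	/* Page number forms the upper byte of each 16-bit register address */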
+	(*fhandler)->full_addr.data_base =
+			(rmi_fd->data_base_addr |
+			(page_number << 8));
+	(*fhandler)->full_addr.ctrl_base =
+			(rmi_fd->ctrl_base_addr |
+			(page_number << 8));
+	(*fhandler)->full_addr.cmd_base =
+			(rmi_fd->cmd_base_addr |
+			(page_number << 8));
+	(*fhandler)->full_addr.query_base =
+			(rmi_fd->query_base_addr |
+			(page_number << 8));
+
+	return 0;
+}
+
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char page_number;
+	unsigned char intr_count;
+	unsigned char *f01_query;
+	unsigned short pdt_entry_addr;
+	bool f01found;
+	bool f35found;
+	bool was_in_bl_mode;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+rescan_pdt:
+	f01found = false;
+	f35found = false;
+	was_in_bl_mode = false;
+	intr_count = 0;
+	INIT_LIST_HEAD(&rmi->support_fn_list);
+
+	/* Scan the page description tables of the pages to service */
+	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
+		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+				pdt_entry_addr -= PDT_ENTRY_SIZE) {
+			pdt_entry_addr |= (page_number << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					pdt_entry_addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			pdt_entry_addr &= ~(MASK_8BIT << 8);
+
+			fhandler = NULL;
+
+			if (rmi_fd.fn_number == 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Reached end of PDT\n",
+						__func__);
+				break;
+			}
+
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: F%02x found (page %d)\n",
+					__func__, rmi_fd.fn_number,
+					page_number);
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				f01found = true;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f01_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+
+				retval = synaptics_rmi4_check_status(rmi4_data,
+						&was_in_bl_mode);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to check status\n",
+							__func__);
+					return retval;
+				}
+
+				if (was_in_bl_mode) {
+					kfree(fhandler);
+					fhandler = NULL;
+					goto rescan_pdt;
+				}
+
+				if (rmi4_data->flash_prog_mode)
+					goto flash_prog_mode;
+
+				break;
+			case SYNAPTICS_RMI4_F11:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f11_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F12:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f12_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0)
+					return retval;
+				break;
+			case SYNAPTICS_RMI4_F1A:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				retval = synaptics_rmi4_f1a_init(rmi4_data,
+						fhandler, &rmi_fd, intr_count);
+				if (retval < 0) {
+#ifdef IGNORE_FN_INIT_FAILURE
+					kfree(fhandler);
+					fhandler = NULL;
+#else
+					return retval;
+#endif
+				}
+				break;
+#ifdef USE_DATA_SERVER
+			case SYNAPTICS_RMI4_F21:
+				if (rmi_fd.intr_src_count == 0)
+					break;
+
+				retval = synaptics_rmi4_alloc_fh(&fhandler,
+						&rmi_fd, page_number);
+				if (retval < 0) {
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Failed to alloc for F%d\n",
+							__func__,
+							rmi_fd.fn_number);
+					return retval;
+				}
+
+				fhandler->fn_number = rmi_fd.fn_number;
+				fhandler->num_of_data_sources =
+						rmi_fd.intr_src_count;
+
+				synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
+						intr_count);
+				break;
+#endif
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				break;
+#ifdef F51_DISCRETE_FORCE
+			case SYNAPTICS_RMI4_F51:
+				rmi4_data->f51_query_base_addr =
+						rmi_fd.query_base_addr |
+						(page_number << 8);
+				break;
+#endif
+			}
+
+			/* Accumulate the interrupt count */
+			intr_count += rmi_fd.intr_src_count;
+
+			if (fhandler && rmi_fd.intr_src_count) {
+				list_add_tail(&fhandler->link,
+						&rmi->support_fn_list);
+			}
+		}
+	}
+
+	if (!f01found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F01\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			pr_notice("%s: In microbootloader mode\n",
+					__func__);
+			return 0;
+		}
+	}
+
+flash_prog_mode:
+	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Number of interrupt registers = %d\n",
+			__func__, rmi4_data->num_of_intr_regs);
+
+	f01_query = kmalloc(F01_STD_QUERY_LEN, GFP_KERNEL);
+	if (!f01_query) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for f01_query\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr,
+			f01_query,
+			F01_STD_QUERY_LEN);
+	if (retval < 0) {
+		kfree(f01_query);
+		return retval;
+	}
+
+	/* RMI Version 4.0 currently supported */
+	rmi->version_major = 4;
+	rmi->version_minor = 0;
+
+	rmi->manufacturer_id = f01_query[0];
+	rmi->product_props = f01_query[1];
+	rmi->product_info[0] = f01_query[2];
+	rmi->product_info[1] = f01_query[3];
+	retval = secure_memcpy(rmi->product_id_string,
+			sizeof(rmi->product_id_string),
+			&f01_query[11],
+			F01_STD_QUERY_LEN - 11,
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+
+	kfree(f01_query);
+
+	if (rmi->manufacturer_id != 1) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Non-Synaptics device found, manufacturer ID = %d\n",
+				__func__, rmi->manufacturer_id);
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+			rmi->build_id,
+			sizeof(rmi->build_id));
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->firmware_id = (unsigned int)rmi->build_id[0] +
+			(unsigned int)rmi->build_id[1] * 0x100 +
+			(unsigned int)rmi->build_id[2] * 0x10000;
+
+	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+	/*
+	 * Map out the interrupt bit masks for the interrupt sources
+	 * from the registered function handlers.
+	 */
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+						fhandler->intr_mask;
+			}
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture)
+		rmi4_data->enable_wakeup_gesture = WAKEUP_GESTURE;
+	else
+		rmi4_data->enable_wakeup_gesture = false;
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	return 0;
+}
+
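+/*
+ * Request a GPIO and set its direction and initial output state, or free
+ * it when config is false.
+ */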
+static int synaptics_rmi4_gpio_setup(int gpio, bool config, int dir, int state)
+{
+	int retval = 0;
+	char buf[16];
+
+	if (config) {
+		snprintf(buf, sizeof(buf), "dsx_gpio_%u\n", gpio);
+
+		retval = gpio_request(gpio, buf);
+		if (retval) {
+			pr_err("%s: Failed to get gpio %d (code: %d)",
+					__func__, gpio, retval);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval) {
+			pr_err("%s: Failed to set gpio %d direction",
+					__func__, gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return retval;
+}
+
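+/*
+ * Declare the input device capabilities: 2D touch axes, optional pressure
+ * reporting, multitouch slots, 0D buttons, virtual keys and the wakeup
+ * gesture key.
+ */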
+static void synaptics_rmi4_set_params(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+	struct synaptics_rmi4_f1a_handle *f1a;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_POSITION_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MAJOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_TOUCH_MINOR, 0,
+			rmi4_data->max_touch_width, 0, 0);
+#endif
+
+	rmi4_data->input_settings.sensor_max_x = rmi4_data->sensor_max_x;
+	rmi4_data->input_settings.sensor_max_y = rmi4_data->sensor_max_y;
+	rmi4_data->input_settings.max_touch_width = rmi4_data->max_touch_width;
+
+#ifdef REPORT_2D_PRESSURE
+	if (rmi4_data->report_pressure) {
+		input_set_abs_params(rmi4_data->input_dev,
+				ABS_MT_PRESSURE, rmi4_data->force_min,
+				rmi4_data->force_max, 0, 0);
+
+		rmi4_data->input_settings.force_min = rmi4_data->force_min;
+		rmi4_data->input_settings.force_max = rmi4_data->force_max;
+	}
+#elif defined(F51_DISCRETE_FORCE)
+	input_set_abs_params(rmi4_data->input_dev,
+			ABS_MT_PRESSURE, 0,
+			FORCE_LEVEL_MAX, 0, 0);
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+#ifdef KERNEL_ABOVE_3_6
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers, INPUT_MT_DIRECT);
+#else
+	input_mt_init_slots(rmi4_data->input_dev,
+			rmi4_data->num_of_fingers);
+#endif
+#endif
+
+	rmi4_data->input_settings.num_of_fingers = rmi4_data->num_of_fingers;
+
+	f1a = NULL;
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+				f1a = fhandler->data;
+		}
+	}
+
+	if (f1a) {
+		for (ii = 0; ii < f1a->valid_button_count; ii++) {
+			set_bit(f1a->button_map[ii],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, f1a->button_map[ii]);
+		}
+
+		rmi4_data->input_settings.valid_button_count =
+				f1a->valid_button_count;
+	}
+
+	if (vir_button_map->nbuttons) {
+		for (ii = 0; ii < vir_button_map->nbuttons; ii++) {
+			set_bit(vir_button_map->map[ii * 5],
+					rmi4_data->input_dev->keybit);
+			input_set_capability(rmi4_data->input_dev,
+					EV_KEY, vir_button_map->map[ii * 5]);
+		}
+	}
+
+	if (rmi4_data->f11_wakeup_gesture || rmi4_data->f12_wakeup_gesture) {
+		set_bit(KEY_WAKEUP, rmi4_data->input_dev->keybit);
+		input_set_capability(rmi4_data->input_dev, EV_KEY, KEY_WAKEUP);
+	}
+}
+
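+/*
+ * Allocate, configure and register the touch input device, plus an
+ * optional stylus device, based on the results of the device query.
+ */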
+static int synaptics_rmi4_set_input_dev(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+				rmi4_data->hw_if->board_data;
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate input device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_input_device;
+	}
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto err_query_device;
+	}
+
+	rmi4_data->input_dev->name = PLATFORM_DRIVER_NAME;
+	rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+	rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->input_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+	if (bdata->max_y_for_2d >= 0)
+		rmi4_data->sensor_max_y = bdata->max_y_for_2d;
+
+	synaptics_rmi4_set_params(rmi4_data);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register input device\n",
+				__func__);
+		goto err_register_input;
+	}
+
+	rmi4_data->input_settings.stylus_enable = rmi4_data->stylus_enable;
+	rmi4_data->input_settings.eraser_enable = rmi4_data->eraser_enable;
+
+	if (!rmi4_data->stylus_enable)
+		return 0;
+
+	rmi4_data->stylus_dev = input_allocate_device();
+	if (rmi4_data->stylus_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate stylus device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_stylus_device;
+	}
+
+	rmi4_data->stylus_dev->name = STYLUS_DRIVER_NAME;
+	rmi4_data->stylus_dev->phys = STYLUS_PHYS_NAME;
+	rmi4_data->stylus_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	rmi4_data->stylus_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	rmi4_data->stylus_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(rmi4_data->stylus_dev, rmi4_data);
+
+	set_bit(EV_KEY, rmi4_data->stylus_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->stylus_dev->evbit);
+	set_bit(BTN_TOUCH, rmi4_data->stylus_dev->keybit);
+	set_bit(BTN_TOOL_PEN, rmi4_data->stylus_dev->keybit);
+	if (rmi4_data->eraser_enable)
+		set_bit(BTN_TOOL_RUBBER, rmi4_data->stylus_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, rmi4_data->stylus_dev->propbit);
+#endif
+
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_X, 0,
+			rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->stylus_dev, ABS_Y, 0,
+			rmi4_data->sensor_max_y, 0, 0);
+
+	retval = input_register_device(rmi4_data->stylus_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register stylus device\n",
+				__func__);
+		goto err_register_stylus;
+	}
+
+	return 0;
+
+err_register_stylus:
+	rmi4_data->stylus_dev = NULL;
+
+err_stylus_device:
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+
+err_register_input:
+err_query_device:
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_free_device(rmi4_data->input_dev);
+
+err_input_device:
+	return retval;
+}
+
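+/*
+ * Configure the attention, power and reset GPIOs and run the power-up
+ * and reset sequence defined in the board data.
+ */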
+static int synaptics_rmi4_set_gpio(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	retval = synaptics_rmi4_gpio_setup(
+			bdata->irq_gpio,
+			true, 0, 0);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to configure attention GPIO\n",
+				__func__);
+		goto err_gpio_irq;
+	}
+
+	if (bdata->power_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure power GPIO\n",
+					__func__);
+			goto err_gpio_power;
+		}
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		retval = synaptics_rmi4_gpio_setup(
+				bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to configure reset GPIO\n",
+					__func__);
+			goto err_gpio_reset;
+		}
+	}
+
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_gpio_power:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+err_gpio_irq:
+	return retval;
+}
+
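+/*
+ * Obtain the optional pinctrl handle and look up the active, suspend and
+ * release pin states.
+ */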
+static int synaptics_dsx_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	/* Get pinctrl if target uses pinctrl */
+	rmi4_data->ts_pinctrl = devm_pinctrl_get((rmi4_data->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
+		retval = PTR_ERR(rmi4_data->ts_pinctrl);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	rmi4_data->pinctrl_state_active
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_active);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Cannot look up %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_suspend
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Cannot look up %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	rmi4_data->pinctrl_state_release
+		= pinctrl_lookup_state(rmi4_data->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
+		dev_err(rmi4_data->pdev->dev.parent,
+			"Cannot look up %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return 0;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(rmi4_data->ts_pinctrl);
+err_pinctrl_get:
+	rmi4_data->ts_pinctrl = NULL;
+	return retval;
+}
+
+
+static int synaptics_rmi4_get_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool get)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if ((bdata->pwr_reg_name != NULL) && (*bdata->pwr_reg_name != 0)) {
+		rmi4_data->pwr_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(rmi4_data->pwr_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get power regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->pwr_reg);
+			goto regulator_put;
+		}
+	}
+
+	retval = regulator_set_load(rmi4_data->pwr_reg,
+		20000);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to set regulator current avdd\n",
+				__func__);
+		goto regulator_put;
+	}
+
+	retval = regulator_set_voltage(rmi4_data->pwr_reg,
+			3000000,
+			3000000);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set regulator voltage avdd\n",
+				__func__);
+		goto regulator_put;
+	}
+
+	if ((bdata->bus_reg_name != NULL) && (*bdata->bus_reg_name != 0)) {
+		rmi4_data->bus_reg = regulator_get(rmi4_data->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(rmi4_data->bus_reg)) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to get bus pullup regulator\n",
+					__func__);
+			retval = PTR_ERR(rmi4_data->bus_reg);
+			goto regulator_put;
+		}
+	}
+
+	retval = regulator_set_load(rmi4_data->bus_reg,
+		62000);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set regulator current vdd\n",
+				__func__);
+		goto regulator_put;
+	}
+
+	retval = regulator_set_voltage(rmi4_data->bus_reg,
+			1800000,
+			1800000);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set regulator voltage vdd\n",
+				__func__);
+		goto regulator_put;
+	}
+
+	return 0;
+
+regulator_put:
+	if (rmi4_data->pwr_reg) {
+		regulator_put(rmi4_data->pwr_reg);
+		rmi4_data->pwr_reg = NULL;
+	}
+
+	if (rmi4_data->bus_reg) {
+		regulator_put(rmi4_data->bus_reg);
+		rmi4_data->bus_reg = NULL;
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_enable_reg(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!enable) {
+		retval = 0;
+		goto disable_pwr_reg;
+	}
+
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 0) {
+		retval = regulator_enable(rmi4_data->bus_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable bus pullup regulator\n",
+					__func__);
+			goto exit;
+		}
+		rmi4_data->vdd_status = 1;
+	}
+
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 0) {
+		retval = regulator_enable(rmi4_data->pwr_reg);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to enable power regulator\n",
+					__func__);
+			goto disable_bus_reg;
+		}
+		rmi4_data->avdd_status = 1;
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (rmi4_data->pwr_reg && rmi4_data->avdd_status == 1) {
+		regulator_disable(rmi4_data->pwr_reg);
+		rmi4_data->avdd_status = 0;
+	}
+
+disable_bus_reg:
+	if (rmi4_data->bus_reg && rmi4_data->vdd_status == 1) {
+		regulator_disable(rmi4_data->bus_reg);
+		rmi4_data->vdd_status = 0;
+	}
+
+exit:
+	return retval;
+}
+
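+/*
+ * Release all active touch (and stylus) contacts so no object is left
+ * reported as down.
+ */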
+static int synaptics_rmi4_free_fingers(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char ii;
+
+	mutex_lock(&(rmi4_data->rmi4_report_mutex));
+
+#ifdef TYPE_B_PROTOCOL
+	for (ii = 0; ii < rmi4_data->num_of_fingers; ii++) {
+		input_mt_slot(rmi4_data->input_dev, ii);
+		input_mt_report_slot_state(rmi4_data->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(rmi4_data->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(rmi4_data->input_dev);
+#endif
+	input_sync(rmi4_data->input_dev);
+
+	if (rmi4_data->stylus_enable) {
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOUCH, 0);
+		input_report_key(rmi4_data->stylus_dev,
+				BTN_TOOL_PEN, 0);
+		if (rmi4_data->eraser_enable) {
+			input_report_key(rmi4_data->stylus_dev,
+					BTN_TOOL_RUBBER, 0);
+		}
+		input_sync(rmi4_data->stylus_dev);
+	}
+
+	mutex_unlock(&(rmi4_data->rmi4_report_mutex));
+
+	rmi4_data->fingers_on_2d = false;
+
+	return 0;
+}
+
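+/*
+ * Issue a software reset through the F01 command register, wait for the
+ * reset delay, and rerun the bus-specific hardware init if one is provided.
+ */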
+static int synaptics_rmi4_sw_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char command = 0x01;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_cmd_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0)
+		return retval;
+
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+
+	if (rmi4_data->hw_if->ui_hw_init) {
+		retval = rmi4_data->hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
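+/*
+ * Return nonzero if the settings reported after a reset differ from those
+ * used to register the current input device, meaning the input device has
+ * to be rebuilt.
+ */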
+static int synaptics_rmi4_do_rebuild(struct synaptics_rmi4_data *rmi4_data)
+{
+	struct synaptics_rmi4_input_settings *settings;
+
+	settings = &(rmi4_data->input_settings);
+
+	if (settings->num_of_fingers != rmi4_data->num_of_fingers)
+		return 1;
+
+	if (settings->valid_button_count != rmi4_data->valid_button_count)
+		return 1;
+
+	if (settings->max_touch_width != rmi4_data->max_touch_width)
+		return 1;
+
+	if (settings->sensor_max_x != rmi4_data->sensor_max_x)
+		return 1;
+
+	if (settings->sensor_max_y != rmi4_data->sensor_max_y)
+		return 1;
+
+	if (settings->force_min != rmi4_data->force_min)
+		return 1;
+
+	if (settings->force_max != rmi4_data->force_max)
+		return 1;
+
+	if (settings->stylus_enable != rmi4_data->stylus_enable)
+		return 1;
+
+	if (settings->eraser_enable != rmi4_data->eraser_enable)
+		return 1;
+
+	return 0;
+}
+
+static void synaptics_rmi4_rebuild_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(delayed_work, struct synaptics_rmi4_data,
+			rb_work);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	mutex_lock(&exp_data.mutex);
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->remove != NULL)
+				exp_fhandler->exp_fn->remove(rmi4_data);
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto exit;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->init != NULL)
+				exp_fhandler->exp_fn->init(rmi4_data);
+	}
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&exp_data.mutex);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+}
+
+static int synaptics_rmi4_reinit_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->fn_number == SYNAPTICS_RMI4_F12) {
+				synaptics_rmi4_f12_set_enables(rmi4_data, 0);
+				break;
+			}
+		}
+	}
+
+	retval = synaptics_rmi4_int_enable(rmi4_data, true);
+	if (retval < 0)
+		goto exit;
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reinit != NULL)
+				exp_fhandler->exp_fn->reinit(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	synaptics_rmi4_set_configured(rmi4_data);
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+	return retval;
+}
+
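+/*
+ * Reset the controller, rescan the PDT, notify the expansion modules, and
+ * optionally schedule a rebuild of the input device if the reported
+ * parameters changed.
+ */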
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data,
+		bool rebuild)
+{
+	int retval;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	mutex_lock(&(rmi4_data->rmi4_reset_mutex));
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+	retval = synaptics_rmi4_sw_reset(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		goto exit;
+	}
+
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+
+	retval = synaptics_rmi4_query_device(rmi4_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to query device\n",
+				__func__);
+		goto exit;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->reset != NULL)
+				exp_fhandler->exp_fn->reset(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	retval = 0;
+
+exit:
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+	mutex_unlock(&(rmi4_data->rmi4_reset_mutex));
+
+	if (rebuild && synaptics_rmi4_do_rebuild(rmi4_data)) {
+		queue_delayed_work(rmi4_data->rb_workqueue,
+				&rmi4_data->rb_work,
+				msecs_to_jiffies(REBUILD_WORK_DELAY_MS));
+	}
+
+	return retval;
+}
+
+#ifdef FB_READY_RESET
+static void synaptics_rmi4_reset_work(struct work_struct *work)
+{
+	int retval = 0;
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(work, struct synaptics_rmi4_data,
+			reset_work);
+
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			goto err;
+		}
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+err:
+	return;
+}
+#endif
+
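+/*
+ * Put the controller into sensor sleep or return it to normal operation
+ * by updating the F01 device control register.
+ */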
+static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device control\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl = device_ctrl & ~MASK_3BIT;
+	if (enable)
+		device_ctrl = device_ctrl | SENSOR_SLEEP;
+	else
+		device_ctrl = device_ctrl | no_sleep_setting | NORMAL_OPERATION;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write device control\n",
+				__func__);
+		return retval;
+	}
+
+	rmi4_data->sensor_sleep = enable;
+
+	return retval;
+}
+
+static void synaptics_rmi4_exp_fn_work(struct work_struct *work)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler_temp;
+	struct synaptics_rmi4_data *rmi4_data = exp_data.rmi4_data;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+	mutex_lock(&rmi4_data->rmi4_reset_mutex);
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry_safe(exp_fhandler,
+				exp_fhandler_temp,
+				&exp_data.list,
+				link) {
+			if ((exp_fhandler->exp_fn->init != NULL) &&
+					exp_fhandler->insert) {
+				exp_fhandler->exp_fn->init(rmi4_data);
+				exp_fhandler->insert = false;
+			} else if ((exp_fhandler->exp_fn->remove != NULL) &&
+					exp_fhandler->remove) {
+				exp_fhandler->exp_fn->remove(rmi4_data);
+				list_del(&exp_fhandler->link);
+				kfree(exp_fhandler);
+			}
+		}
+	}
+	mutex_unlock(&exp_data.mutex);
+	mutex_unlock(&rmi4_data->rmi4_reset_mutex);
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+}
+
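+/*
+ * Called by expansion modules (firmware updater, test reporting, etc.) to
+ * register or unregister themselves; the actual init/remove callbacks run
+ * later from the expansion function work.
+ */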
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn,
+		bool insert)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	mutex_lock(&exp_data.mutex);
+	if (insert) {
+		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+		if (!exp_fhandler) {
+			pr_err("%s: Failed to alloc mem for expansion function\n",
+					__func__);
+			goto exit;
+		}
+		exp_fhandler->exp_fn = exp_fn;
+		exp_fhandler->insert = true;
+		exp_fhandler->remove = false;
+		list_add_tail(&exp_fhandler->link, &exp_data.list);
+	} else if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link) {
+			if (exp_fhandler->exp_fn->fn_type == exp_fn->fn_type) {
+				exp_fhandler->insert = false;
+				exp_fhandler->remove = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&exp_data.mutex);
+
+	if (exp_data.queue_work) {
+		queue_delayed_work(exp_data.workqueue,
+				&exp_data.work,
+				msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+	}
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+static int synaptics_rmi4_probe(struct platform_device *pdev)
+{
+	int retval;
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	const struct synaptics_dsx_board_data *bdata;
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		dev_err(&pdev->dev,
+				"%s: No hardware interface found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	bdata = hw_if->board_data;
+	if (!bdata) {
+		dev_err(&pdev->dev,
+				"%s: No board data found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi4_data = kzalloc(sizeof(*rmi4_data), GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&pdev->dev,
+				"%s: Failed to alloc mem for rmi4_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->pdev = pdev;
+	rmi4_data->current_page = MASK_8BIT;
+	rmi4_data->hw_if = hw_if;
+	rmi4_data->suspend = false;
+	rmi4_data->irq_enabled = false;
+	rmi4_data->fingers_on_2d = false;
+
+	rmi4_data->reset_device = synaptics_rmi4_reset_device;
+	rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+	rmi4_data->sleep_enable = synaptics_rmi4_sleep_enable;
+	rmi4_data->report_touch = synaptics_rmi4_report_touch;
+
+	mutex_init(&(rmi4_data->rmi4_reset_mutex));
+	mutex_init(&(rmi4_data->rmi4_report_mutex));
+	mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+	mutex_init(&(rmi4_data->rmi4_exp_init_mutex));
+	mutex_init(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	platform_set_drvdata(pdev, rmi4_data);
+
+	vir_button_map = bdata->vir_button_map;
+
+	retval = synaptics_rmi4_get_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to get regulators\n",
+				__func__);
+		goto err_get_reg;
+	}
+
+	retval = synaptics_rmi4_enable_reg(rmi4_data, true);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable regulators\n",
+				__func__);
+		goto err_enable_reg;
+	}
+
+	retval = synaptics_rmi4_set_gpio(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up GPIO's\n",
+				__func__);
+		goto err_set_gpio;
+	}
+
+	retval = synaptics_dsx_pinctrl_init(rmi4_data);
+	if (!retval && rmi4_data->ts_pinctrl) {
+		/*
+		 * The pinctrl handle is optional. If one is found, put the
+		 * pins into the active state; if not, continue without error.
+		 */
+		retval = pinctrl_select_state(rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_active);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to select %s pinstate %d\n",
+					__func__, PINCTRL_STATE_ACTIVE, retval);
+		}
+	}
+
+	if (hw_if->ui_hw_init) {
+		retval = hw_if->ui_hw_init(rmi4_data);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to initialize hardware interface\n",
+					__func__);
+			goto err_ui_hw_init;
+		}
+	}
+
+	retval = synaptics_rmi4_set_input_dev(rmi4_data);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to set up input device\n",
+				__func__);
+		goto err_set_input_dev;
+	}
+
+#ifdef CONFIG_FB
+	rmi4_data->fb_notifier.notifier_call = synaptics_rmi4_dsi_panel_notifier_cb;
+	retval = msm_drm_register_client(&rmi4_data->fb_notifier);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to register fb notifier client\n",
+				__func__);
+	}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+	rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+	rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+	register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	if (!exp_data.initialized) {
+		mutex_init(&exp_data.mutex);
+		INIT_LIST_HEAD(&exp_data.list);
+		exp_data.initialized = true;
+	}
+
+	rmi4_data->irq = gpio_to_irq(bdata->irq_gpio);
+
+	retval = synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	if (retval < 0) {
+		dev_err(&pdev->dev,
+				"%s: Failed to enable attention interrupt\n",
+				__func__);
+		goto err_enable_irq;
+	}
+
+	if (vir_button_map->nbuttons) {
+		rmi4_data->board_prop_dir = kobject_create_and_add(
+				"board_properties", NULL);
+		if (!rmi4_data->board_prop_dir) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create board_properties directory\n",
+					__func__);
+			retval = -ENOMEM;
+			goto err_virtual_buttons;
+		} else {
+			retval = sysfs_create_file(rmi4_data->board_prop_dir,
+					&virtual_key_map_attr.attr);
+			if (retval < 0) {
+				dev_err(&pdev->dev,
+						"%s: Failed to create virtual key map file\n",
+						__func__);
+				goto err_virtual_buttons;
+			}
+		}
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto err_sysfs;
+		}
+	}
+
+#ifdef USE_DATA_SERVER
+	memset(&interrupt_signal, 0, sizeof(interrupt_signal));
+	interrupt_signal.si_signo = SIGIO;
+	interrupt_signal.si_code = SI_USER;
+#endif
+
+	rmi4_data->rb_workqueue =
+			create_singlethread_workqueue("dsx_rebuild_workqueue");
+	INIT_DELAYED_WORK(&rmi4_data->rb_work, synaptics_rmi4_rebuild_work);
+
+	exp_data.workqueue = create_singlethread_workqueue("dsx_exp_workqueue");
+	INIT_DELAYED_WORK(&exp_data.work, synaptics_rmi4_exp_fn_work);
+	exp_data.rmi4_data = rmi4_data;
+	exp_data.queue_work = true;
+	queue_delayed_work(exp_data.workqueue,
+			&exp_data.work,
+			0);
+
+#ifdef FB_READY_RESET
+	rmi4_data->reset_workqueue =
+			create_singlethread_workqueue("dsx_reset_workqueue");
+	INIT_WORK(&rmi4_data->reset_work, synaptics_rmi4_reset_work);
+	queue_work(rmi4_data->reset_workqueue, &rmi4_data->reset_work);
+#endif
+
+	return retval;
+
+err_sysfs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_virtual_buttons:
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+err_enable_irq:
+#ifdef CONFIG_FB
+	msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+err_set_input_dev:
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+err_ui_hw_init:
+err_set_gpio:
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			retval = pinctrl_select_state(
+				rmi4_data->ts_pinctrl,
+				rmi4_data->pinctrl_state_release);
+			if (retval)
+				dev_err(&pdev->dev,
+					"%s: Failed to select release pinstate\n",
+					__func__);
+		}
+	}
+
+err_enable_reg:
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+err_get_reg:
+	kfree(rmi4_data);
+
+	return retval;
+}
+
+static int synaptics_rmi4_remove(struct platform_device *pdev)
+{
+	unsigned char attr_count;
+	struct synaptics_rmi4_data *rmi4_data = platform_get_drvdata(pdev);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+#ifdef FB_READY_RESET
+	cancel_work_sync(&rmi4_data->reset_work);
+	flush_workqueue(rmi4_data->reset_workqueue);
+	destroy_workqueue(rmi4_data->reset_workqueue);
+#endif
+
+	cancel_delayed_work_sync(&exp_data.work);
+	flush_workqueue(exp_data.workqueue);
+	destroy_workqueue(exp_data.workqueue);
+
+	cancel_delayed_work_sync(&rmi4_data->rb_work);
+	flush_workqueue(rmi4_data->rb_workqueue);
+	destroy_workqueue(rmi4_data->rb_workqueue);
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	if (rmi4_data->board_prop_dir) {
+		sysfs_remove_file(rmi4_data->board_prop_dir,
+				&virtual_key_map_attr.attr);
+		kobject_put(rmi4_data->board_prop_dir);
+	}
+
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+
+#ifdef CONFIG_FB
+	msm_drm_unregister_client(&rmi4_data->fb_notifier);
+#endif
+
+#ifdef USE_EARLYSUSPEND
+	unregister_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+	synaptics_rmi4_empty_fn_list(rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+	if (rmi4_data->stylus_enable) {
+		input_unregister_device(rmi4_data->stylus_dev);
+		rmi4_data->stylus_dev = NULL;
+	}
+
+	synaptics_rmi4_gpio_setup(bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->reset_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		synaptics_rmi4_gpio_setup(bdata->power_gpio, false, 0, 0);
+
+	if (rmi4_data->ts_pinctrl) {
+		if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
+			devm_pinctrl_put(rmi4_data->ts_pinctrl);
+			rmi4_data->ts_pinctrl = NULL;
+		} else {
+			pinctrl_select_state(rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_release);
+		}
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+	synaptics_rmi4_get_reg(rmi4_data, false);
+
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+#ifdef CONFIG_FB
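+/*
+ * MSM DRM panel notifier: suspend the touch controller when the panel
+ * blanks for powerdown and resume it when the panel unblanks.
+ */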
+static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	int transition;
+	struct msm_drm_notifier *evdata = data;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(self, struct synaptics_rmi4_data,
+			fb_notifier);
+
+	if (!evdata || (evdata->id != 0))
+		return 0;
+
+	if (evdata && evdata->data && rmi4_data) {
+		if (event == MSM_DRM_EVENT_BLANK) {
+			transition = *(int *)evdata->data;
+			if (transition == MSM_DRM_BLANK_POWERDOWN) {
+				synaptics_rmi4_suspend(&rmi4_data->pdev->dev);
+				rmi4_data->fb_ready = false;
+			} else if (transition == MSM_DRM_BLANK_UNBLANK) {
+				synaptics_rmi4_resume(&rmi4_data->pdev->dev);
+				rmi4_data->fb_ready = true;
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef USE_EARLYSUSPEND
+static int synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+	unsigned char device_ctrl;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+#ifdef SYNA_TDDI
+	if (rmi4_data->no_sleep_setting) {
+		synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+		device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+		synaptics_rmi4_reg_write(rmi4_data,
+				rmi4_data->f01_ctrl_base_addr,
+				&device_ctrl,
+				sizeof(device_ctrl));
+	}
+	synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+	usleep(TDDI_LPWG_WAIT_US);
+#endif
+	synaptics_rmi4_irq_enable(rmi4_data, false, false);
+	synaptics_rmi4_sleep_enable(rmi4_data, true);
+	synaptics_rmi4_free_fingers(rmi4_data);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->early_suspend != NULL)
+				exp_fhandler->exp_fn->early_suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+static int synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data =
+			container_of(h, struct synaptics_rmi4_data,
+			early_suspend);
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	rmi4_data->current_page = MASK_8BIT;
+
+	if (rmi4_data->suspend) {
+		synaptics_rmi4_sleep_enable(rmi4_data, false);
+		synaptics_rmi4_irq_enable(rmi4_data, true, false);
+	}
+
+exit:
+#ifdef FB_READY_RESET
+	if (rmi4_data->suspend) {
+		retval = synaptics_rmi4_reset_device(rmi4_data, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue reset command\n",
+					__func__);
+		}
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->late_resume != NULL)
+				exp_fhandler->exp_fn->late_resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+#endif
+
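+/*
+ * System suspend: either arm the wakeup gesture and keep the interrupt as
+ * a wake source, or put the sensor to sleep, select the suspend pin state
+ * and disable the regulators.
+ */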
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	unsigned char device_ctrl;
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		enable_irq_wake(rmi4_data->irq);
+		goto exit;
+	}
+
+	if (!rmi4_data->suspend) {
+#ifdef SYNA_TDDI
+		if (rmi4_data->no_sleep_setting) {
+			synaptics_rmi4_reg_read(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+			device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+			synaptics_rmi4_reg_write(rmi4_data,
+					rmi4_data->f01_ctrl_base_addr,
+					&device_ctrl,
+					sizeof(device_ctrl));
+		}
+		synaptics_rmi4_wakeup_gesture(rmi4_data, true);
+		usleep(TDDI_LPWG_WAIT_US);
+#endif
+		synaptics_rmi4_irq_enable(rmi4_data, false, false);
+		synaptics_rmi4_sleep_enable(rmi4_data, true);
+		synaptics_rmi4_free_fingers(rmi4_data);
+	}
+
+	if (rmi4_data->ts_pinctrl)
+		pinctrl_select_state(rmi4_data->ts_pinctrl,
+					rmi4_data->pinctrl_state_suspend);
+
+	synaptics_rmi4_enable_reg(rmi4_data, false);
+
+exit:
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->suspend != NULL)
+				exp_fhandler->exp_fn->suspend(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = true;
+
+	return 0;
+}
+
+static int synaptics_rmi4_resume(struct device *dev)
+{
+#ifdef FB_READY_RESET
+	int retval;
+#endif
+	struct synaptics_rmi4_exp_fhandler *exp_fhandler;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+	if (rmi4_data->stay_awake)
+		return 0;
+
+	if (rmi4_data->enable_wakeup_gesture) {
+		disable_irq_wake(rmi4_data->irq);
+		synaptics_rmi4_wakeup_gesture(rmi4_data, false);
+		goto exit;
+	}
+
+	synaptics_rmi4_enable_reg(rmi4_data, true);
+
+	if (rmi4_data->ts_pinctrl)
+		pinctrl_select_state(rmi4_data->ts_pinctrl,
+			rmi4_data->pinctrl_state_active);
+
+	rmi4_data->current_page = MASK_8BIT;
+
+	synaptics_rmi4_sleep_enable(rmi4_data, false);
+	synaptics_rmi4_irq_enable(rmi4_data, true, false);
+
+exit:
+#ifdef FB_READY_RESET
+	retval = synaptics_rmi4_reset_device(rmi4_data, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+	}
+#endif
+	mutex_lock(&exp_data.mutex);
+	if (!list_empty(&exp_data.list)) {
+		list_for_each_entry(exp_fhandler, &exp_data.list, link)
+			if (exp_fhandler->exp_fn->resume != NULL)
+				exp_fhandler->exp_fn->resume(rmi4_data);
+	}
+	mutex_unlock(&exp_data.mutex);
+
+	rmi4_data->suspend = false;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+#ifndef CONFIG_FB
+	.suspend = synaptics_rmi4_suspend,
+	.resume = synaptics_rmi4_resume,
+#endif
+};
+#endif
+
+static struct platform_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe = synaptics_rmi4_probe,
+	.remove = synaptics_rmi4_remove,
+};
+
+static int __init synaptics_rmi4_init(void)
+{
+	int retval;
+
+	retval = synaptics_rmi4_bus_init();
+	if (retval)
+		return retval;
+
+	return platform_driver_register(&synaptics_rmi4_driver);
+}
+
+static void __exit synaptics_rmi4_exit(void)
+{
+	platform_driver_unregister(&synaptics_rmi4_driver);
+
+	synaptics_rmi4_bus_exit();
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
new file mode 100644
index 0000000..3e0c0db
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -0,0 +1,535 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_DS4 (1 << 0)
+#define SYNAPTICS_DS5 (1 << 1)
+#define SYNAPTICS_DSX_DRIVER_PRODUCT (SYNAPTICS_DS4 | SYNAPTICS_DS5)
+#define SYNAPTICS_DSX_DRIVER_VERSION 0x2070
+
+#include <linux/version.h>
+#ifdef CONFIG_FB
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_6
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+/*
+*#define F51_DISCRETE_FORCE
+*#ifdef F51_DISCRETE_FORCE
+*#define FORCE_LEVEL_ADDR 0x0419
+*#define FORCE_LEVEL_MAX 255
+*#define CAL_DATA_SIZE 144
+*#endif
+*#define SYNA_TDDI
+*/
+#define PDT_PROPS (0X00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x00D0)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+#define ADDRESS_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F12 (0x12)
+#define SYNAPTICS_RMI4_F1A (0x1A)
+#define SYNAPTICS_RMI4_F21 (0x21)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F35 (0x35)
+#define SYNAPTICS_RMI4_F38 (0x38)
+#define SYNAPTICS_RMI4_F51 (0x51)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+#define SYNAPTICS_RMI4_FDB (0xDB)
+
+#define PRODUCT_INFO_SIZE 2
+#define PRODUCT_ID_SIZE 10
+#define BUILD_ID_SIZE 3
+
+#define F12_FINGERS_TO_SUPPORT 10
+#define F12_NO_OBJECT_STATUS 0x00
+#define F12_FINGER_STATUS 0x01
+#define F12_ACTIVE_STYLUS_STATUS 0x02
+#define F12_PALM_STATUS 0x03
+#define F12_HOVERING_FINGER_STATUS 0x05
+#define F12_GLOVED_FINGER_STATUS 0x06
+#define F12_NARROW_OBJECT_STATUS 0x07
+#define F12_HAND_EDGE_STATUS 0x08
+#define F12_COVER_STATUS 0x0A
+#define F12_STYLUS_STATUS 0x0B
+#define F12_ERASER_STATUS 0x0C
+#define F12_SMALL_OBJECT_STATUS 0x0D
+
+#define F12_GESTURE_DETECTION_LEN 5
+
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+#define PINCTRL_STATE_ACTIVE    "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE   "pmx_ts_release"
+
+enum exp_fn {
+	RMI_DEV = 0,
+	RMI_FW_UPDATER,
+	RMI_TEST_REPORTING,
+	RMI_PROXIMITY,
+	RMI_ACTIVE_PEN,
+	RMI_GESTURE,
+	RMI_VIDEO,
+	RMI_DEBUG,
+	RMI_LAST,
+};
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT entry
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_version: version of function
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+	union {
+		struct {
+			unsigned char query_base_addr;
+			unsigned char cmd_base_addr;
+			unsigned char ctrl_base_addr;
+			unsigned char data_base_addr;
+			unsigned char intr_src_count:3;
+			unsigned char reserved_1:2;
+			unsigned char fn_version:2;
+			unsigned char reserved_2:1;
+			unsigned char fn_number;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+/*
+ * struct synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+	unsigned short query_base;
+	unsigned short cmd_base;
+	unsigned short ctrl_base;
+	unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_f11_extra_data - extra data of F$11
+ * @data38_offset: offset to F11_2D_DATA38 register
+ */
+struct synaptics_rmi4_f11_extra_data {
+	unsigned char data38_offset;
+};
+
+/*
+ * struct synaptics_rmi4_f12_extra_data - extra data of F$12
+ * @data1_offset: offset to F12_2D_DATA01 register
+ * @data4_offset: offset to F12_2D_DATA04 register
+ * @data15_offset: offset to F12_2D_DATA15 register
+ * @data15_size: size of F12_2D_DATA15 register
+ * @data15_data: buffer for reading F12_2D_DATA15 register
+ * @data29_offset: offset to F12_2D_DATA29 register
+ * @data29_size: size of F12_2D_DATA29 register
+ * @data29_data: buffer for reading F12_2D_DATA29 register
+ * @ctrl20_offset: offset to F12_2D_CTRL20 register
+ */
+struct synaptics_rmi4_f12_extra_data {
+	unsigned char data1_offset;
+	unsigned char data4_offset;
+	unsigned char data15_offset;
+	unsigned char data15_size;
+	unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8];
+	unsigned char data29_offset;
+	unsigned char data29_size;
+	unsigned char data29_data[F12_FINGERS_TO_SUPPORT * 2];
+	unsigned char ctrl20_offset;
+};
+
+/*
+ * struct synaptics_rmi4_fn - RMI function handler
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ * @extra: pointer to extra data
+ */
+struct synaptics_rmi4_fn {
+	unsigned char fn_number;
+	unsigned char num_of_data_sources;
+	unsigned char num_of_data_points;
+	unsigned char intr_reg_num;
+	unsigned char intr_mask;
+	struct synaptics_rmi4_fn_full_addr full_addr;
+	struct list_head link;
+	int data_size;
+	void *data;
+	void *extra;
+};
+
+/*
+ * struct synaptics_rmi4_input_settings - current input settings
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @valid_button_count: number of valid 0D buttons
+ * @max_touch_width: maximum touch width
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ */
+struct synaptics_rmi4_input_settings {
+	unsigned char num_of_fingers;
+	unsigned char valid_button_count;
+	unsigned char max_touch_width;
+	int sensor_max_x;
+	int sensor_max_y;
+	int force_min;
+	int force_max;
+	bool stylus_enable;
+	bool eraser_enable;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: RMI protocol major version number
+ * @version_minor: RMI protocol minor version number
+ * @manufacturer_id: manufacturer ID
+ * @product_props: product properties
+ * @product_info: product information
+ * @product_id_string: product ID
+ * @build_id: firmware build ID
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+	unsigned int version_major;
+	unsigned int version_minor;
+	unsigned char manufacturer_id;
+	unsigned char product_props;
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	unsigned char product_id_string[PRODUCT_ID_SIZE + 1];
+	unsigned char build_id[BUILD_ID_SIZE];
+	struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - RMI4 device instance data
+ * @pdev: pointer to platform device
+ * @input_dev: pointer to associated input device
+ * @stylus_dev: pointer to associated stylus device
+ * @hw_if: pointer to hardware interface data
+ * @rmi4_mod_info: device information
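+ * @input_settings: current input settings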
+ * @board_prop_dir: /sys/board_properties directory for virtual key map file
+ * @pwr_reg: pointer to regulator for power control
+ * @bus_reg: pointer to regulator for bus pullup control
+ * @rmi4_reset_mutex: mutex for software reset
+ * @rmi4_report_mutex: mutex for input event reporting
+ * @rmi4_io_ctrl_mutex: mutex for communication interface I/O
+ * @rmi4_exp_init_mutex: mutex for expansion function module initialization
+ * @rmi4_irq_enable_mutex: mutex for enabling/disabling interrupt
+ * @rb_work: work for rebuilding input device
+ * @rb_workqueue: workqueue for rebuilding input device
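+ * @ts_pinctrl: pinctrl handle for the touch controller pins
+ * @pinctrl_state_active: pinctrl state for normal operation
+ * @pinctrl_state_suspend: pinctrl state for suspend
+ * @pinctrl_state_release: pinctrl state for releasing the pins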
+ * @fb_notifier: framebuffer notifier client
+ * @reset_work: work for issuing reset after display framebuffer ready
+ * @reset_workqueue: workqueue for issuing reset after display framebuffer ready
+ * @early_suspend: early suspend power management
+ * @current_page: current RMI page for register access
+ * @button_0d_enabled: switch for enabling 0d button support
+ * @num_of_tx: number of Tx channels for 2D touch
+ * @num_of_rx: number of Rx channels for 2D touch
+ * @num_of_fingers: maximum number of fingers for 2D touch
+ * @max_touch_width: maximum touch width
+ * @valid_button_count: number of valid 0D buttons
+ * @report_enable: input data to report for F$12
+ * @no_sleep_setting: default setting of NoSleep in F01_RMI_CTRL00 register
+ * @gesture_detection: detected gesture type and properties
+ * @intr_mask: interrupt enable mask
+ * @button_txrx_mapping: Tx Rx mapping of 0D buttons
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f$01
+ * @f01_cmd_base_addr: command base address for f$01
+ * @f01_ctrl_base_addr: control base address for f$01
+ * @f01_data_base_addr: data base address for f$01
+ * @f51_query_base_addr: query base address for f$51
+ * @firmware_id: firmware build ID
+ * @irq: attention interrupt
+ * @sensor_max_x: maximum x coordinate for 2D touch
+ * @sensor_max_y: maximum y coordinate for 2D touch
+ * @force_min: minimum force value
+ * @force_max: maximum force value
+ * @set_wakeup_gesture: location of set wakeup gesture
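+ * @avdd_status: enable status of the AVDD supply regulator
+ * @vdd_status: enable status of the VDD supply regulator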
+ * @flash_prog_mode: flag to indicate flash programming mode status
+ * @irq_enabled: flag to indicate attention interrupt enable status
+ * @fingers_on_2d: flag to indicate presence of fingers in 2D area
+ * @suspend: flag to indicate whether in suspend state
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @stay_awake: flag to indicate whether to stay awake during suspend
+ * @fb_ready: flag to indicate whether display framebuffer in ready state
+ * @f11_wakeup_gesture: flag to indicate support for wakeup gestures in F$11
+ * @f12_wakeup_gesture: flag to indicate support for wakeup gestures in F$12
+ * @enable_wakeup_gesture: flag to indicate usage of wakeup gestures
+ * @wedge_sensor: flag to indicate use of wedge sensor
+ * @report_pressure: flag to indicate reporting of pressure data
+ * @stylus_enable: flag to indicate reporting of stylus data
+ * @eraser_enable: flag to indicate reporting of eraser data
+ * @external_afe_buttons: flag to indicate presence of external AFE buttons
+ * @reset_device: pointer to device reset function
+ * @irq_enable: pointer to interrupt enable function
+ * @sleep_enable: pointer to sleep enable function
+ * @report_touch: pointer to touch reporting function
+ */
+struct synaptics_rmi4_data {
+	struct platform_device *pdev;
+	struct input_dev *input_dev;
+	struct input_dev *stylus_dev;
+	const struct synaptics_dsx_hw_interface *hw_if;
+	struct synaptics_rmi4_device_info rmi4_mod_info;
+	struct synaptics_rmi4_input_settings input_settings;
+	struct kobject *board_prop_dir;
+	struct regulator *pwr_reg;
+	struct regulator *bus_reg;
+	struct mutex rmi4_reset_mutex;
+	struct mutex rmi4_report_mutex;
+	struct mutex rmi4_io_ctrl_mutex;
+	struct mutex rmi4_exp_init_mutex;
+	struct mutex rmi4_irq_enable_mutex;
+	struct delayed_work rb_work;
+	struct workqueue_struct *rb_workqueue;
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	struct pinctrl_state *pinctrl_state_release;
+#ifdef CONFIG_FB
+	struct notifier_block fb_notifier;
+	struct work_struct reset_work;
+	struct workqueue_struct *reset_workqueue;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend early_suspend;
+#endif
+	unsigned char current_page;
+	unsigned char button_0d_enabled;
+	unsigned char num_of_tx;
+	unsigned char num_of_rx;
+	unsigned char num_of_fingers;
+	unsigned char max_touch_width;
+	unsigned char valid_button_count;
+	unsigned char report_enable;
+	unsigned char no_sleep_setting;
+	unsigned char gesture_detection[F12_GESTURE_DETECTION_LEN];
+	unsigned char intr_mask[MAX_INTR_REGISTERS];
+	unsigned char *button_txrx_mapping;
+	unsigned short num_of_intr_regs;
+	unsigned short f01_query_base_addr;
+	unsigned short f01_cmd_base_addr;
+	unsigned short f01_ctrl_base_addr;
+	unsigned short f01_data_base_addr;
+#ifdef F51_DISCRETE_FORCE
+	unsigned short f51_query_base_addr;
+#endif
+	unsigned int firmware_id;
+	int irq;
+	int sensor_max_x;
+	int sensor_max_y;
+	int force_min;
+	int force_max;
+	int set_wakeup_gesture;
+	int avdd_status;
+	int vdd_status;
+	bool flash_prog_mode;
+	bool irq_enabled;
+	bool fingers_on_2d;
+	bool suspend;
+	bool sensor_sleep;
+	bool stay_awake;
+	bool fb_ready;
+	bool f11_wakeup_gesture;
+	bool f12_wakeup_gesture;
+	bool enable_wakeup_gesture;
+	bool wedge_sensor;
+	bool report_pressure;
+	bool stylus_enable;
+	bool eraser_enable;
+	bool external_afe_buttons;
+	int (*reset_device)(struct synaptics_rmi4_data *rmi4_data,
+			bool rebuild);
+	int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable,
+			bool attn_only);
+	int (*sleep_enable)(struct synaptics_rmi4_data *rmi4_data,
+			bool enable);
+	void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
+			struct synaptics_rmi4_fn *fhandler);
+};
+
+struct synaptics_dsx_bus_access {
+	unsigned char type;
+	int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+	int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+		unsigned char *data, unsigned int length);
+};
+
+struct synaptics_dsx_hw_interface {
+	struct synaptics_dsx_board_data *board_data;
+	const struct synaptics_dsx_bus_access *bus_access;
+	int (*bl_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+	int (*ui_hw_init)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+struct synaptics_rmi4_exp_fn {
+	enum exp_fn fn_type;
+	int (*init)(struct synaptics_rmi4_data *rmi4_data);
+	void (*remove)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reset)(struct synaptics_rmi4_data *rmi4_data);
+	void (*reinit)(struct synaptics_rmi4_data *rmi4_data);
+	void (*early_suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*suspend)(struct synaptics_rmi4_data *rmi4_data);
+	void (*resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*late_resume)(struct synaptics_rmi4_data *rmi4_data);
+	void (*attn)(struct synaptics_rmi4_data *rmi4_data,
+			unsigned char intr_mask);
+};
+
+int synaptics_rmi4_bus_init(void);
+
+void synaptics_rmi4_bus_exit(void);
+
+void synaptics_rmi4_new_function(struct synaptics_rmi4_exp_fn *exp_fn_module,
+		bool insert);
+
+int synaptics_fw_updater(const unsigned char *fw_data);
+
+static inline int synaptics_rmi4_reg_read(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->read(rmi4_data, addr, data, len);
+}
+
+static inline int synaptics_rmi4_reg_write(
+		struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr,
+		unsigned char *data,
+		unsigned int len)
+{
+	return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
+}
+
+static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
+static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
+			__func__, attr->attr.name);
+	return -EPERM;
+}
+
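+/*
+ * Bounds-checked memcpy: refuses NULL pointers and copy counts larger
+ * than either the destination or source buffer, returning -EINVAL
+ * instead of overflowing.
+ */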
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+		const unsigned char *src, unsigned int src_size,
+		unsigned int count)
+{
+	if (dest == NULL || src == NULL)
+		return -EINVAL;
+
+	if (count > dest_size || count > src_size)
+		return -EINVAL;
+
+	memcpy((void *)dest, (const void *)src, count);
+
+	return 0;
+}
+
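+/*
+ * batohs converts a little-endian two-byte array to a host-order short;
+ * hstoba does the reverse.
+ */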
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+	*dest = src[1] * 0x100 + src[0];
+}
+
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+	dest[0] = src % 0x100;
+	dest[1] = src / 0x100;
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
new file mode 100644
index 0000000..7f62e01
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -0,0 +1,5809 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define FW_IHEX_NAME "synaptics/startup_fw_update.bin"
+#define FW_IMAGE_NAME "synaptics/startup_fw_update.img"
+/*
+*#define DO_STARTUP_FW_UPDATE
+*/
+/*
+*#ifdef DO_STARTUP_FW_UPDATE
+*#ifdef CONFIG_FB
+*#define WAIT_FOR_FB_READY
+*#define FB_READY_WAIT_MS 100
+*#define FB_READY_TIMEOUT_S 30
+*#endif
+*#endif
+*/
+/*
+*#define MAX_WRITE_SIZE 4096
+*/
+
+#define ENABLE_SYS_REFLASH false
+#define FORCE_UPDATE false
+#define DO_LOCKDOWN false
+
+#define MAX_IMAGE_NAME_LEN 256
+#define MAX_FIRMWARE_ID_LEN 10
+
+#define IMAGE_HEADER_VERSION_05 0x05
+#define IMAGE_HEADER_VERSION_06 0x06
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define IMAGE_AREA_OFFSET 0x100
+#define LOCKDOWN_SIZE 0x50
+
+#define MAX_UTILITY_PARAMS 20
+
+#define V5V6_BOOTLOADER_ID_OFFSET 0
+#define V5V6_CONFIG_ID_SIZE 4
+
+#define V5_PROPERTIES_OFFSET 2
+#define V5_BLOCK_SIZE_OFFSET 3
+#define V5_BLOCK_COUNT_OFFSET 5
+#define V5_BLOCK_NUMBER_OFFSET 0
+#define V5_BLOCK_DATA_OFFSET 2
+
+#define V6_PROPERTIES_OFFSET 1
+#define V6_BLOCK_SIZE_OFFSET 2
+#define V6_BLOCK_COUNT_OFFSET 3
+#define V6_PROPERTIES_2_OFFSET 4
+#define V6_GUEST_CODE_BLOCK_COUNT_OFFSET 5
+#define V6_BLOCK_NUMBER_OFFSET 0
+#define V6_BLOCK_DATA_OFFSET 1
+#define V6_FLASH_COMMAND_OFFSET 2
+#define V6_FLASH_STATUS_OFFSET 3
+
+#define V7_CONFIG_ID_SIZE 32
+
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+
+#define V7_PARTITION_SUPPORT_BYTES 4
+
+#define F35_ERROR_CODE_OFFSET 0
+#define F35_FLASH_STATUS_OFFSET 5
+#define F35_CHUNK_NUM_LSB_OFFSET 0
+#define F35_CHUNK_NUM_MSB_OFFSET 1
+#define F35_CHUNK_DATA_OFFSET 2
+#define F35_CHUNK_COMMAND_OFFSET 18
+
+#define F35_CHUNK_SIZE 16
+#define F35_ERASE_ALL_WAIT_MS 5000
+#define F35_RESET_WAIT_MS 250
+
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define INT_DISABLE_WAIT_MS 20
+#define ENTER_FLASH_PROG_WAIT_MS 20
+#define READ_CONFIG_WAIT_MS 20
+
+static int fwu_do_reflash(void);
+
+static int fwu_recovery_check_status(void);
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+#ifdef SYNA_TDDI
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+#endif
+
+enum f34_version {
+	F34_V0 = 0,
+	F34_V1,
+	F34_V2,
+};
+
+enum bl_version {
+	BL_V5 = 5,
+	BL_V6 = 6,
+	BL_V7 = 7,
+	BL_V8 = 8,
+};
+
+enum flash_area {
+	NONE = 0,
+	UI_FIRMWARE,
+	UI_CONFIG,
+};
+
+enum update_mode {
+	NORMAL = 1,
+	FORCE = 2,
+	LOCKDOWN = 8,
+};
+
+enum config_area {
+	UI_CONFIG_AREA = 0,
+	PM_CONFIG_AREA,
+	BL_CONFIG_AREA,
+	DP_CONFIG_AREA,
+	FLASH_CONFIG_AREA,
+#ifdef SYNA_TDDI
+	TDDI_FORCE_CONFIG_AREA,
+	TDDI_LCM_DATA_AREA,
+	TDDI_OEM_DATA_AREA,
+#endif
+	UPP_AREA,
+};
+
+enum v7_status {
+	SUCCESS = 0x00,
+	DEVICE_NOT_IN_BOOTLOADER_MODE,
+	INVALID_PARTITION,
+	INVALID_COMMAND,
+	INVALID_BLOCK_OFFSET,
+	INVALID_TRANSFER,
+	NOT_ERASED,
+	FLASH_PROGRAMMING_KEY_INCORRECT,
+	BAD_PARTITION_TABLE,
+	CHECKSUM_FAILED,
+	FLASH_HARDWARE_FAILURE = 0x1f,
+};
+
+enum v7_partition_id {
+	BOOTLOADER_PARTITION = 0x01,
+	DEVICE_CONFIG_PARTITION,
+	FLASH_CONFIG_PARTITION,
+	MANUFACTURING_BLOCK_PARTITION,
+	GUEST_SERIALIZATION_PARTITION,
+	GLOBAL_PARAMETERS_PARTITION,
+	CORE_CODE_PARTITION,
+	CORE_CONFIG_PARTITION,
+	GUEST_CODE_PARTITION,
+	DISPLAY_CONFIG_PARTITION,
+	EXTERNAL_TOUCH_AFE_CONFIG_PARTITION,
+	UTILITY_PARAMETER_PARTITION,
+};
+
+enum v7_flash_command {
+	CMD_V7_IDLE = 0x00,
+	CMD_V7_ENTER_BL,
+	CMD_V7_READ,
+	CMD_V7_WRITE,
+	CMD_V7_ERASE,
+	CMD_V7_ERASE_AP,
+	CMD_V7_SENSOR_ID,
+};
+
+enum v5v6_flash_command {
+	CMD_V5V6_IDLE = 0x0,
+	CMD_V5V6_WRITE_FW = 0x2,
+	CMD_V5V6_ERASE_ALL = 0x3,
+	CMD_V5V6_WRITE_LOCKDOWN = 0x4,
+	CMD_V5V6_READ_CONFIG = 0x5,
+	CMD_V5V6_WRITE_CONFIG = 0x6,
+	CMD_V5V6_ERASE_UI_CONFIG = 0x7,
+	CMD_V5V6_ERASE_BL_CONFIG = 0x9,
+	CMD_V5V6_ERASE_DISP_CONFIG = 0xa,
+	CMD_V5V6_ERASE_GUEST_CODE = 0xb,
+	CMD_V5V6_WRITE_GUEST_CODE = 0xc,
+	CMD_V5V6_ERASE_CHIP = 0x0d,
+	CMD_V5V6_ENABLE_FLASH_PROG = 0xf,
+#ifdef SYNA_TDDI
+	CMD_V5V6_ERASE_FORCE_CONFIG = 0x11,
+	CMD_V5V6_READ_FORCE_CONFIG = 0x12,
+	CMD_V5V6_WRITE_FORCE_CONFIG = 0x13,
+	CMD_V5V6_ERASE_LOCKDOWN_DATA = 0x1a,
+	CMD_V5V6_READ_LOCKDOWN_DATA = 0x1b,
+	CMD_V5V6_WRITE_LOCKDOWN_DATA = 0x1c,
+	CMD_V5V6_ERASE_LCM_DATA = 0x1d,
+	CMD_V5V6_ERASE_OEM_DATA = 0x1e,
+#endif
+};
+
+enum flash_command {
+	CMD_IDLE = 0,
+	CMD_WRITE_FW,
+	CMD_WRITE_CONFIG,
+	CMD_WRITE_LOCKDOWN,
+	CMD_WRITE_GUEST_CODE,
+	CMD_WRITE_BOOTLOADER,
+	CMD_WRITE_UTILITY_PARAM,
+	CMD_READ_CONFIG,
+	CMD_ERASE_ALL,
+	CMD_ERASE_UI_FIRMWARE,
+	CMD_ERASE_UI_CONFIG,
+	CMD_ERASE_BL_CONFIG,
+	CMD_ERASE_DISP_CONFIG,
+	CMD_ERASE_FLASH_CONFIG,
+	CMD_ERASE_GUEST_CODE,
+	CMD_ERASE_BOOTLOADER,
+	CMD_ERASE_UTILITY_PARAMETER,
+	CMD_ENABLE_FLASH_PROG,
+#ifdef SYNA_TDDI
+	CMD_ERASE_CHIP,
+	CMD_ERASE_FORCE_CONFIG,
+	CMD_READ_FORCE_CONFIG,
+	CMD_WRITE_FORCE_CONFIG,
+	CMD_ERASE_LOCKDOWN_DATA,
+	CMD_READ_LOCKDOWN_DATA,
+	CMD_WRITE_LOCKDOWN_DATA,
+	CMD_ERASE_LCM_DATA,
+	CMD_READ_LCM_DATA,
+	CMD_WRITE_LCM_DATA,
+	CMD_ERASE_OEM_DATA,
+	CMD_READ_OEM_DATA,
+	CMD_WRITE_OEM_DATA,
+#endif
+};
+
+enum f35_flash_command {
+	CMD_F35_IDLE = 0x0,
+	CMD_F35_RESERVED = 0x1,
+	CMD_F35_WRITE_CHUNK = 0x2,
+	CMD_F35_ERASE_ALL = 0x3,
+	CMD_F35_RESET = 0x10,
+};
+
+enum container_id {
+	TOP_LEVEL_CONTAINER = 0,
+	UI_CONTAINER,
+	UI_CONFIG_CONTAINER,
+	BL_CONTAINER,
+	BL_IMAGE_CONTAINER,
+	BL_CONFIG_CONTAINER,
+	BL_LOCKDOWN_INFO_CONTAINER,
+	PERMANENT_CONFIG_CONTAINER,
+	GUEST_CODE_CONTAINER,
+	BL_PROTOCOL_DESCRIPTOR_CONTAINER,
+	UI_PROTOCOL_DESCRIPTOR_CONTAINER,
+	RMI_SELF_DISCOVERY_CONTAINER,
+	RMI_PAGE_CONTENT_CONTAINER,
+	GENERAL_INFORMATION_CONTAINER,
+	DEVICE_CONFIG_CONTAINER,
+	FLASH_CONFIG_CONTAINER,
+	GUEST_SERIALIZATION_CONTAINER,
+	GLOBAL_PARAMETERS_CONTAINER,
+	CORE_CODE_CONTAINER,
+	CORE_CONFIG_CONTAINER,
+	DISPLAY_CONFIG_CONTAINER,
+	EXTERNAL_TOUCH_AFE_CONFIG_CONTAINER,
+	UTILITY_CONTAINER,
+	UTILITY_PARAMETER_CONTAINER,
+};
+
+enum utility_parameter_id {
+	UNUSED = 0,
+	FORCE_PARAMETER,
+	ANTI_BENDING_PARAMETER,
+};
+
+struct pdt_properties {
+	union {
+		struct {
+			unsigned char reserved_1:6;
+			unsigned char has_bsr:1;
+			unsigned char reserved_2:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct partition_table {
+	unsigned char partition_id:5;
+	unsigned char byte_0_reserved:3;
+	unsigned char byte_1_reserved;
+	unsigned char partition_length_7_0;
+	unsigned char partition_length_15_8;
+	unsigned char start_physical_address_7_0;
+	unsigned char start_physical_address_15_8;
+	unsigned char partition_properties_7_0;
+	unsigned char partition_properties_15_8;
+} __packed;
+
+struct f01_device_control {
+	union {
+		struct {
+			unsigned char sleep_mode:2;
+			unsigned char nosleep:1;
+			unsigned char reserved:2;
+			unsigned char charger_connected:1;
+			unsigned char report_rate:1;
+			unsigned char configured:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_query_0 {
+	union {
+		struct {
+			unsigned char subpacket_1_size:3;
+			unsigned char has_config_id:1;
+			unsigned char f34_query0_b4:1;
+			unsigned char has_thqa:1;
+			unsigned char f34_query0_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_query_1_7 {
+	union {
+		struct {
+			/* query 1 */
+			unsigned char bl_minor_revision;
+			unsigned char bl_major_revision;
+
+			/* query 2 */
+			unsigned char bl_fw_id_7_0;
+			unsigned char bl_fw_id_15_8;
+			unsigned char bl_fw_id_23_16;
+			unsigned char bl_fw_id_31_24;
+
+			/* query 3 */
+			unsigned char minimum_write_size;
+			unsigned char block_size_7_0;
+			unsigned char block_size_15_8;
+			unsigned char flash_page_size_7_0;
+			unsigned char flash_page_size_15_8;
+
+			/* query 4 */
+			unsigned char adjustable_partition_area_size_7_0;
+			unsigned char adjustable_partition_area_size_15_8;
+
+			/* query 5 */
+			unsigned char flash_config_length_7_0;
+			unsigned char flash_config_length_15_8;
+
+			/* query 6 */
+			unsigned char payload_length_7_0;
+			unsigned char payload_length_15_8;
+
+			/* query 7 */
+			unsigned char f34_query7_b0:1;
+			unsigned char has_bootloader:1;
+			unsigned char has_device_config:1;
+			unsigned char has_flash_config:1;
+			unsigned char has_manufacturing_block:1;
+			unsigned char has_guest_serialization:1;
+			unsigned char has_global_parameters:1;
+			unsigned char has_core_code:1;
+			unsigned char has_core_config:1;
+			unsigned char has_guest_code:1;
+			unsigned char has_display_config:1;
+			unsigned char f34_query7_b11__15:5;
+			unsigned char f34_query7_b16__23;
+			unsigned char f34_query7_b24__31;
+		} __packed;
+		unsigned char data[21];
+	};
+};
+
+struct f34_v7_data0 {
+	union {
+		struct {
+			unsigned char operation_status:5;
+			unsigned char device_cfg_status:2;
+			unsigned char bl_mode:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v7_data_1_5 {
+	union {
+		struct {
+			unsigned char partition_id:5;
+			unsigned char f34_data1_b5__7:3;
+			unsigned char block_offset_7_0;
+			unsigned char block_offset_15_8;
+			unsigned char transfer_length_7_0;
+			unsigned char transfer_length_15_8;
+			unsigned char command;
+			unsigned char payload_0;
+			unsigned char payload_1;
+		} __packed;
+		unsigned char data[8];
+	};
+};
+
+struct f34_v5v6_flash_properties {
+	union {
+		struct {
+			unsigned char reg_map:1;
+			unsigned char unlocked:1;
+			unsigned char has_config_id:1;
+			unsigned char has_pm_config:1;
+			unsigned char has_bl_config:1;
+			unsigned char has_disp_config:1;
+			unsigned char has_ctrl1:1;
+			unsigned char has_query4:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f34_v5v6_flash_properties_2 {
+	union {
+		struct {
+			unsigned char has_guest_code:1;
+			unsigned char f34_query4_b1:1;
+			unsigned char has_gesture_config:1;
+			unsigned char has_force_config:1;
+			unsigned char has_lockdown_data:1;
+			unsigned char has_lcm_data:1;
+			unsigned char has_oem_data:1;
+			unsigned char f34_query4_b7:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct register_offset {
+	unsigned char properties;
+	unsigned char properties_2;
+	unsigned char block_size;
+	unsigned char block_count;
+	unsigned char gc_block_count;
+	unsigned char flash_status;
+	unsigned char partition_id;
+	unsigned char block_number;
+	unsigned char transfer_length;
+	unsigned char flash_cmd;
+	unsigned char payload;
+};
+
+struct block_count {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+#ifdef SYNA_TDDI
+	unsigned short tddi_force_config;
+	unsigned short tddi_lockdown_data;
+	unsigned short tddi_lcm_data;
+	unsigned short tddi_oem_data;
+#endif
+	unsigned short total_count;
+};
+
+struct physical_address {
+	unsigned short ui_firmware;
+	unsigned short ui_config;
+	unsigned short dp_config;
+	unsigned short pm_config;
+	unsigned short fl_config;
+	unsigned short bl_image;
+	unsigned short bl_config;
+	unsigned short utility_param;
+	unsigned short lockdown;
+	unsigned short guest_code;
+};
+
+struct container_descriptor {
+	unsigned char content_checksum[4];
+	unsigned char container_id[2];
+	unsigned char minor_version;
+	unsigned char major_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char container_option_flags[4];
+	unsigned char content_options_length[4];
+	unsigned char content_options_address[4];
+	unsigned char content_length[4];
+	unsigned char content_address[4];
+};
+
+struct image_header_10 {
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char minor_header_version;
+	unsigned char major_header_version;
+	unsigned char reserved_08;
+	unsigned char reserved_09;
+	unsigned char reserved_0a;
+	unsigned char reserved_0b;
+	unsigned char top_level_container_start_addr[4];
+};
+
+struct image_header_05_06 {
+	/* 0x00 - 0x0f */
+	unsigned char checksum[4];
+	unsigned char reserved_04;
+	unsigned char reserved_05;
+	unsigned char options_firmware_id:1;
+	unsigned char options_bootloader:1;
+	unsigned char options_guest_code:1;
+	unsigned char options_tddi:1;
+	unsigned char options_reserved:4;
+	unsigned char header_version;
+	unsigned char firmware_size[4];
+	unsigned char config_size[4];
+	/* 0x10 - 0x1f */
+	unsigned char product_id[PRODUCT_ID_SIZE];
+	unsigned char package_id[2];
+	unsigned char package_id_revision[2];
+	unsigned char product_info[PRODUCT_INFO_SIZE];
+	/* 0x20 - 0x2f */
+	unsigned char bootloader_addr[4];
+	unsigned char bootloader_size[4];
+	unsigned char ui_addr[4];
+	unsigned char ui_size[4];
+	/* 0x30 - 0x3f */
+	unsigned char ds_id[16];
+	/* 0x40 - 0x4f */
+	union {
+		struct {
+			unsigned char cstmr_product_id[PRODUCT_ID_SIZE];
+			unsigned char reserved_4a_4f[6];
+		};
+		struct {
+			unsigned char dsp_cfg_addr[4];
+			unsigned char dsp_cfg_size[4];
+			unsigned char reserved_48_4f[8];
+		};
+	};
+	/* 0x50 - 0x53 */
+	unsigned char firmware_id[4];
+};
+
+struct block_data {
+	unsigned int size;
+	const unsigned char *data;
+};
+
+struct image_metadata {
+	bool contains_firmware_id;
+	bool contains_bootloader;
+	bool contains_guest_code;
+	bool contains_disp_config;
+	bool contains_perm_config;
+	bool contains_flash_config;
+	bool contains_utility_param;
+	unsigned int firmware_id;
+	unsigned int checksum;
+	unsigned int bootloader_size;
+	unsigned int disp_config_offset;
+	unsigned char bl_version;
+	unsigned char product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char utility_param_id[MAX_UTILITY_PARAMS];
+	struct block_data bootloader;
+	struct block_data utility;
+	struct block_data ui_firmware;
+	struct block_data ui_config;
+	struct block_data dp_config;
+	struct block_data pm_config;
+	struct block_data fl_config;
+	struct block_data bl_image;
+	struct block_data bl_config;
+	struct block_data utility_param[MAX_UTILITY_PARAMS];
+	struct block_data lockdown;
+	struct block_data guest_code;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+};
+
+struct synaptics_rmi4_fwu_handle {
+	enum bl_version bl_version;
+	bool initialized;
+	bool in_bl_mode;
+	bool in_ub_mode;
+	bool bl_mode_device;
+	bool force_update;
+	bool do_lockdown;
+	bool has_guest_code;
+#ifdef SYNA_TDDI
+	bool has_force_config;
+	bool has_lockdown_data;
+	bool has_lcm_data;
+	bool has_oem_data;
+#endif
+	bool has_utility_param;
+	bool new_partition_table;
+	bool incompatible_partition_tables;
+	bool write_bootloader;
+	unsigned int data_pos;
+	unsigned char *ext_data_source;
+	unsigned char *read_config_buf;
+	unsigned char intr_mask;
+	unsigned char command;
+	unsigned char bootloader_id[2];
+	unsigned char config_id[32];
+	unsigned char flash_status;
+	unsigned char partitions;
+#ifdef F51_DISCRETE_FORCE
+	unsigned char *cal_data;
+	unsigned short cal_data_off;
+	unsigned short cal_data_size;
+	unsigned short cal_data_buf_size;
+	unsigned short cal_packet_data_size;
+#endif
+	unsigned short block_size;
+	unsigned short config_size;
+	unsigned short config_area;
+	unsigned short config_block_count;
+	unsigned short flash_config_length;
+	unsigned short payload_length;
+	unsigned short partition_table_bytes;
+	unsigned short read_config_buf_size;
+	const unsigned char *config_data;
+	const unsigned char *image;
+	unsigned char *image_name;
+	unsigned int image_size;
+	struct image_metadata img;
+	struct register_offset off;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+	struct f34_v5v6_flash_properties flash_properties;
+	struct synaptics_rmi4_fn_desc f34_fd;
+	struct synaptics_rmi4_fn_desc f35_fd;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct workqueue_struct *fwu_workqueue;
+	struct work_struct fwu_work;
+};
+
+static struct bin_attribute dev_attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = fwu_sysfs_show_image,
+	.write = fwu_sysfs_store_image,
+};
+
+static struct device_attribute attrs[] = {
+	__ATTR(dorecovery, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_recovery_store),
+	__ATTR(doreflash, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_do_reflash_store),
+	__ATTR(writeconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_config_store),
+	__ATTR(readconfig, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_read_config_store),
+	__ATTR(configarea, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_config_area_store),
+	__ATTR(imagename, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_name_store),
+	__ATTR(imagesize, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_image_size_store),
+	__ATTR(blocksize, 0444,
+			fwu_sysfs_block_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(fwblockcount, 0444,
+			fwu_sysfs_firmware_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(configblockcount, 0444,
+			fwu_sysfs_configuration_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(dispconfigblockcount, 0444,
+			fwu_sysfs_disp_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(permconfigblockcount, 0444,
+			fwu_sysfs_perm_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(blconfigblockcount, 0444,
+			fwu_sysfs_bl_config_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(uppblockcount, 0444,
+			fwu_sysfs_utility_parameter_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(guestcodeblockcount, 0444,
+			fwu_sysfs_guest_code_block_count_show,
+			synaptics_rmi4_store_error),
+	__ATTR(writeguestcode, 0220,
+			synaptics_rmi4_show_error,
+			fwu_sysfs_write_guest_code_store),
+#ifdef SYNA_TDDI
+	__ATTR(lockdowncode, 0664,
+			fwu_sysfs_read_lockdown_code_show,
+			fwu_sysfs_write_lockdown_code_store),
+#endif
+};
+
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+DECLARE_COMPLETION(fwu_remove_complete);
+
+DEFINE_MUTEX(fwu_sysfs_mutex);
+
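+/*
+ * Fletcher-style 32-bit checksum over 16-bit words: two running sums
+ * are folded back into 16 bits on every iteration, and the result is
+ * packed as (sum2 << 16) | sum1.
+ */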
+static void calculate_checksum(unsigned short *data, unsigned long len,
+		unsigned long *result)
+{
+	unsigned long temp;
+	unsigned long sum1 = 0xffff;
+	unsigned long sum2 = 0xffff;
+
+	*result = 0xffffffff;
+
+	while (len--) {
+		temp = *data;
+		sum1 += temp;
+		sum2 += sum1;
+		sum1 = (sum1 & 0xffff) + (sum1 >> 16);
+		sum2 = (sum2 & 0xffff) + (sum2 >> 16);
+		data++;
+	}
+
+	*result = sum2 << 16 | sum1;
+
+	return;
+}
+
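+/*
+ * Helpers for converting between host-order integers and the
+ * little-endian byte order used in the flash image and F34 registers.
+ */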
+static void convert_to_little_endian(unsigned char *dest, unsigned long src)
+{
+	dest[0] = (unsigned char)(src & 0xff);
+	dest[1] = (unsigned char)((src >> 8) & 0xff);
+	dest[2] = (unsigned char)((src >> 16) & 0xff);
+	dest[3] = (unsigned char)((src >> 24) & 0xff);
+
+	return;
+}
+
+static unsigned int le_to_uint(const unsigned char *ptr)
+{
+	return (unsigned int)ptr[0] +
+			(unsigned int)ptr[1] * 0x100 +
+			(unsigned int)ptr[2] * 0x10000 +
+			(unsigned int)ptr[3] * 0x1000000;
+}
+
+#ifdef F51_DISCRETE_FORCE
+static int fwu_f51_force_data_init(void)
+{
+	int retval;
+	unsigned char query_count;
+	unsigned char packet_info;
+	unsigned char offset[2];
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr + 7,
+			offset,
+			sizeof(offset));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read force data offset\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->cal_data_off = offset[0] | offset[1] << 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f51_query_base_addr,
+			&query_count,
+			sizeof(query_count));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read number of F51 query registers\n",
+				__func__);
+		return retval;
+	}
+
+	if (query_count >= 10) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f51_query_base_addr + 9,
+				&packet_info,
+				sizeof(packet_info));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F51 packet register info\n",
+					__func__);
+			return retval;
+		}
+
+		if (packet_info & MASK_1BIT) {
+			fwu->cal_packet_data_size = packet_info >> 1;
+			fwu->cal_packet_data_size *= 2;
+		} else {
+			fwu->cal_packet_data_size = 0;
+		}
+	} else {
+		fwu->cal_packet_data_size = 0;
+	}
+
+	fwu->cal_data_size = CAL_DATA_SIZE + fwu->cal_packet_data_size;
+	if (fwu->cal_data_size > fwu->cal_data_buf_size) {
+		kfree(fwu->cal_data);
+		fwu->cal_data_buf_size = fwu->cal_data_size;
+		fwu->cal_data = kmalloc(fwu->cal_data_buf_size, GFP_KERNEL);
+		if (!fwu->cal_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->cal_data\n",
+					__func__);
+			fwu->cal_data_buf_size = 0;
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static int fwu_allocate_read_config_buf(unsigned int count)
+{
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (count > fwu->read_config_buf_size) {
+		kfree(fwu->read_config_buf);
+		fwu->read_config_buf = kzalloc(count, GFP_KERNEL);
+		if (!fwu->read_config_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for fwu->read_config_buf\n",
+					__func__);
+			fwu->read_config_buf_size = 0;
+			return -ENOMEM;
+		}
+		fwu->read_config_buf_size = count;
+	}
+
+	return 0;
+}
+
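+/*
+ * Compare the partition table on the device with the one in the image:
+ * incompatible_partition_tables is set when bootloader-owned partitions
+ * (bootloader image, lockdown, global parameters, utility parameters,
+ * and the flash config on BL v7) move, new_partition_table when the UI
+ * firmware, UI config, display config or guest code partitions move.
+ */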
+static void fwu_compare_partition_tables(void)
+{
+	fwu->incompatible_partition_tables = false;
+
+	if (fwu->phyaddr.bl_image != fwu->img.phyaddr.bl_image)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.lockdown != fwu->img.phyaddr.lockdown)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.bl_config != fwu->img.phyaddr.bl_config)
+		fwu->incompatible_partition_tables = true;
+	else if (fwu->phyaddr.utility_param != fwu->img.phyaddr.utility_param)
+		fwu->incompatible_partition_tables = true;
+
+	if (fwu->bl_version == BL_V7) {
+		if (fwu->phyaddr.fl_config != fwu->img.phyaddr.fl_config)
+			fwu->incompatible_partition_tables = true;
+	}
+
+	fwu->new_partition_table = false;
+
+	if (fwu->phyaddr.ui_firmware != fwu->img.phyaddr.ui_firmware)
+		fwu->new_partition_table = true;
+	else if (fwu->phyaddr.ui_config != fwu->img.phyaddr.ui_config)
+		fwu->new_partition_table = true;
+
+	if (fwu->flash_properties.has_disp_config) {
+		if (fwu->phyaddr.dp_config != fwu->img.phyaddr.dp_config)
+			fwu->new_partition_table = true;
+	}
+
+	if (fwu->has_guest_code) {
+		if (fwu->phyaddr.guest_code != fwu->img.phyaddr.guest_code)
+			fwu->new_partition_table = true;
+	}
+
+	return;
+}
+
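+/*
+ * Each partition table entry is 8 bytes, starting at byte offset 2 of
+ * the flash config data; the partition length (in blocks) and start
+ * physical address are 16-bit little-endian fields.
+ */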
+static void fwu_parse_partition_table(const unsigned char *partition_table,
+		struct block_count *blkcount, struct physical_address *phyaddr)
+{
+	unsigned char ii;
+	unsigned char index;
+	unsigned char offset;
+	unsigned short partition_length;
+	unsigned short physical_address;
+	struct partition_table *ptable;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	for (ii = 0; ii < fwu->partitions; ii++) {
+		index = ii * 8 + 2;
+		ptable = (struct partition_table *)&partition_table[index];
+		partition_length = ptable->partition_length_15_8 << 8 |
+				ptable->partition_length_7_0;
+		physical_address = ptable->start_physical_address_15_8 << 8 |
+				ptable->start_physical_address_7_0;
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Partition entry %d:\n",
+				__func__, ii);
+		for (offset = 0; offset < 8; offset++) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: 0x%02x\n",
+					__func__,
+					partition_table[index + offset]);
+		}
+		switch (ptable->partition_id) {
+		case CORE_CODE_PARTITION:
+			blkcount->ui_firmware = partition_length;
+			phyaddr->ui_firmware = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core code block count: %d\n",
+					__func__, blkcount->ui_firmware);
+			blkcount->total_count += partition_length;
+			break;
+		case CORE_CONFIG_PARTITION:
+			blkcount->ui_config = partition_length;
+			phyaddr->ui_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Core config block count: %d\n",
+					__func__, blkcount->ui_config);
+			blkcount->total_count += partition_length;
+			break;
+		case BOOTLOADER_PARTITION:
+			blkcount->bl_image = partition_length;
+			phyaddr->bl_image = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader block count: %d\n",
+					__func__, blkcount->bl_image);
+			blkcount->total_count += partition_length;
+			break;
+		case UTILITY_PARAMETER_PARTITION:
+			blkcount->utility_param = partition_length;
+			phyaddr->utility_param = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter block count: %d\n",
+					__func__, blkcount->utility_param);
+			blkcount->total_count += partition_length;
+			break;
+		case DISPLAY_CONFIG_PARTITION:
+			blkcount->dp_config = partition_length;
+			phyaddr->dp_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Display config block count: %d\n",
+					__func__, blkcount->dp_config);
+			blkcount->total_count += partition_length;
+			break;
+		case FLASH_CONFIG_PARTITION:
+			blkcount->fl_config = partition_length;
+			phyaddr->fl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Flash config block count: %d\n",
+					__func__, blkcount->fl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_CODE_PARTITION:
+			blkcount->guest_code = partition_length;
+			phyaddr->guest_code = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest code block count: %d\n",
+					__func__, blkcount->guest_code);
+			blkcount->total_count += partition_length;
+			break;
+		case GUEST_SERIALIZATION_PARTITION:
+			blkcount->pm_config = partition_length;
+			phyaddr->pm_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Guest serialization block count: %d\n",
+					__func__, blkcount->pm_config);
+			blkcount->total_count += partition_length;
+			break;
+		case GLOBAL_PARAMETERS_PARTITION:
+			blkcount->bl_config = partition_length;
+			phyaddr->bl_config = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Global parameters block count: %d\n",
+					__func__, blkcount->bl_config);
+			blkcount->total_count += partition_length;
+			break;
+		case DEVICE_CONFIG_PARTITION:
+			blkcount->lockdown = partition_length;
+			phyaddr->lockdown = physical_address;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Device config block count: %d\n",
+					__func__, blkcount->lockdown);
+			blkcount->total_count += partition_length;
+			break;
+		};
+	}
+
+	return;
+}
+
+static void fwu_parse_image_header_10_utility(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = fwu->img.utility.size / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		if (ii >= MAX_UTILITY_PARAMS)
+			continue;
+		addr = le_to_uint(fwu->img.utility.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UTILITY_PARAMETER_CONTAINER:
+			fwu->img.utility_param[ii].data = content;
+			fwu->img.utility_param[ii].size = length;
+			fwu->img.utility_param_id[ii] = content[0];
+			break;
+		default:
+			break;
+		};
+	}
+
+	return;
+}
+
+static void fwu_parse_image_header_10_bootloader(const unsigned char *image)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+
+	num_of_containers = (fwu->img.bootloader.size - 4) / 4;
+
+	for (ii = 1; ii <= num_of_containers; ii++) {
+		addr = le_to_uint(fwu->img.bootloader.data + (ii * 4));
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case BL_IMAGE_CONTAINER:
+			fwu->img.bl_image.data = content;
+			fwu->img.bl_image.size = length;
+			break;
+		case BL_CONFIG_CONTAINER:
+		case GLOBAL_PARAMETERS_CONTAINER:
+			fwu->img.bl_config.data = content;
+			fwu->img.bl_config.size = length;
+			break;
+		case BL_LOCKDOWN_INFO_CONTAINER:
+		case DEVICE_CONFIG_CONTAINER:
+			fwu->img.lockdown.data = content;
+			fwu->img.lockdown.size = length;
+			break;
+		default:
+			break;
+		};
+	}
+
+	return;
+}
+
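+/*
+ * Parse a v0x10 image: the header points at the top-level container,
+ * whose content is a list of 4-byte little-endian addresses of
+ * container descriptors; each descriptor identifies its payload by
+ * container ID.
+ */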
+static void fwu_parse_image_header_10(void)
+{
+	unsigned char ii;
+	unsigned char num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const unsigned char *image;
+	const unsigned char *content;
+	struct container_descriptor *descriptor;
+	struct image_header_10 *header;
+
+	image = fwu->image;
+	header = (struct image_header_10 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	/* address of top level container */
+	offset = le_to_uint(header->top_level_container_start_addr);
+	descriptor = (struct container_descriptor *)(image + offset);
+
+	/* address of top level container content */
+	offset = le_to_uint(descriptor->content_address);
+	num_of_containers = le_to_uint(descriptor->content_length) / 4;
+
+	for (ii = 0; ii < num_of_containers; ii++) {
+		addr = le_to_uint(image + offset);
+		offset += 4;
+		descriptor = (struct container_descriptor *)(image + addr);
+		container_id = descriptor->container_id[0] |
+				descriptor->container_id[1] << 8;
+		content = image + le_to_uint(descriptor->content_address);
+		length = le_to_uint(descriptor->content_length);
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			fwu->img.ui_firmware.data = content;
+			fwu->img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			fwu->img.ui_config.data = content;
+			fwu->img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			fwu->img.bl_version = *content;
+			fwu->img.bootloader.data = content;
+			fwu->img.bootloader.size = length;
+			fwu_parse_image_header_10_bootloader(image);
+			break;
+		case UTILITY_CONTAINER:
+			fwu->img.utility.data = content;
+			fwu->img.utility.size = length;
+			fwu_parse_image_header_10_utility(image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			fwu->img.contains_guest_code = true;
+			fwu->img.guest_code.data = content;
+			fwu->img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			fwu->img.contains_disp_config = true;
+			fwu->img.dp_config.data = content;
+			fwu->img.dp_config.size = length;
+			break;
+		case PERMANENT_CONFIG_CONTAINER:
+		case GUEST_SERIALIZATION_CONTAINER:
+			fwu->img.contains_perm_config = true;
+			fwu->img.pm_config.data = content;
+			fwu->img.pm_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			fwu->img.contains_flash_config = true;
+			fwu->img.fl_config.data = content;
+			fwu->img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			fwu->img.contains_firmware_id = true;
+			fwu->img.firmware_id = le_to_uint(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
+static void fwu_parse_image_header_05_06(void)
+{
+	int retval;
+	const unsigned char *image;
+	struct image_header_05_06 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	image = fwu->image;
+	header = (struct image_header_05_06 *)image;
+
+	fwu->img.checksum = le_to_uint(header->checksum);
+
+	fwu->img.bl_version = header->header_version;
+
+	fwu->img.contains_bootloader = header->options_bootloader;
+	if (fwu->img.contains_bootloader)
+		fwu->img.bootloader_size = le_to_uint(header->bootloader_size);
+
+	fwu->img.ui_firmware.size = le_to_uint(header->firmware_size);
+	if (fwu->img.ui_firmware.size) {
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+		if (fwu->img.contains_bootloader)
+			fwu->img.ui_firmware.data += fwu->img.bootloader_size;
+	}
+
+	if ((fwu->img.bl_version == BL_V6) && header->options_tddi)
+		fwu->img.ui_firmware.data = image + IMAGE_AREA_OFFSET;
+
+	fwu->img.ui_config.size = le_to_uint(header->config_size);
+	if (fwu->img.ui_config.size) {
+		fwu->img.ui_config.data = fwu->img.ui_firmware.data +
+				fwu->img.ui_firmware.size;
+	}
+
+	if (fwu->img.contains_bootloader || header->options_tddi)
+		fwu->img.contains_disp_config = true;
+	else
+		fwu->img.contains_disp_config = false;
+
+	if (fwu->img.contains_disp_config) {
+		fwu->img.disp_config_offset = le_to_uint(header->dsp_cfg_addr);
+		fwu->img.dp_config.size = le_to_uint(header->dsp_cfg_size);
+		fwu->img.dp_config.data = image + fwu->img.disp_config_offset;
+	} else {
+		retval = secure_memcpy(fwu->img.cstmr_product_id,
+				sizeof(fwu->img.cstmr_product_id),
+				header->cstmr_product_id,
+				sizeof(header->cstmr_product_id),
+				PRODUCT_ID_SIZE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy custom product ID string\n",
+					__func__);
+		}
+		fwu->img.cstmr_product_id[PRODUCT_ID_SIZE] = 0;
+	}
+
+	fwu->img.contains_firmware_id = header->options_firmware_id;
+	if (fwu->img.contains_firmware_id)
+		fwu->img.firmware_id = le_to_uint(header->firmware_id);
+
+	retval = secure_memcpy(fwu->img.product_id,
+			sizeof(fwu->img.product_id),
+			header->product_id,
+			sizeof(header->product_id),
+			PRODUCT_ID_SIZE);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy product ID string\n",
+				__func__);
+	}
+	fwu->img.product_id[PRODUCT_ID_SIZE] = 0;
+
+	fwu->img.lockdown.size = LOCKDOWN_SIZE;
+	fwu->img.lockdown.data = image + IMAGE_AREA_OFFSET - LOCKDOWN_SIZE;
+
+	return;
+}
+
+static int fwu_parse_image_info(void)
+{
+	struct image_header_10 *header;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	header = (struct image_header_10 *)fwu->image;
+
+	memset(&fwu->img, 0x00, sizeof(fwu->img));
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		fwu_parse_image_header_10();
+		break;
+	case IMAGE_HEADER_VERSION_05:
+	case IMAGE_HEADER_VERSION_06:
+		fwu_parse_image_header_05_06();
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Unsupported image file format (0x%02x)\n",
+				__func__, header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (!fwu->img.contains_flash_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No flash config found in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+
+		fwu_parse_partition_table(fwu->img.fl_config.data,
+				&fwu->img.blkcount, &fwu->img.phyaddr);
+
+		if (fwu->img.blkcount.utility_param)
+			fwu->img.contains_utility_param = true;
+
+		fwu_compare_partition_tables();
+	} else {
+		fwu->new_partition_table = false;
+		fwu->incompatible_partition_tables = false;
+	}
+
+	return 0;
+}
+
+static int fwu_read_flash_status(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->in_bl_mode = status >> 7;
+
+	if (fwu->bl_version == BL_V5)
+		fwu->flash_status = (status >> 4) & MASK_3BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->flash_status = status & MASK_3BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->flash_status = status & MASK_5BIT;
+
+	if (fwu->write_bootloader)
+		fwu->flash_status = 0x00;
+
+	if (fwu->flash_status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash status = %d, command = 0x%02x\n",
+				__func__, fwu->flash_status, fwu->command);
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8) {
+		if (fwu->flash_status == 0x08)
+			fwu->flash_status = 0x00;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5)
+		fwu->command = command & MASK_4BIT;
+	else if (fwu->bl_version == BL_V6)
+		fwu->command = command & MASK_6BIT;
+	else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		fwu->command = command;
+
+	if (fwu->write_bootloader)
+		fwu->command = 0x00;
+
+	return 0;
+}
+
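+/*
+ * Poll for the flash controller to become idle: sleep 50-100 us per
+ * iteration, re-read the flash status on every pass when poll is set
+ * (otherwise only on the final pass), and return -ETIMEDOUT after
+ * roughly timeout_ms.
+ */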
+static int fwu_wait_for_idle(int timeout_ms, bool poll)
+{
+	int count = 0;
+	int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	do {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		count++;
+		if (poll || (count == timeout_count))
+			fwu_read_flash_status();
+
+		if ((fwu->command == CMD_IDLE) && (fwu->flash_status == 0x00))
+			return 0;
+	} while (count < timeout_count);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Timed out waiting for idle status\n",
+			__func__);
+
+	return -ETIMEDOUT;
+}
+
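+/*
+ * F34 v7 erase and enter-bootloader commands are issued as one write
+ * covering partition ID, block offset, transfer length, command and the
+ * bootloader ID payload, rather than programming each register
+ * separately.
+ */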
+static int fwu_write_f34_v7_command_single_transaction(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	struct f34_v7_data_1_5 data_1_5;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	memset(data_1_5.data, 0x00, sizeof(data_1_5.data));
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ERASE_UTILITY_PARAMETER:
+		data_1_5.partition_id = UTILITY_PARAMETER_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	};
+
+	data_1_5.payload_0 = fwu->bootloader_id[0];
+	data_1_5.payload_1 = fwu->bootloader_id[1];
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			data_1_5.data,
+			sizeof(data_1_5.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write single transaction command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int fwu_write_f34_v7_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+	case CMD_WRITE_CONFIG:
+	case CMD_WRITE_LOCKDOWN:
+	case CMD_WRITE_GUEST_CODE:
+	case CMD_WRITE_BOOTLOADER:
+	case CMD_WRITE_UTILITY_PARAM:
+		command = CMD_V7_WRITE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+		command = CMD_V7_ERASE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	fwu->command = command;
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_FIRMWARE:
+	case CMD_ERASE_BL_CONFIG:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_FLASH_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+	case CMD_ERASE_BOOTLOADER:
+	case CMD_ERASE_UTILITY_PARAMETER:
+	case CMD_ENABLE_FLASH_PROG:
+		retval = fwu_write_f34_v7_command_single_transaction(cmd);
+		if (retval < 0)
+			return retval;
+		else
+			return 0;
+	default:
+		break;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write flash command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
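+/*
+ * Translate a generic flash command to its F34 v5/v6 command code. Erase
+ * and enable-flash-programming commands require the bootloader ID to be
+ * written to the payload registers before the command register.
+ */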
+static int fwu_write_f34_v5v6_command(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_IDLE:
+		command = CMD_V5V6_IDLE;
+		break;
+	case CMD_WRITE_FW:
+		command = CMD_V5V6_WRITE_FW;
+		break;
+	case CMD_WRITE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		command = CMD_V5V6_WRITE_LOCKDOWN;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		command = CMD_V5V6_WRITE_GUEST_CODE;
+		break;
+	case CMD_READ_CONFIG:
+		command = CMD_V5V6_READ_CONFIG;
+		break;
+	case CMD_ERASE_ALL:
+		command = CMD_V5V6_ERASE_ALL;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		command = CMD_V5V6_ERASE_UI_CONFIG;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		command = CMD_V5V6_ERASE_DISP_CONFIG;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		command = CMD_V5V6_ERASE_GUEST_CODE;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		command = CMD_V5V6_ENABLE_FLASH_PROG;
+		break;
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+		command = CMD_V5V6_ERASE_CHIP;
+		break;
+	case CMD_ERASE_FORCE_CONFIG:
+		command = CMD_V5V6_ERASE_FORCE_CONFIG;
+		break;
+	case CMD_READ_FORCE_CONFIG:
+		command = CMD_V5V6_READ_FORCE_CONFIG;
+		break;
+	case CMD_WRITE_FORCE_CONFIG:
+		command = CMD_V5V6_WRITE_CONFIG;
+		break;
+	case CMD_ERASE_LOCKDOWN_DATA:
+		command = CMD_V5V6_ERASE_LOCKDOWN_DATA;
+		break;
+	case CMD_READ_LOCKDOWN_DATA:
+		command = CMD_V5V6_READ_LOCKDOWN_DATA;
+		break;
+	case CMD_WRITE_LOCKDOWN_DATA:
+		command = CMD_V5V6_WRITE_LOCKDOWN_DATA;
+		break;
+	case CMD_ERASE_LCM_DATA:
+		command = CMD_V5V6_ERASE_LCM_DATA;
+		break;
+	case CMD_ERASE_OEM_DATA:
+		command = CMD_V5V6_ERASE_OEM_DATA;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case CMD_ERASE_ALL:
+	case CMD_ERASE_UI_CONFIG:
+	case CMD_ERASE_DISP_CONFIG:
+	case CMD_ERASE_GUEST_CODE:
+#ifdef SYNA_TDDI
+	case CMD_ERASE_CHIP:
+	case CMD_ERASE_FORCE_CONFIG:
+	case CMD_ERASE_LOCKDOWN_DATA:
+	case CMD_ERASE_LCM_DATA:
+	case CMD_ERASE_OEM_DATA:
+#endif
+	case CMD_ENABLE_FLASH_PROG:
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				fwu->bootloader_id,
+				sizeof(fwu->bootloader_id));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write bootloader ID\n",
+					__func__);
+			return retval;
+		}
+		break;
+	default:
+		break;
+	}
+
+	fwu->command = command;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command 0x%02x\n",
+				__func__, command);
+		return retval;
+	}
+
+	return 0;
+}
+
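+/* Dispatch a flash command to the v7/v8 or v5/v6 handler. */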
+static int fwu_write_f34_command(unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_write_f34_v7_command(cmd);
+	else
+		retval = fwu_write_f34_v5v6_command(cmd);
+
+	return retval;
+}
+
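+/*
+ * Select the partition targeted by the next F34 v7 flash operation. For
+ * config reads and writes the partition is derived from the currently
+ * selected config area.
+ */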
+static int fwu_write_f34_v7_partition_id(unsigned char cmd)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char partition;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	switch (cmd) {
+	case CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_WRITE_CONFIG:
+	case CMD_READ_CONFIG:
+		if (fwu->config_area == UI_CONFIG_AREA)
+			partition = CORE_CONFIG_PARTITION;
+		else if (fwu->config_area == DP_CONFIG_AREA)
+			partition = DISPLAY_CONFIG_PARTITION;
+		else if (fwu->config_area == PM_CONFIG_AREA)
+			partition = GUEST_SERIALIZATION_PARTITION;
+		else if (fwu->config_area == BL_CONFIG_AREA)
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		else if (fwu->config_area == FLASH_CONFIG_AREA)
+			partition = FLASH_CONFIG_PARTITION;
+		else if (fwu->config_area == UPP_AREA)
+			partition = UTILITY_PARAMETER_PARTITION;
+		break;
+	case CMD_WRITE_LOCKDOWN:
+		partition = DEVICE_CONFIG_PARTITION;
+		break;
+	case CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_WRITE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_WRITE_UTILITY_PARAM:
+		partition = UTILITY_PARAMETER_PARTITION;
+		break;
+	case CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case CMD_ERASE_BOOTLOADER:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	case CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid command 0x%02x\n",
+				__func__, cmd);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.partition_id,
+			&partition,
+			sizeof(partition));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write partition ID\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int fwu_write_f34_partition_id(unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_write_f34_v7_partition_id(cmd);
+	else
+		retval = 0;
+
+	return retval;
+}
+
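+/*
+ * Read the partition table out of the flash config area: select the
+ * partition, program the block number and transfer length, issue a read
+ * command, then copy the table from the payload registers.
+ */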
+static int fwu_read_f34_v7_partition_table(unsigned char *partition_table)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short block_number = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+
+	retval = fwu_write_f34_partition_id(CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	length[0] = (unsigned char)(fwu->flash_config_length & MASK_8BIT);
+	length[1] = (unsigned char)(fwu->flash_config_length >> 8);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.transfer_length,
+			length,
+			sizeof(length));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write transfer length\n",
+				__func__);
+		return retval;
+	}
+
+	retval = fwu_write_f34_command(CMD_READ_CONFIG);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(READ_CONFIG_WAIT_MS);
+
+	retval = fwu_wait_for_idle(WRITE_WAIT_MS, true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to wait for idle status\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + fwu->off.payload,
+			partition_table,
+			fwu->partition_table_bytes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
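+/*
+ * Read the F34 v7 query registers to obtain the bootloader revision,
+ * block and payload sizes, and the number of supported partitions, then
+ * fetch the partition table and derive the per-partition block counts
+ * and flash properties.
+ */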
+static int fwu_read_f34_v7_queries(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char query_base;
+	unsigned char index;
+	unsigned char offset;
+	unsigned char *ptable;
+	struct f34_v7_query_0 query_0;
+	struct f34_v7_query_1_7 query_1_7;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	query_base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read query 0\n",
+				__func__);
+		return retval;
+	}
+
+	offset = query_0.subpacket_1_size + 1;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			query_base + offset,
+			query_1_7.data,
+			sizeof(query_1_7.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read queries 1 to 7\n",
+				__func__);
+		return retval;
+	}
+
+	fwu->bootloader_id[0] = query_1_7.bl_minor_revision;
+	fwu->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	if (fwu->bootloader_id[1] == BL_V8)
+		fwu->bl_version = BL_V8;
+
+	fwu->block_size = query_1_7.block_size_15_8 << 8 |
+			query_1_7.block_size_7_0;
+
+	fwu->flash_config_length = query_1_7.flash_config_length_15_8 << 8 |
+			query_1_7.flash_config_length_7_0;
+
+	fwu->payload_length = query_1_7.payload_length_15_8 << 8 |
+			query_1_7.payload_length_7_0;
+
+	fwu->off.flash_status = V7_FLASH_STATUS_OFFSET;
+	fwu->off.partition_id = V7_PARTITION_ID_OFFSET;
+	fwu->off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	fwu->off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	fwu->off.flash_cmd = V7_COMMAND_OFFSET;
+	fwu->off.payload = V7_PAYLOAD_OFFSET;
+
+	index = sizeof(query_1_7.data) - V7_PARTITION_SUPPORT_BYTES;
+
+	fwu->partitions = 0;
+	for (offset = 0; offset < V7_PARTITION_SUPPORT_BYTES; offset++) {
+		for (ii = 0; ii < 8; ii++) {
+			if (query_1_7.data[index + offset] & (1 << ii))
+				fwu->partitions++;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Supported partitions: 0x%02x\n",
+				__func__, query_1_7.data[index + offset]);
+	}
+
+	fwu->partition_table_bytes = fwu->partitions * 8 + 2;
+
+	ptable = kzalloc(fwu->partition_table_bytes, GFP_KERNEL);
+	if (!ptable) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for partition table\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = fwu_read_f34_v7_partition_table(ptable);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read partition table\n",
+				__func__);
+		kfree(ptable);
+		return retval;
+	}
+
+	fwu_parse_partition_table(ptable, &fwu->blkcount, &fwu->phyaddr);
+
+	if (fwu->blkcount.dp_config)
+		fwu->flash_properties.has_disp_config = 1;
+	else
+		fwu->flash_properties.has_disp_config = 0;
+
+	if (fwu->blkcount.pm_config)
+		fwu->flash_properties.has_pm_config = 1;
+	else
+		fwu->flash_properties.has_pm_config = 0;
+
+	if (fwu->blkcount.bl_config)
+		fwu->flash_properties.has_bl_config = 1;
+	else
+		fwu->flash_properties.has_bl_config = 0;
+
+	if (fwu->blkcount.guest_code)
+		fwu->has_guest_code = 1;
+	else
+		fwu->has_guest_code = 0;
+
+	if (fwu->blkcount.utility_param)
+		fwu->has_utility_param = 1;
+	else
+		fwu->has_utility_param = 0;
+
+	kfree(ptable);
+
+	return 0;
+}
+
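+/*
+ * Read the F34 v5/v6 query registers to obtain the bootloader ID, block
+ * size, flash properties, and the block counts of the supported areas.
+ */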
+static int fwu_read_f34_v5v6_queries(void)
+{
+	int retval;
+	unsigned char count;
+	unsigned char base;
+	unsigned char offset;
+	unsigned char buf[10];
+	struct f34_v5v6_flash_properties_2 properties_2;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	base = fwu->f34_fd.query_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + V5V6_BOOTLOADER_ID_OFFSET,
+			fwu->bootloader_id,
+			sizeof(fwu->bootloader_id));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read bootloader ID\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.properties = V5_PROPERTIES_OFFSET;
+		fwu->off.block_size = V5_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V5_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V5_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V5_BLOCK_DATA_OFFSET;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.properties = V6_PROPERTIES_OFFSET;
+		fwu->off.properties_2 = V6_PROPERTIES_2_OFFSET;
+		fwu->off.block_size = V6_BLOCK_SIZE_OFFSET;
+		fwu->off.block_count = V6_BLOCK_COUNT_OFFSET;
+		fwu->off.gc_block_count = V6_GUEST_CODE_BLOCK_COUNT_OFFSET;
+		fwu->off.block_number = V6_BLOCK_NUMBER_OFFSET;
+		fwu->off.payload = V6_BLOCK_DATA_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_size,
+			buf,
+			2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block size info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->block_size, &(buf[0]));
+
+	if (fwu->bl_version == BL_V5) {
+		fwu->off.flash_cmd = fwu->off.payload + fwu->block_size;
+		fwu->off.flash_status = fwu->off.flash_cmd;
+	} else if (fwu->bl_version == BL_V6) {
+		fwu->off.flash_cmd = V6_FLASH_COMMAND_OFFSET;
+		fwu->off.flash_status = V6_FLASH_STATUS_OFFSET;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_bl_config)
+		count += 2;
+
+	if (fwu->flash_properties.has_disp_config)
+		count += 2;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			base + fwu->off.block_count,
+			buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read block count info\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
+	batohs(&fwu->blkcount.ui_config, &(buf[2]));
+
+	count = 4;
+
+	if (fwu->flash_properties.has_pm_config) {
+		batohs(&fwu->blkcount.pm_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_bl_config) {
+		batohs(&fwu->blkcount.bl_config, &(buf[count]));
+		count += 2;
+	}
+
+	if (fwu->flash_properties.has_disp_config)
+		batohs(&fwu->blkcount.dp_config, &(buf[count]));
+
+	fwu->has_guest_code = false;
+#ifdef SYNA_TDDI
+	fwu->has_force_config = false;
+	fwu->has_lockdown_data = false;
+	fwu->has_lcm_data = false;
+	fwu->has_oem_data = false;
+#endif
+
+	if (fwu->flash_properties.has_query4) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				base + fwu->off.properties_2,
+				properties_2.data,
+				sizeof(properties_2.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash properties 2\n",
+					__func__);
+			return retval;
+		}
+		offset = fwu->off.properties_2 + 1;
+		count = 0;
+		if (properties_2.has_guest_code) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read guest code block count\n",
+						__func__);
+				return retval;
+			}
+
+			batohs(&fwu->blkcount.guest_code, &(buf[0]));
+			count++;
+			fwu->has_guest_code = true;
+		}
+#ifdef SYNA_TDDI
+		if (properties_2.has_force_config) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi force block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
+			count++;
+			fwu->has_force_config = true;
+		}
+		if (properties_2.has_lockdown_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lockdown block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
+			count++;
+			fwu->has_lockdown_data = true;
+		}
+		if (properties_2.has_lcm_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi lcm block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
+			count++;
+			fwu->has_lcm_data = true;
+		}
+		if (properties_2.has_oem_data) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					base + offset + count,
+					buf,
+					2);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read tddi oem block count\n",
+					__func__);
+				return retval;
+			}
+			batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
+			fwu->has_oem_data = true;
+		}
+#endif
+	}
+
+	fwu->has_utility_param = false;
+
+	return 0;
+}
+
+static int fwu_read_f34_queries(void)
+{
+	int retval;
+
+	memset(&fwu->blkcount, 0x00, sizeof(fwu->blkcount));
+	memset(&fwu->phyaddr, 0x00, sizeof(fwu->phyaddr));
+
+	if (fwu->bl_version == BL_V7)
+		retval = fwu_read_f34_v7_queries();
+	else
+		retval = fwu_read_f34_v5v6_queries();
+
+	return retval;
+}
+
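+/*
+ * Write block_cnt blocks to the selected v7 partition. The transfer is
+ * split into chunks of at most payload_length blocks, each chunk is
+ * pushed through the payload registers, and the controller is polled
+ * for idle status before the next chunk is sent.
+ */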
+static int fwu_write_f34_v7_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short left_bytes;
+	unsigned short write_size;
+	unsigned short max_write_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+#ifdef MAX_WRITE_SIZE
+		max_write_size = MAX_WRITE_SIZE;
+		if (max_write_size >= transfer * fwu->block_size)
+			max_write_size = transfer * fwu->block_size;
+		else if (max_write_size > fwu->block_size)
+			max_write_size -= max_write_size % fwu->block_size;
+		else
+			max_write_size = fwu->block_size;
+#else
+		max_write_size = transfer * fwu->block_size;
+#endif
+		left_bytes = transfer * fwu->block_size;
+
+		do {
+			if (left_bytes / max_write_size)
+				write_size = max_write_size;
+			else
+				write_size = left_bytes;
+
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					data_base + fwu->off.payload,
+					block_ptr,
+					write_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to write block data (remaining = %d)\n",
+						__func__, remaining);
+				return retval;
+			}
+
+			block_ptr += write_size;
+			left_bytes -= write_size;
+		} while (left_bytes);
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+static int fwu_write_f34_v5v6_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.payload,
+				block_ptr,
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command for block %d\n",
+					__func__, blk);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		block_ptr += fwu->block_size;
+	}
+
+	return 0;
+}
+
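+/* Dispatch a block write to the v7/v8 or v5/v6 handler. */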
+static int fwu_write_f34_blocks(unsigned char *block_ptr,
+		unsigned short block_cnt, unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_write_f34_v7_blocks(block_ptr, block_cnt, cmd);
+	else
+		retval = fwu_write_f34_v5v6_blocks(block_ptr, block_cnt, cmd);
+
+	return retval;
+}
+
+static int fwu_read_f34_v7_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char length[2];
+	unsigned short transfer;
+	unsigned short remaining = block_cnt;
+	unsigned short block_number = 0;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	retval = fwu_write_f34_partition_id(command);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			(unsigned char *)&block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	do {
+		if (remaining / fwu->payload_length)
+			transfer = fwu->payload_length;
+		else
+			transfer = remaining;
+
+		length[0] = (unsigned char)(transfer & MASK_8BIT);
+		length[1] = (unsigned char)(transfer >> 8);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				data_base + fwu->off.transfer_length,
+				length,
+				sizeof(length));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write transfer length (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write command (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				transfer * fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (remaining = %d)\n",
+					__func__, remaining);
+			return retval;
+		}
+
+		index += (transfer * fwu->block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+static int fwu_read_f34_v5v6_blocks(unsigned short block_cnt,
+		unsigned char command)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char block_number[] = {0, 0};
+	unsigned short blk;
+	unsigned short index = 0;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f34_fd.data_base_addr;
+
+	block_number[1] |= (fwu->config_area << 5);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			data_base + fwu->off.block_number,
+			block_number,
+			sizeof(block_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write block number\n",
+				__func__);
+		return retval;
+	}
+
+	for (blk = 0; blk < block_cnt; blk++) {
+		retval = fwu_write_f34_command(command);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write read config command\n",
+					__func__);
+			return retval;
+		}
+
+		retval = fwu_wait_for_idle(WRITE_WAIT_MS, false);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to wait for idle status\n",
+					__func__);
+			return retval;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + fwu->off.payload,
+				&fwu->read_config_buf[index],
+				fwu->block_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read block data (block %d)\n",
+					__func__, blk);
+			return retval;
+		}
+
+		index += fwu->block_size;
+	}
+
+	return 0;
+}
+
+static int fwu_read_f34_blocks(unsigned short block_cnt, unsigned char cmd)
+{
+	int retval;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		retval = fwu_read_f34_v7_blocks(block_cnt, cmd);
+	else
+		retval = fwu_read_f34_v5v6_blocks(block_cnt, cmd);
+
+	return retval;
+}
+
+static int fwu_get_image_firmware_id(unsigned int *fw_id)
+{
+	int retval;
+	unsigned char index = 0;
+	char *strptr;
+	char *firmware_id;
+	unsigned long fw_id_long;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->img.contains_firmware_id) {
+		*fw_id = fwu->img.firmware_id;
+	} else {
+		strptr = strnstr(fwu->image_name, "PR", MAX_IMAGE_NAME_LEN);
+		if (!strptr) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No valid PR number (PRxxxxxxx) found in image file name (%s)\n",
+					__func__, fwu->image_name);
+			return -EINVAL;
+		}
+
+		strptr += 2;
+		firmware_id = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+		if (!firmware_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for firmware_id\n",
+					__func__);
+			return -ENOMEM;
+		}
+		while (strptr[index] >= '0' && strptr[index] <= '9') {
+			firmware_id[index] = strptr[index];
+			index++;
+			if (index == MAX_FIRMWARE_ID_LEN - 1)
+				break;
+		}
+
+		/* sstrtoul writes an unsigned long; parse into a local to
+		 * avoid overrunning the caller's unsigned int.
+		 */
+		retval = sstrtoul(firmware_id, 10, &fw_id_long);
+		kfree(firmware_id);
+		if (retval) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to obtain image firmware ID\n",
+					__func__);
+			return -EINVAL;
+		}
+		*fw_id = (unsigned int)fw_id_long;
+	}
+
+	return 0;
+}
+
+static int fwu_get_device_config_id(void)
+{
+	int retval;
+	unsigned char config_id_size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+				fwu->f34_fd.ctrl_base_addr,
+				fwu->config_id,
+				config_id_size);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
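+/*
+ * Decide whether to reflash. A forced update or a device stuck in
+ * bootloader mode always updates; otherwise the image firmware ID and
+ * config ID are compared against the values reported by the device and
+ * only newer images are flashed.
+ */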
+static enum flash_area fwu_go_nogo(void)
+{
+	int retval;
+	enum flash_area flash_area = NONE;
+	unsigned char ii;
+	unsigned char config_id_size;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->force_update) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Update both UI and config if device is in bootloader mode */
+	if (fwu->bl_mode_device) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	}
+
+	/* Get device firmware ID */
+	device_fw_id = rmi4_data->firmware_id;
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Device firmware ID = %d\n",
+			__func__, device_fw_id);
+
+	/* Get image firmware ID */
+	retval = fwu_get_image_firmware_id(&image_fw_id);
+	if (retval < 0) {
+		flash_area = NONE;
+		goto exit;
+	}
+	dev_info(rmi4_data->pdev->dev.parent,
+			"%s: Image firmware ID = %d\n",
+			__func__, image_fw_id);
+
+	if (image_fw_id > device_fw_id) {
+		flash_area = UI_FIRMWARE;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Image firmware ID older than device firmware ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	/* Get device config ID */
+	retval = fwu_get_device_config_id();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read device config ID\n",
+				__func__);
+		flash_area = NONE;
+		goto exit;
+	}
+
+	if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+		config_id_size = V7_CONFIG_ID_SIZE;
+	else
+		config_id_size = V5V6_CONFIG_ID_SIZE;
+
+	for (ii = 0; ii < config_id_size; ii++) {
+		if (fwu->img.ui_config.data[ii] > fwu->config_id[ii]) {
+			flash_area = UI_CONFIG;
+			goto exit;
+		} else if (fwu->img.ui_config.data[ii] < fwu->config_id[ii]) {
+			flash_area = NONE;
+			goto exit;
+		}
+	}
+
+	flash_area = NONE;
+
+exit:
+	if (flash_area == NONE) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: No need to do reflash\n",
+				__func__);
+	} else {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Updating %s\n",
+				__func__,
+				flash_area == UI_FIRMWARE ?
+				"UI firmware and config" :
+				"UI config only");
+	}
+
+	return flash_area;
+}
+
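+/*
+ * Scan the Page Description Table for F01, F34, and F35 to record the
+ * register base addresses and determine the bootloader version from the
+ * F34 function version, then enable the F34 interrupt source. If F01 or
+ * F34 is missing but F35 is present, the device is treated as being in
+ * microbootloader mode.
+ */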
+static int fwu_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	bool f01found = false;
+	bool f34found = false;
+	bool f35found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->in_ub_mode = false;
+
+	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				addr,
+				(unsigned char *)&rmi_fd,
+				sizeof(rmi_fd));
+		if (retval < 0)
+			return retval;
+
+		if (rmi_fd.fn_number) {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Found F%02x\n",
+					__func__, rmi_fd.fn_number);
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F01:
+				f01found = true;
+
+				rmi4_data->f01_query_base_addr =
+						rmi_fd.query_base_addr;
+				rmi4_data->f01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				rmi4_data->f01_data_base_addr =
+						rmi_fd.data_base_addr;
+				rmi4_data->f01_cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			case SYNAPTICS_RMI4_F34:
+				f34found = true;
+				fwu->f34_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f34_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f34_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+
+				switch (rmi_fd.fn_version) {
+				case F34_V0:
+					fwu->bl_version = BL_V5;
+					break;
+				case F34_V1:
+					fwu->bl_version = BL_V6;
+					break;
+				case F34_V2:
+					fwu->bl_version = BL_V7;
+					break;
+				default:
+					dev_err(rmi4_data->pdev->dev.parent,
+							"%s: Unrecognized F34 version\n",
+							__func__);
+					return -EINVAL;
+				}
+
+				fwu->intr_mask = 0;
+				intr_src = rmi_fd.intr_src_count;
+				intr_off = intr_count % 8;
+				for (ii = intr_off;
+						ii < (intr_src + intr_off);
+						ii++) {
+					fwu->intr_mask |= 1 << ii;
+				}
+				break;
+			case SYNAPTICS_RMI4_F35:
+				f35found = true;
+				fwu->f35_fd.query_base_addr =
+						rmi_fd.query_base_addr;
+				fwu->f35_fd.ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				fwu->f35_fd.data_base_addr =
+						rmi_fd.data_base_addr;
+				fwu->f35_fd.cmd_base_addr =
+						rmi_fd.cmd_base_addr;
+				break;
+			}
+		} else {
+			break;
+		}
+
+		intr_count += rmi_fd.intr_src_count;
+	}
+
+	if (!f01found || !f34found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find both F01 and F34\n",
+				__func__);
+		if (!f35found) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to find F35\n",
+					__func__);
+			return -EINVAL;
+		} else {
+			fwu->in_ub_mode = true;
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: In microbootloader mode\n",
+					__func__);
+			fwu_recovery_check_status();
+			return 0;
+		}
+	}
+
+	rmi4_data->intr_mask[0] |= fwu->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
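+/*
+ * Put the device into flash programming (bootloader) mode, rescan the
+ * PDT and F34 queries, and force nosleep/normal sleep mode in the F01
+ * device control register.
+ */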
+static int fwu_enter_flash_prog(void)
+{
+	int retval;
+	struct f01_device_control f01_device_control;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->in_bl_mode)
+		return 0;
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, true);
+	if (retval < 0)
+		return retval;
+
+	msleep(INT_DISABLE_WAIT_MS);
+
+	retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_wait_for_idle(ENABLE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	if (!fwu->in_bl_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: BL mode not entered\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->hw_if->bl_hw_init) {
+		retval = rmi4_data->hw_if->bl_hw_init(rmi4_data);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_queries();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	f01_device_control.nosleep = true;
+	f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			f01_device_control.data,
+			sizeof(f01_device_control.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write F01 device control\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(ENTER_FLASH_PROG_WAIT_MS);
+
+	return retval;
+}
+
+static int fwu_check_ui_firmware_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.ui_firmware.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.ui_firmware) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: UI firmware size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_check_ui_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.ui_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.ui_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: UI configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_check_dp_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.dp_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.dp_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Display configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_check_pm_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.pm_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.pm_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Permanent configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_check_bl_configuration_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.bl_config.size / fwu->block_size;
+
+	if (block_count != fwu->blkcount.bl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_check_guest_code_size(void)
+{
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->img.guest_code.size / fwu->block_size;
+	if (block_count != fwu->blkcount.guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_erase_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UI_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case BL_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case FLASH_CONFIG_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_FLASH_CONFIG);
+		if (retval < 0)
+			return retval;
+		break;
+	case UPP_AREA:
+		retval = fwu_write_f34_command(CMD_ERASE_UTILITY_PARAMETER);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return retval;
+}
+
+static int fwu_erase_bootloader(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_BOOTLOADER);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#ifdef SYNA_TDDI
+static int fwu_erase_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	msleep(100);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
+#endif
+
+static int fwu_erase_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_command(CMD_ERASE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Erase command written\n",
+			__func__);
+
+	retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+	if (retval < 0)
+		return retval;
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Idle status detected\n",
+			__func__);
+
+	return 0;
+}
+
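+/*
+ * Erase the UI firmware and configuration areas. v7 bootloaders erase
+ * the partitions individually; others use a single erase-all command,
+ * after which v8 bootloaders return early. Display configuration and
+ * guest code are then erased separately when present.
+ */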
+static int fwu_erase_all(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version == BL_V7) {
+		retval = fwu_write_f34_command(CMD_ERASE_UI_FIRMWARE);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = fwu_write_f34_command(CMD_ERASE_ALL);
+		if (retval < 0)
+			return retval;
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Erase all command written\n",
+				__func__);
+
+		retval = fwu_wait_for_idle(ERASE_WAIT_MS, false);
+		if (!(fwu->bl_version == BL_V8 &&
+				fwu->flash_status == BAD_PARTITION_TABLE)) {
+			if (retval < 0)
+				return retval;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Idle status detected\n",
+				__func__);
+
+		if (fwu->bl_version == BL_V8)
+			return 0;
+	}
+
+	if (fwu->flash_properties.has_disp_config) {
+		fwu->config_area = DP_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (fwu->has_guest_code) {
+		retval = fwu_erase_guest_code();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+static int fwu_write_firmware(void)
+{
+	unsigned short firmware_block_count;
+
+	firmware_block_count = fwu->img.ui_firmware.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.ui_firmware.data,
+			firmware_block_count, CMD_WRITE_FW);
+}
+
+static int fwu_write_bootloader(void)
+{
+	int retval;
+	unsigned short bootloader_block_count;
+
+	bootloader_block_count = fwu->img.bl_image.size / fwu->block_size;
+
+	fwu->write_bootloader = true;
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.bl_image.data,
+			bootloader_block_count, CMD_WRITE_BOOTLOADER);
+	fwu->write_bootloader = false;
+
+	return retval;
+}
+
+static int fwu_write_utility_parameter(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char checksum_array[4];
+	unsigned char *pbuf;
+	unsigned short remaining_size;
+	unsigned short utility_param_size;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	utility_param_size = fwu->blkcount.utility_param * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(utility_param_size);
+	if (retval < 0)
+		return retval;
+	memset(fwu->read_config_buf, 0x00, utility_param_size);
+
+	pbuf = fwu->read_config_buf;
+	remaining_size = utility_param_size - 4;
+
+	for (ii = 0; ii < MAX_UTILITY_PARAMS; ii++) {
+		if (fwu->img.utility_param_id[ii] == UNUSED)
+			continue;
+
+#ifdef F51_DISCRETE_FORCE
+		if (fwu->img.utility_param_id[ii] == FORCE_PARAMETER) {
+			if (fwu->bl_mode_device) {
+				dev_info(rmi4_data->pdev->dev.parent,
+						"%s: Device in bootloader mode, skipping calibration data restoration\n",
+						__func__);
+				goto image_param;
+			}
+			retval = secure_memcpy(&(pbuf[4]),
+					remaining_size - 4,
+					fwu->cal_data,
+					fwu->cal_data_buf_size,
+					fwu->cal_data_size);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy force calibration data\n",
+						__func__);
+				return retval;
+			}
+			pbuf[0] = FORCE_PARAMETER;
+			pbuf[1] = 0x00;
+			pbuf[2] = (4 + fwu->cal_data_size) / 2;
+			pbuf += (fwu->cal_data_size + 4);
+			remaining_size -= (fwu->cal_data_size + 4);
+			continue;
+		}
+image_param:
+#endif
+
+		retval = secure_memcpy(pbuf,
+				remaining_size,
+				fwu->img.utility_param[ii].data,
+				fwu->img.utility_param[ii].size,
+				fwu->img.utility_param[ii].size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy utility parameter data\n",
+					__func__);
+			return retval;
+		}
+		pbuf += fwu->img.utility_param[ii].size;
+		remaining_size -= fwu->img.utility_param[ii].size;
+	}
+
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((utility_param_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[utility_param_size - 4] = checksum_array[0];
+	fwu->read_config_buf[utility_param_size - 3] = checksum_array[1];
+	fwu->read_config_buf[utility_param_size - 2] = checksum_array[2];
+	fwu->read_config_buf[utility_param_size - 1] = checksum_array[3];
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->read_config_buf,
+			fwu->blkcount.utility_param, CMD_WRITE_UTILITY_PARAM);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int fwu_write_configuration(void)
+{
+	return fwu_write_f34_blocks((unsigned char *)fwu->config_data,
+			fwu->config_block_count, CMD_WRITE_CONFIG);
+}
+
+static int fwu_write_ui_configuration(void)
+{
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->img.ui_config.data;
+	fwu->config_size = fwu->img.ui_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+static int fwu_write_dp_configuration(void)
+{
+	fwu->config_area = DP_CONFIG_AREA;
+	fwu->config_data = fwu->img.dp_config.data;
+	fwu->config_size = fwu->img.dp_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+static int fwu_write_pm_configuration(void)
+{
+	fwu->config_area = PM_CONFIG_AREA;
+	fwu->config_data = fwu->img.pm_config.data;
+	fwu->config_size = fwu->img.pm_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	return fwu_write_configuration();
+}
+
+#ifdef SYNA_TDDI
+static int fwu_write_tddi_lockdown_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_write_f34_blocks(fwu->read_config_buf,
+			fwu->blkcount.tddi_lockdown_data,
+			CMD_WRITE_LOCKDOWN_DATA);
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+	return 0;
+}
+#endif
+
+static int fwu_write_flash_configuration(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+static int fwu_write_guest_code(void)
+{
+	int retval;
+	unsigned short guest_code_block_count;
+
+	guest_code_block_count = fwu->img.guest_code.size / fwu->block_size;
+
+	retval = fwu_write_f34_blocks((unsigned char *)fwu->img.guest_code.data,
+			guest_code_block_count, CMD_WRITE_GUEST_CODE);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int fwu_write_lockdown(void)
+{
+	unsigned short lockdown_block_count;
+
+	lockdown_block_count = fwu->img.lockdown.size / fwu->block_size;
+
+	return fwu_write_f34_blocks((unsigned char *)fwu->img.lockdown.data,
+			lockdown_block_count, CMD_WRITE_LOCKDOWN);
+}
+
+static int fwu_write_partition_table_v8(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	if (fwu->config_block_count != fwu->blkcount.fl_config) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash configuration size mismatch\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return 0;
+}
+
+static int fwu_write_partition_table_v7(void)
+{
+	int retval;
+	unsigned short block_count;
+
+	block_count = fwu->blkcount.bl_config;
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_flash_configuration();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int fwu_write_bl_area_v7(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->has_utility_param) {
+		fwu->config_area = UPP_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->config_area = BL_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_erase_bootloader();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_bootloader();
+	if (retval < 0)
+		return retval;
+
+	msleep(rmi4_data->hw_if->board_data->reset_delay_ms);
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = FLASH_CONFIG_AREA;
+	fwu->config_data = fwu->img.fl_config.data;
+	fwu->config_size = fwu->img.fl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+	rmi4_data->reset_device(rmi4_data, false);
+
+	fwu->config_area = BL_CONFIG_AREA;
+	fwu->config_data = fwu->img.bl_config.data;
+	fwu->config_size = fwu->img.bl_config.size;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	if (fwu->img.contains_utility_param) {
+		retval = fwu_write_utility_parameter();
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
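+/*
+ * Top-level reflash sequence: validate the image sizes against the
+ * device partition layout, erase the flash, rewrite the bootloader area
+ * or partition table when required, then program the display and UI
+ * configurations, guest code, and UI firmware.
+ */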
+static int fwu_do_reflash(void)
+{
+	int retval;
+	bool do_bl_update = false;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!fwu->new_partition_table) {
+		retval = fwu_check_ui_firmware_size();
+		if (retval < 0)
+			return retval;
+
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+
+		if (fwu->flash_properties.has_disp_config &&
+				fwu->img.contains_disp_config) {
+			retval = fwu_check_dp_configuration_size();
+			if (retval < 0)
+				return retval;
+		}
+
+		if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+			retval = fwu_check_guest_code_size();
+			if (retval < 0)
+				return retval;
+		}
+	} else if (fwu->bl_version == BL_V7) {
+		retval = fwu_check_bl_configuration_size();
+		if (retval < 0)
+			return retval;
+	}
+
+	if (!fwu->has_utility_param && fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (fwu->has_utility_param && !fwu->img.contains_utility_param) {
+		if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
+			do_bl_update = true;
+	}
+
+	if (!do_bl_update && fwu->incompatible_partition_tables) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Incompatible partition tables\n",
+				__func__);
+		return -EINVAL;
+	} else if (!do_bl_update && fwu->new_partition_table) {
+		if (!fwu->force_update) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Partition table mismatch\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	retval = fwu_erase_all();
+	if (retval < 0)
+		return retval;
+
+	if (do_bl_update) {
+		retval = fwu_write_bl_area_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Bootloader area programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V7 && fwu->new_partition_table) {
+		retval = fwu_write_partition_table_v7();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	} else if (fwu->bl_version == BL_V8) {
+		retval = fwu_write_partition_table_v8();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Partition table programmed\n", __func__);
+	}
+
+	fwu->config_area = UI_CONFIG_AREA;
+	if (fwu->flash_properties.has_disp_config &&
+			fwu->img.contains_disp_config) {
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Display configuration programmed\n", __func__);
+	}
+
+	retval = fwu_write_ui_configuration();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Configuration programmed\n", __func__);
+
+	if (fwu->has_guest_code && fwu->img.contains_guest_code) {
+		retval = fwu_write_guest_code();
+		if (retval < 0)
+			return retval;
+		pr_notice("%s: Guest code programmed\n", __func__);
+	}
+
+	retval = fwu_write_firmware();
+	if (retval < 0)
+		return retval;
+	pr_notice("%s: Firmware programmed\n", __func__);
+
+	return retval;
+}
+
+static int fwu_do_read_config(void)
+{
+	int retval;
+	unsigned short block_count;
+	unsigned short config_area;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		block_count = fwu->blkcount.ui_config;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.dp_config;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.pm_config;
+		break;
+	case BL_CONFIG_AREA:
+		if (!fwu->flash_properties.has_bl_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Bootloader configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.bl_config;
+		break;
+	case UPP_AREA:
+		if (!fwu->has_utility_param) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Utility parameter not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.utility_param;
+		break;
+#ifdef SYNA_TDDI
+	case TDDI_FORCE_CONFIG_AREA:
+		if (!fwu->has_force_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: force configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_force_config;
+		break;
+	case TDDI_OEM_DATA_AREA:
+		if (!fwu->has_oem_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: oem data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_oem_data;
+		break;
+	case TDDI_LCM_DATA_AREA:
+		if (!fwu->has_lcm_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: lcm data not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		block_count = fwu->blkcount.tddi_lcm_data;
+		break;
+#endif
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid config area\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (block_count == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid block count\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6) {
+		config_area = fwu->config_area;
+		retval = fwu_enter_flash_prog();
+		fwu->config_area = config_area;
+		if (retval < 0)
+			goto exit;
+	}
+
+	fwu->config_size = fwu->block_size * block_count;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+
+exit:
+	if (fwu->bl_version == BL_V5 || fwu->bl_version == BL_V6)
+		rmi4_data->reset_device(rmi4_data, false);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+static int fwu_do_read_tddi_lockdown_data(void)
+{
+	int retval = -EINVAL;
+	unsigned short block_count;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.tddi_lockdown_data;
+	fwu->config_size = fwu->block_size * block_count;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Lockdown data not supported in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Lockdown data not supported\n", __func__);
+		goto exit;
+	}
+
+	kfree(fwu->read_config_buf);
+
+	fwu->read_config_buf = kzalloc(fwu->config_size, GFP_KERNEL);
+
+	if (!fwu->read_config_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu->read_config_buf\n",
+				__func__);
+		fwu->read_config_buf_size = 0;
+		retval = -ENOMEM;
+		goto exit;
+	}
+	fwu->read_config_buf_size = fwu->config_size;
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_LOCKDOWN_DATA);
+exit:
+	return retval;
+}
+
+int get_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int retval;
+
+	retval = fwu_do_read_tddi_lockdown_data();
+	if (retval < 0)
+		return retval;
+	memcpy(lockdown_data, fwu->read_config_buf, leng);
+	return retval;
+}
+
+int set_tddi_lockdown_data(unsigned char *lockdown_data, unsigned short leng)
+{
+	int retval = -EINVAL;
+	unsigned long checksum;
+	unsigned char checksum_array[4];
+	unsigned short blk_cnt;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (fwu->bl_version != BL_V6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Lockdown data not supported in bl v.%d\n",
+				__func__,
+				fwu->bl_version);
+		goto exit;
+	} else if (!fwu->has_lockdown_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Lockdown data not supported\n", __func__);
+		goto exit;
+	}
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_lockdown_data();
+	if (retval < 0)
+		goto exit;
+
+	blk_cnt = fwu->blkcount.tddi_lockdown_data;
+
+	fwu->config_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		goto exit;
+	memset(fwu->read_config_buf, 0x00, fwu->config_size);
+	retval = secure_memcpy(fwu->read_config_buf, fwu->config_size,
+			lockdown_data, leng, leng);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy TDDI lockdown data\n",
+				__func__);
+		goto exit;
+	}
+
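+	/*
+	 * Recompute the checksum over the lockdown payload and store it
+	 * little endian in the last four bytes of the block.
+	 */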
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 4] = checksum_array[0];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 3] = checksum_array[1];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 2] = checksum_array[2];
+	fwu->read_config_buf[blk_cnt * fwu->block_size - 1] = checksum_array[3];
+	retval = fwu_write_tddi_lockdown_data();
+exit:
+	return retval;
+}
+#endif
+
+static int fwu_do_lockdown_v7(void)
+{
+	int retval;
+	struct f34_v7_data0 status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.data_base_addr + fwu->off.flash_status,
+			status.data,
+			sizeof(status.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash status\n",
+				__func__);
+		return retval;
+	}
+
+	if (status.device_cfg_status == 2) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+static int fwu_do_lockdown_v5v6(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#ifdef SYNA_TDDI
+	unsigned char *img_ld;
+
+	img_ld = (unsigned char *)fwu->img.lockdown.data;
+	if (fwu->has_lockdown_data) {
+		retval = set_tddi_lockdown_data(img_ld,
+				LOCKDOWN_SIZE);
+		if (retval < 0)
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write lockdown data\n",
+					__func__);
+		return retval;
+	}
+#endif
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			fwu->f34_fd.query_base_addr + fwu->off.properties,
+			fwu->flash_properties.data,
+			sizeof(fwu->flash_properties.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read flash properties\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->flash_properties.unlocked == 0) {
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device already locked down\n",
+				__func__);
+		return 0;
+	}
+
+	retval = fwu_write_lockdown();
+	if (retval < 0)
+		return retval;
+
+	pr_notice("%s: Lockdown programmed\n", __func__);
+
+	return retval;
+}
+
+#ifdef F51_DISCRETE_FORCE
+static int fwu_do_restore_f51_cal_data(void)
+{
+	int retval;
+	unsigned char checksum_array[4];
+	unsigned short block_count;
+	unsigned long checksum;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	block_count = fwu->blkcount.ui_config;
+	fwu->config_size = fwu->block_size * block_count;
+	fwu->config_area = UI_CONFIG_AREA;
+
+	retval = fwu_allocate_read_config_buf(fwu->config_size);
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_read_f34_blocks(block_count, CMD_READ_CONFIG);
+	if (retval < 0)
+		return retval;
+
+	retval = secure_memcpy(&fwu->read_config_buf[fwu->cal_data_off],
+			fwu->cal_data_size, fwu->cal_data,
+			fwu->cal_data_buf_size, fwu->cal_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore calibration data\n",
+				__func__);
+		return retval;
+	}
+
+	calculate_checksum((unsigned short *)fwu->read_config_buf,
+			((fwu->config_size - 4) / 2),
+			&checksum);
+
+	convert_to_little_endian(checksum_array, checksum);
+
+	fwu->read_config_buf[fwu->config_size - 4] = checksum_array[0];
+	fwu->read_config_buf[fwu->config_size - 3] = checksum_array[1];
+	fwu->read_config_buf[fwu->config_size - 2] = checksum_array[2];
+	fwu->read_config_buf[fwu->config_size - 1] = checksum_array[3];
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		return retval;
+
+	fwu->config_area = UI_CONFIG_AREA;
+	fwu->config_data = fwu->read_config_buf;
+	fwu->config_block_count = fwu->config_size / fwu->block_size;
+
+	retval = fwu_erase_configuration();
+	if (retval < 0)
+		return retval;
+
+	retval = fwu_write_configuration();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+#endif
+
+static int fwu_start_write_guest_code(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	if (!fwu->has_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Guest code not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!fwu->img.contains_guest_code) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No guest code in firmware image\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write guest code process\n", __func__);
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_check_guest_code_size();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_erase_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	retval = fwu_write_guest_code();
+	if (retval < 0)
+		goto exit;
+
+	pr_notice("%s: Guest code programmed\n", __func__);
+
+exit:
+	rmi4_data->reset_device(rmi4_data, false);
+
+	pr_notice("%s: End of write guest code process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+static int fwu_start_write_config(void)
+{
+	int retval;
+	unsigned short config_area;
+	unsigned int device_fw_id;
+	unsigned int image_fw_id;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		return -EINVAL;
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		device_fw_id = rmi4_data->firmware_id;
+		retval = fwu_get_image_firmware_id(&image_fw_id);
+		if (retval < 0)
+			return retval;
+		if (device_fw_id != image_fw_id) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Device and image firmware IDs don't match\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case DP_CONFIG_AREA:
+		if (!fwu->flash_properties.has_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Display configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_disp_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No display configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_dp_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	case PM_CONFIG_AREA:
+		if (!fwu->flash_properties.has_pm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Permanent configuration not supported\n",
+					__func__);
+			return -EINVAL;
+		}
+		if (!fwu->img.contains_perm_config) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: No permanent configuration in firmware image\n",
+					__func__);
+			return -EINVAL;
+		}
+		retval = fwu_check_pm_configuration_size();
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Configuration not supported\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of write config process\n", __func__);
+
+	config_area = fwu->config_area;
+
+	retval = fwu_enter_flash_prog();
+	if (retval < 0)
+		goto exit;
+
+	fwu->config_area = config_area;
+
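+	/* The permanent configuration area is not erased before writing */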
+	if (fwu->config_area != PM_CONFIG_AREA) {
+		retval = fwu_erase_configuration();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to erase config\n",
+					__func__);
+			goto exit;
+		}
+	}
+
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		retval = fwu_write_ui_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case DP_CONFIG_AREA:
+		retval = fwu_write_dp_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	case PM_CONFIG_AREA:
+		retval = fwu_write_pm_configuration();
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+
+	pr_notice("%s: Config written\n", __func__);
+
+exit:
+	switch (fwu->config_area) {
+	case UI_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, true);
+		break;
+	case DP_CONFIG_AREA:
+	case PM_CONFIG_AREA:
+		rmi4_data->reset_device(rmi4_data, false);
+		break;
+	}
+
+	pr_notice("%s: End of write config process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+static int fwu_start_reflash(void)
+{
+	int retval = 0;
+	enum flash_area flash_area;
+	bool do_rebuild = false;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of reflash process\n", __func__);
+
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IMAGE_NAME, sizeof(FW_IMAGE_NAME),
+				sizeof(FW_IMAGE_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy image file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware image %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware image %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+	}
+
+	retval = fwu_parse_image_info();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->blkcount.total_count != fwu->img.blkcount.total_count) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Flash size mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->bl_version != fwu->img.bl_version) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Bootloader version mismatch\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_read_flash_status();
+	if (retval < 0)
+		goto exit;
+
+	if (fwu->in_bl_mode) {
+		fwu->bl_mode_device = true;
+		dev_info(rmi4_data->pdev->dev.parent,
+				"%s: Device in bootloader mode\n",
+				__func__);
+	} else {
+		fwu->bl_mode_device = false;
+	}
+
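+	/* Decide whether UI firmware, UI config, or nothing needs flashing */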
+	flash_area = fwu_go_nogo();
+
+	if (flash_area != NONE) {
+		retval = fwu_enter_flash_prog();
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+
+#ifdef F51_DISCRETE_FORCE
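+	/*
+	 * Save the F51 calibration data from the current UI configuration so
+	 * it can be restored after reflashing.
+	 */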
+	if (flash_area != NONE && !fwu->bl_mode_device) {
+		fwu->config_size = fwu->block_size * fwu->blkcount.ui_config;
+		fwu->config_area = UI_CONFIG_AREA;
+
+		retval = fwu_allocate_read_config_buf(fwu->config_size);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = fwu_read_f34_blocks(fwu->blkcount.ui_config,
+				CMD_READ_CONFIG);
+		if (retval < 0) {
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+
+		retval = secure_memcpy(fwu->cal_data, fwu->cal_data_buf_size,
+				&fwu->read_config_buf[fwu->cal_data_off],
+				fwu->cal_data_size, fwu->cal_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to save calibration data\n",
+					__func__);
+			rmi4_data->reset_device(rmi4_data, false);
+			goto exit;
+		}
+	}
+#endif
+
+	switch (flash_area) {
+	case UI_FIRMWARE:
+		do_rebuild = true;
+		retval = fwu_do_reflash();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param || fwu->img.contains_utility_param)
+			break;
+
+		rmi4_data->reset_device(rmi4_data, false);
+
+		if (fwu->bl_mode_device || fwu->in_bl_mode) {
+			dev_info(rmi4_data->pdev->dev.parent,
+					"%s: Device in bootloader mode, skipping calibration data restoration\n",
+					__func__);
+			break;
+		}
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case UI_CONFIG:
+		do_rebuild = true;
+		retval = fwu_check_ui_configuration_size();
+		if (retval < 0)
+			break;
+		fwu->config_area = UI_CONFIG_AREA;
+		retval = fwu_erase_configuration();
+		if (retval < 0)
+			break;
+		retval = fwu_write_ui_configuration();
+#ifdef F51_DISCRETE_FORCE
+		if (retval < 0)
+			break;
+
+		if (fwu->has_utility_param)
+			break;
+
+		retval = fwu_do_restore_f51_cal_data();
+#endif
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+	if (retval < 0) {
+		do_rebuild = false;
+		rmi4_data->reset_device(rmi4_data, false);
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
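+	/*
+	 * Optionally program the lockdown area, using the procedure that
+	 * matches the detected bootloader version.
+	 */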
+	if (fwu->do_lockdown && (fwu->img.lockdown.data != NULL)) {
+		switch (fwu->bl_version) {
+		case BL_V5:
+		case BL_V6:
+			retval = fwu_do_lockdown_v5v6();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		case BL_V7:
+		case BL_V8:
+			retval = fwu_do_lockdown_v7();
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to do lockdown\n",
+						__func__);
+			}
+			rmi4_data->reset_device(rmi4_data, false);
+			break;
+		default:
+			break;
+		}
+	}
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	if (do_rebuild)
+		rmi4_data->reset_device(rmi4_data, true);
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+static int fwu_recovery_check_status(void)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char status;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f35_fd.data_base_addr;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			data_base + F35_ERROR_CODE_OFFSET,
+			&status,
+			1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read status\n",
+				__func__);
+		return retval;
+	}
+
+	status = status & MASK_5BIT;
+
+	if (status != 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Recovery mode status = %d\n",
+				__func__, status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fwu_recovery_erase_completion(void)
+{
+	int retval;
+	unsigned char data_base;
+	unsigned char command;
+	unsigned char status;
+	unsigned int timeout = F35_ERASE_ALL_WAIT_MS / 20;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	data_base = fwu->f35_fd.data_base_addr;
+
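+	/*
+	 * Poll for completion of the erase: wait for the command register to
+	 * clear, then for the flash status busy bit, in 20 ms steps.
+	 */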
+	do {
+		command = 0x01;
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				fwu->f35_fd.cmd_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to issue command\n",
+					__func__);
+			return retval;
+		}
+
+		do {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					fwu->f35_fd.cmd_base_addr,
+					&command,
+					sizeof(command));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read command status\n",
+						__func__);
+				return retval;
+			}
+
+			if ((command & 0x01) == 0x00)
+				break;
+
+			msleep(20);
+			timeout--;
+		} while (timeout > 0);
+
+		if (timeout == 0)
+			goto exit;
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				data_base + F35_FLASH_STATUS_OFFSET,
+				&status,
+				sizeof(status));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read flash status\n",
+					__func__);
+			return retval;
+		}
+
+		if ((status & 0x01) == 0x00)
+			break;
+
+		msleep(20);
+		timeout--;
+	} while (timeout > 0);
+
+exit:
+	if (timeout == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for flash erase completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int fwu_recovery_erase_all(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char command = CMD_F35_ERASE_ALL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue erase all command\n",
+				__func__);
+		return retval;
+	}
+
+	if (fwu->f35_fd.cmd_base_addr) {
+		retval = fwu_recovery_erase_completion();
+		if (retval < 0)
+			return retval;
+	} else {
+		msleep(F35_ERASE_ALL_WAIT_MS);
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int fwu_recovery_write_chunk(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char chunk_number[] = {0, 0};
+	unsigned char chunk_spare;
+	unsigned char chunk_size;
+	unsigned char buf[F35_CHUNK_SIZE + 1];
+	unsigned short chunk;
+	unsigned short chunk_total;
+	unsigned short bytes_written = 0;
+	unsigned char *chunk_ptr = (unsigned char *)fwu->image;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_NUM_LSB_OFFSET,
+			chunk_number,
+			sizeof(chunk_number));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk number\n",
+				__func__);
+		return retval;
+	}
+
+	buf[sizeof(buf) - 1] = CMD_F35_WRITE_CHUNK;
+
+	chunk_total = fwu->image_size / F35_CHUNK_SIZE;
+	chunk_spare = fwu->image_size % F35_CHUNK_SIZE;
+	if (chunk_spare)
+		chunk_total++;
+
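+	/*
+	 * Stream the image in F35_CHUNK_SIZE pieces; the last byte of each
+	 * transfer carries the write chunk command.
+	 */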
+	for (chunk = 0; chunk < chunk_total; chunk++) {
+		if (chunk_spare && chunk == chunk_total - 1)
+			chunk_size = chunk_spare;
+		else
+			chunk_size = F35_CHUNK_SIZE;
+
+		memset(buf, 0x00, F35_CHUNK_SIZE);
+		secure_memcpy(buf, sizeof(buf), chunk_ptr,
+					fwu->image_size - bytes_written,
+					chunk_size);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				ctrl_base + F35_CHUNK_DATA_OFFSET,
+				buf,
+				sizeof(buf));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write chunk data (chunk %d)\n",
+					__func__, chunk);
+			return retval;
+		}
+		chunk_ptr += chunk_size;
+		bytes_written += chunk_size;
+	}
+
+	retval = fwu_recovery_check_status();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int fwu_recovery_reset(void)
+{
+	int retval;
+	unsigned char ctrl_base;
+	unsigned char command = CMD_F35_RESET;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	ctrl_base = fwu->f35_fd.ctrl_base_addr;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			ctrl_base + F35_CHUNK_COMMAND_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to issue reset command\n",
+				__func__);
+		return retval;
+	}
+
+	msleep(F35_RESET_WAIT_MS);
+
+	return 0;
+}
+
+static int fwu_start_recovery(void)
+{
+	int retval;
+	const struct firmware *fw_entry = NULL;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	mutex_lock(&rmi4_data->rmi4_exp_init_mutex);
+
+	pr_notice("%s: Start of recovery process\n", __func__);
+
+	if (fwu->image == NULL) {
+		retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+				FW_IHEX_NAME, sizeof(FW_IHEX_NAME),
+				sizeof(FW_IHEX_NAME));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy ihex file name\n",
+					__func__);
+			goto exit;
+		}
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Requesting firmware ihex %s\n",
+				__func__, fwu->image_name);
+
+		retval = request_firmware(&fw_entry, fwu->image_name,
+				rmi4_data->pdev->dev.parent);
+		if (retval != 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Firmware ihex %s not available\n",
+					__func__, fwu->image_name);
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Firmware image size = %d\n",
+				__func__, (unsigned int)fw_entry->size);
+
+		fwu->image = fw_entry->data;
+		fwu->image_size = fw_entry->size;
+	}
+
+	retval = rmi4_data->irq_enable(rmi4_data, false, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to disable interrupt\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = fwu_recovery_erase_all();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do erase all in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: External flash erased\n", __func__);
+
+	retval = fwu_recovery_write_chunk();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write chunk data in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Chunk data programmed\n", __func__);
+
+	retval = fwu_recovery_reset();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to reset device in recovery mode\n",
+				__func__);
+		goto exit;
+	}
+
+	pr_notice("%s: Recovery mode reset issued\n", __func__);
+
+	rmi4_data->reset_device(rmi4_data, true);
+
+	retval = 0;
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	pr_notice("%s: End of recovery process\n", __func__);
+
+	mutex_unlock(&rmi4_data->rmi4_exp_init_mutex);
+
+	rmi4_data->stay_awake = false;
+
+	return retval;
+}
+
+int synaptics_fw_updater(const unsigned char *fw_data)
+{
+	int retval;
+
+	if (!fwu)
+		return -ENODEV;
+
+	if (!fwu->initialized)
+		return -ENODEV;
+
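+	/*
+	 * If the device is stuck in microbootloader mode, recover it with the
+	 * default ihex image before attempting a normal reflash.
+	 */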
+	if (fwu->in_ub_mode) {
+		fwu->image = NULL;
+		retval = fwu_start_recovery();
+		if (retval < 0)
+			return retval;
+	}
+
+	fwu->image = fw_data;
+
+	retval = fwu_start_reflash();
+
+	fwu->image = NULL;
+
+	return retval;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+#ifdef DO_STARTUP_FW_UPDATE
+static void fwu_startup_fw_update_work(struct work_struct *work)
+{
+	static unsigned char do_once = 1;
+#ifdef WAIT_FOR_FB_READY
+	unsigned int timeout;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+#endif
+
+	if (!do_once)
+		return;
+	do_once = 0;
+
+#ifdef WAIT_FOR_FB_READY
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS + 1;
+
+	while (!rmi4_data->fb_ready) {
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+		if (timeout == 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for FB ready\n",
+					__func__);
+			return;
+		}
+	}
+#endif
+
+	synaptics_fw_updater(NULL);
+
+	return;
+}
+#endif
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (count < fwu->config_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = secure_memcpy(buf, count, fwu->read_config_buf,
+			fwu->read_config_buf_size, fwu->config_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy config data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = fwu->config_size;
+	}
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = secure_memcpy(&fwu->ext_data_source[fwu->data_pos],
+			fwu->image_size - fwu->data_pos, buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image data\n",
+				__func__);
+		goto exit;
+	} else {
+		retval = count;
+	}
+
+	fwu->data_pos += count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_do_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not in microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_recovery();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do recovery\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	if (input & LOCKDOWN) {
+		fwu->do_lockdown = true;
+		input &= ~LOCKDOWN;
+	}
+
+	if ((input != NORMAL) && (input != FORCE)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input == FORCE)
+		fwu->force_update = true;
+
+	retval = synaptics_fw_updater(fwu->image);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do reflash\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = fwu_do_read_config();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read config\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long config_area;
+
+	retval = sstrtoul(buf, 10, &config_area);
+	if (retval)
+		return retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->config_area = config_area;
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return count;
+}
+
+static ssize_t fwu_sysfs_image_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = secure_memcpy(fwu->image_name, MAX_IMAGE_NAME_LEN,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy image file name\n",
+				__func__);
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long size;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &size);
+	if (retval)
+		return retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	fwu->image_size = size;
+	fwu->data_pos = 0;
+
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+	if (!fwu->ext_data_source) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image data\n",
+				__func__);
+		retval = -ENOMEM;
+	} else {
+		retval = count;
+	}
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_firmware);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.ui_config);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.dp_config);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.pm_config);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.bl_config);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_utility_parameter_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.utility_param);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_guest_code_block_count_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", fwu->blkcount.guest_code);
+
+	mutex_unlock(&fwu_sysfs_mutex);
+
+	return retval;
+}
+
+static ssize_t fwu_sysfs_write_guest_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	if (kstrtouint(buf, 10, &input)) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (input != 1) {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (fwu->in_ub_mode) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: In microbootloader mode\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!fwu->ext_data_source) {
+		retval = -EINVAL;
+		goto exit;
+	} else {
+		fwu->image = fwu->ext_data_source;
+	}
+
+	retval = fwu_start_write_guest_code();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write guest code\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	kfree(fwu->ext_data_source);
+	fwu->ext_data_source = NULL;
+	fwu->image = NULL;
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval;
+}
+
+#ifdef SYNA_TDDI
+static ssize_t fwu_sysfs_read_lockdown_code_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned short lockdown_data_size;
+	unsigned char *lockdown_data;
+	char ld_val[3];
+	int retval = 0;
+	int i = 0;
+
+	if (!mutex_trylock(&fwu_sysfs_mutex))
+		return -EBUSY;
+
+	lockdown_data_size = fwu->blkcount.tddi_lockdown_data * fwu->block_size;
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data) {
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -ENOMEM;
+	}
+
+	if (get_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < lockdown_data_size; i++) {
+		retval += snprintf(ld_val, sizeof(ld_val), "%02x",
+				*(lockdown_data + i));
+		strlcat(buf, ld_val, PAGE_SIZE);
+	}
+	*(buf + retval) = '\n';
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return retval + 1;
+}
+
+static ssize_t fwu_sysfs_write_lockdown_code_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned short lockdown_data_size = (count - 1) / 2;
+	unsigned char *lockdown_data;
+	char temp[3];
+	int ld_val;
+	int i = 0;
+
+	for (i = 0; i < (count - 1); i++) {
+		if ((buf[i] >= '0' && buf[i] <= '9') ||
+				(buf[i] >= 'a' && buf[i] <= 'f') ||
+				(buf[i] >= 'A' && buf[i] <= 'F'))
+			continue;
+		else
+			return -EINVAL;
+	}
+
+	if (count % 2 != 1)
+		return -EINVAL;
+
+	lockdown_data = kzalloc(lockdown_data_size, GFP_KERNEL);
+	if (!lockdown_data)
+		return -ENOMEM;
+
+	for (i = 0; i < lockdown_data_size; i++) {
+		memcpy(temp, (buf + 2 * i), 2);
+		temp[2] = '\0';
+		if (kstrtoint(temp, 16, &ld_val) == 0)
+			*(lockdown_data + i) = ld_val & 0xff;
+	}
+
+	if (!mutex_trylock(&fwu_sysfs_mutex)) {
+		kfree(lockdown_data);
+		return -EBUSY;
+	}
+
+	if (set_tddi_lockdown_data(lockdown_data, lockdown_data_size) < 0) {
+		kfree(lockdown_data);
+		mutex_unlock(&fwu_sysfs_mutex);
+		return -EINVAL;
+	}
+	kfree(lockdown_data);
+	mutex_unlock(&fwu_sysfs_mutex);
+	return count;
+}
+#endif
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!fwu)
+		return;
+
+	if (fwu->intr_mask & intr_mask)
+		fwu_read_flash_status();
+
+	return;
+}
+
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+	struct pdt_properties pdt_props;
+
+	if (fwu) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+	if (!fwu) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for fwu\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	fwu->image_name = kzalloc(MAX_IMAGE_NAME_LEN, GFP_KERNEL);
+	if (!fwu->image_name) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for image name\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_fwu;
+	}
+
+	fwu->rmi4_data = rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			PDT_PROPS,
+			pdt_props.data,
+			sizeof(pdt_props.data));
+	if (retval < 0) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read PDT properties, assuming 0x00\n",
+				__func__);
+	} else if (pdt_props.has_bsr) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Reflash for LTS not currently supported\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_free_mem;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	if (!fwu->in_ub_mode) {
+		retval = fwu_read_f34_queries();
+		if (retval < 0)
+			goto exit_free_mem;
+
+		retval = fwu_get_device_config_id();
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read device config ID\n",
+					__func__);
+			goto exit_free_mem;
+		}
+	}
+
+	fwu->force_update = FORCE_UPDATE;
+	fwu->do_lockdown = DO_LOCKDOWN;
+	fwu->initialized = true;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+	INIT_WORK(&fwu->fwu_work, fwu_startup_fw_update_work);
+	queue_work(fwu->fwu_workqueue,
+			&fwu->fwu_work);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode) {
+		retval = fwu_f51_force_data_init();
+		if (retval < 0)
+			goto exit_free_mem;
+	}
+#endif
+
+	if (ENABLE_SYS_REFLASH == false)
+		return 0;
+
+	retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+			&dev_attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	return 0;
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+exit_free_mem:
+	kfree(fwu->image_name);
+
+exit_free_fwu:
+	kfree(fwu);
+	fwu = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!fwu)
+		goto exit;
+
+#ifdef DO_STARTUP_FW_UPDATE
+	cancel_work_sync(&fwu->fwu_work);
+	flush_workqueue(fwu->fwu_workqueue);
+	destroy_workqueue(fwu->fwu_workqueue);
+#endif
+
+#ifdef F51_DISCRETE_FORCE
+	kfree(fwu->cal_data);
+#endif
+	kfree(fwu->read_config_buf);
+	kfree(fwu->image_name);
+	kfree(fwu);
+	fwu = NULL;
+
+	if (ENABLE_SYS_REFLASH == false)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+exit:
+	complete(&fwu_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_fwu_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!fwu) {
+		synaptics_rmi4_fwu_init(rmi4_data);
+		return;
+	}
+
+	retval = fwu_scan_pdt();
+	if (retval < 0)
+		return;
+
+	if (!fwu->in_ub_mode)
+		fwu_read_f34_queries();
+
+#ifdef F51_DISCRETE_FORCE
+	fwu_read_flash_status();
+	if (!fwu->in_bl_mode)
+		fwu_f51_force_data_init();
+#endif
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn fwu_module = {
+	.fn_type = RMI_FW_UPDATER,
+	.init = synaptics_rmi4_fwu_init,
+	.remove = synaptics_rmi4_fwu_remove,
+	.reset = synaptics_rmi4_fwu_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_fwu_attn,
+};
+
+static int __init rmi4_fw_update_module_init(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_fw_update_module_exit(void)
+{
+	synaptics_rmi4_new_function(&fwu_module, false);
+
+	wait_for_completion(&fwu_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX FW Update Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
new file mode 100644
index 0000000..875670b
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_gesture.c
@@ -0,0 +1,2308 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define GESTURE_PHYS_NAME "synaptics_dsx/gesture"
+
+#define TUNING_SYSFS_DIR_NAME "tuning"
+
+#define STORE_GESTURES
+#ifdef STORE_GESTURES
+#define GESTURES_TO_STORE 10
+#endif
+
+#define CTRL23_FINGER_REPORT_ENABLE_BIT 0
+#define CTRL27_UDG_ENABLE_BIT 4
+#define WAKEUP_GESTURE_MODE 0x02
+
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int udg_read_tuning_params(void);
+
+static int udg_write_tuning_params(void);
+
+static int udg_detection_enable(bool enable);
+
+static int udg_engine_enable(bool enable);
+
+static int udg_set_index(unsigned char index);
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void);
+static int udg_write_valid_data(void);
+static int udg_read_template_data(unsigned char index);
+static int udg_write_template_data(void);
+#endif
+
+enum gesture_type {
+	DETECTION = 0x0f,
+	REGISTRATION = 0x10,
+};
+
+struct udg_tuning {
+	union {
+		struct {
+			unsigned char maximum_number_of_templates;
+			unsigned char template_size;
+			unsigned char template_disp_lsb;
+			unsigned char template_disp_msb;
+			unsigned char rotation_inv_lsb;
+			unsigned char rotation_inv_msb;
+			unsigned char scale_inv_lsb;
+			unsigned char scale_inv_msb;
+			unsigned char thres_factor_lsb;
+			unsigned char thres_factor_msb;
+			unsigned char metric_thres_lsb;
+			unsigned char metric_thres_msb;
+			unsigned char inter_stroke_lsb;
+			unsigned char inter_stroke_msb;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+struct udg_addr {
+	unsigned short data_4;
+	unsigned short ctrl_18;
+	unsigned short ctrl_20;
+	unsigned short ctrl_23;
+	unsigned short ctrl_27;
+	unsigned short ctrl_41;
+	unsigned short trace_x;
+	unsigned short trace_y;
+	unsigned short trace_segment;
+	unsigned short template_helper;
+	unsigned short template_data;
+	unsigned short template_flags;
+};
+
+struct synaptics_rmi4_f12_query_0 {
+	union {
+		struct {
+			struct {
+				unsigned char has_register_descriptors:1;
+				unsigned char has_closed_cover:1;
+				unsigned char has_fast_glove_detect:1;
+				unsigned char has_dribble:1;
+				unsigned char has_4p4_jitter_filter_strength:1;
+				unsigned char f12_query0_s0_b5__7:3;
+			} __packed;
+			struct {
+				unsigned char max_num_templates:4;
+				unsigned char f12_query0_s1_b4__7:4;
+				unsigned char template_size_lsb;
+				unsigned char template_size_msb;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl24_is_present:1;
+				unsigned char ctrl25_is_present:1;
+				unsigned char ctrl26_is_present:1;
+				unsigned char ctrl27_is_present:1;
+				unsigned char ctrl28_is_present:1;
+				unsigned char ctrl29_is_present:1;
+				unsigned char ctrl30_is_present:1;
+				unsigned char ctrl31_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl32_is_present:1;
+				unsigned char ctrl33_is_present:1;
+				unsigned char ctrl34_is_present:1;
+				unsigned char ctrl35_is_present:1;
+				unsigned char ctrl36_is_present:1;
+				unsigned char ctrl37_is_present:1;
+				unsigned char ctrl38_is_present:1;
+				unsigned char ctrl39_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl40_is_present:1;
+				unsigned char ctrl41_is_present:1;
+				unsigned char ctrl42_is_present:1;
+				unsigned char ctrl43_is_present:1;
+				unsigned char ctrl44_is_present:1;
+				unsigned char ctrl45_is_present:1;
+				unsigned char ctrl46_is_present:1;
+				unsigned char ctrl47_is_present:1;
+			} __packed;
+		};
+		unsigned char data[7];
+	};
+};
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data8_is_present:1;
+				unsigned char data9_is_present:1;
+				unsigned char data10_is_present:1;
+				unsigned char data11_is_present:1;
+				unsigned char data12_is_present:1;
+				unsigned char data13_is_present:1;
+				unsigned char data14_is_present:1;
+				unsigned char data15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char data16_is_present:1;
+				unsigned char data17_is_present:1;
+				unsigned char data18_is_present:1;
+				unsigned char data19_is_present:1;
+				unsigned char data20_is_present:1;
+				unsigned char data21_is_present:1;
+				unsigned char data22_is_present:1;
+				unsigned char data23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+struct synaptics_rmi4_f12_control_41 {
+	union {
+		struct {
+			unsigned char enable_registration:1;
+			unsigned char template_index:4;
+			unsigned char begin:1;
+			unsigned char f12_ctrl41_b6__7:2;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct synaptics_rmi4_udg_handle {
+	atomic_t attn_event;
+	unsigned char intr_mask;
+	unsigned char report_flags;
+	unsigned char object_type_enable1;
+	unsigned char object_type_enable2;
+	unsigned char trace_size;
+	unsigned char template_index;
+	unsigned char max_num_templates;
+	unsigned char detection_score;
+	unsigned char detection_index;
+	unsigned char detection_status;
+	unsigned char registration_status;
+	unsigned char *ctrl_buf;
+	unsigned char *trace_data_buf;
+	unsigned char *template_data_buf;
+#ifdef STORE_GESTURES
+	unsigned char gestures_to_store;
+	unsigned char *storage_buf;
+	unsigned char valid_buf[2];
+#endif
+	unsigned short trace_data_buf_size;
+	unsigned short template_size;
+	unsigned short template_data_size;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short ctrl_18_sub10_off;
+	unsigned short ctrl_20_sub1_off;
+	unsigned short ctrl_23_sub3_off;
+	unsigned short ctrl_27_sub5_off;
+	struct input_dev *udg_dev;
+	struct kobject *tuning_dir;
+	struct udg_addr addr;
+	struct udg_tuning tuning;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct device_attribute attrs[] = {
+	__ATTR(engine_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_engine_enable_store),
+	__ATTR(detection_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_detection_enable_store),
+	__ATTR(detection_score, 0444,
+			udg_sysfs_detection_score_show,
+			synaptics_rmi4_store_error),
+	__ATTR(detection_index, 0444,
+			udg_sysfs_detection_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(registration_enable, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_enable_store),
+	__ATTR(registration_begin, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_registration_begin_store),
+	__ATTR(registration_status, 0444,
+			udg_sysfs_registration_status_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_size, 0444,
+			udg_sysfs_template_size_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_max_index, 0444,
+			udg_sysfs_template_max_index_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_detection, 0444,
+			udg_sysfs_template_detection_show,
+			synaptics_rmi4_store_error),
+	__ATTR(template_index, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_index_store),
+	__ATTR(template_valid, 0664,
+			udg_sysfs_template_valid_show,
+			udg_sysfs_template_valid_store),
+	__ATTR(template_clear, 0220,
+			synaptics_rmi4_show_error,
+			udg_sysfs_template_clear_store),
+	__ATTR(trace_size, 0444,
+			udg_sysfs_trace_size_show,
+			synaptics_rmi4_store_error),
+};
+
+static struct bin_attribute template_data = {
+	.attr = {
+		.name = "template_data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = udg_sysfs_template_data_show,
+	.write = udg_sysfs_template_data_store,
+};
+
+static struct bin_attribute trace_data = {
+	.attr = {
+		.name = "trace_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = udg_sysfs_trace_data_show,
+	.write = NULL,
+};
+
+static struct device_attribute params[] = {
+	__ATTR(template_displacement, 0664,
+			udg_sysfs_template_displacement_show,
+			udg_sysfs_template_displacement_store),
+	__ATTR(rotation_invariance, 0664,
+			udg_sysfs_rotation_invariance_show,
+			udg_sysfs_rotation_invariance_store),
+	__ATTR(scale_invariance, 0664,
+			udg_sysfs_scale_invariance_show,
+			udg_sysfs_scale_invariance_store),
+	__ATTR(threshold_factor, 0664,
+			udg_sysfs_threshold_factor_show,
+			udg_sysfs_threshold_factor_store),
+	__ATTR(match_metric_threshold, 0664,
+			udg_sysfs_match_metric_threshold_show,
+			udg_sysfs_match_metric_threshold_store),
+	__ATTR(max_inter_stroke_time, 0664,
+			udg_sysfs_max_inter_stroke_time_show,
+			udg_sysfs_max_inter_stroke_time_store),
+};
+
+static struct synaptics_rmi4_udg_handle *udg;
+
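+/*
+ * Byte sizes of the leading subpackets of F12_CTRL18, CTRL20, CTRL23 and
+ * CTRL27, summed in udg_reg_init() to locate the subpacket of interest in
+ * each register (tuning data, report flags, object enables, UDG enable).
+ */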
+static unsigned char ctrl_18_sub_size[] = {10, 10, 10, 2, 3, 4, 3, 3, 1, 1};
+static unsigned char ctrl_20_sub_size[] = {2};
+static unsigned char ctrl_23_sub_size[] = {1, 1, 1};
+static unsigned char ctrl_27_sub_size[] = {1, 5, 2, 1, 7};
+
+DECLARE_COMPLETION(udg_remove_complete);
+
+static ssize_t udg_sysfs_engine_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	retval = udg_engine_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_detection_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	udg->detection_status = 0;
+
+	retval = udg_detection_enable(enable);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_detection_score_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_score);
+}
+
+static ssize_t udg_sysfs_detection_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->detection_index);
+}
+
+static ssize_t udg_sysfs_registration_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool enable;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		enable = true;
+	else if (input == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[0] = 0;
+		udg->ctrl_buf[0] |= (1 << CTRL23_FINGER_REPORT_ENABLE_BIT);
+		if (udg->ctrl_23_sub3_off)
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] = 0;
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[0] = udg->object_type_enable1;
+		if (udg->ctrl_23_sub3_off) {
+			udg->ctrl_buf[udg->ctrl_23_sub3_off] =
+					udg->object_type_enable2;
+		}
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_23,
+				udg->ctrl_buf,
+				udg->ctrl_23_sub3_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.enable_registration = enable ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_registration_begin_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool begin;
+	unsigned int input;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		begin = true;
+	else if (input == 0)
+		begin = false;
+	else
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.begin = begin ? 1 : 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_registration_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", udg->registration_status);
+}
+
+static ssize_t udg_sysfs_template_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->template_size);
+}
+
+static ssize_t udg_sysfs_template_max_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->max_num_templates - 1);
+}
+
+static ssize_t udg_sysfs_template_detection_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	int attn_event;
+	unsigned char detection_status;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	attn_event = atomic_read(&udg->attn_event);
+	atomic_set(&udg->attn_event, 0);
+
+	if (attn_event == 0)
+		return snprintf(buf, PAGE_SIZE, "0\n");
+
+	if (udg->detection_status == 0) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.data_4,
+				rmi4_data->gesture_detection,
+				sizeof(rmi4_data->gesture_detection));
+		if (retval < 0)
+			return retval;
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+	}
+
+	detection_status = udg->detection_status;
+	udg->detection_status = 0;
+
+	switch (detection_status) {
+	case DETECTION:
+		udg->detection_score = rmi4_data->gesture_detection[1];
+		udg->detection_index = rmi4_data->gesture_detection[4];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	case REGISTRATION:
+		udg->registration_status = rmi4_data->gesture_detection[1];
+		udg->trace_size = rmi4_data->gesture_detection[3];
+		break;
+	default:
+		return snprintf(buf, PAGE_SIZE, "0\n");
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", detection_status);
+}
+
+static ssize_t udg_sysfs_template_index_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long index;
+
+	retval = sstrtoul(buf, 10, &index);
+	if (retval)
+		return retval;
+
+	retval = udg_set_index((unsigned char)index);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_template_valid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	valid = (template_flags[byte_num] & (1 << offset)) >> offset;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", valid);
+}
+
+static ssize_t udg_sysfs_template_valid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long valid;
+	unsigned char offset;
+	unsigned char byte_num;
+	unsigned char template_flags[2];
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &valid);
+	if (retval)
+		return retval;
+
+	if (valid > 0)
+		valid = 1;
+
+	byte_num = udg->template_index / 8;
+	offset = udg->template_index % 8;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+	if (valid)
+		template_flags[byte_num] |= (1 << offset);
+	else
+		template_flags[byte_num] &= ~(1 << offset);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			template_flags,
+			sizeof(template_flags));
+	if (retval < 0)
+		return retval;
+
+#ifdef STORE_GESTURES
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+static ssize_t udg_sysfs_template_clear_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	const char cmd[] = {'0', 0};
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	memset(udg->template_data_buf, 0x00, udg->template_data_size);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = udg_sysfs_template_valid_store(dev, attr, cmd, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to clear valid bit\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+static ssize_t udg_sysfs_trace_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", udg->trace_size);
+}
+
+static ssize_t udg_sysfs_trace_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned short index = 0;
+	unsigned short trace_data_size;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	trace_data_size = udg->trace_size * 5;
+
+	if (trace_data_size == 0)
+		return -EINVAL;
+
+	if (count < trace_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	if (udg->trace_data_buf_size < trace_data_size) {
+		if (udg->trace_data_buf_size)
+			kfree(udg->trace_data_buf);
+		udg->trace_data_buf = kzalloc(trace_data_size, GFP_KERNEL);
+		if (!udg->trace_data_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for trace data buffer\n",
+					__func__);
+			udg->trace_data_buf_size = 0;
+			return -ENOMEM;
+		}
+		udg->trace_data_buf_size = trace_data_size;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_x,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace X data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_y,
+			&udg->trace_data_buf[index],
+			udg->trace_size * 2);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace Y data\n",
+				__func__);
+		return retval;
+	} else {
+		index += udg->trace_size * 2;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.trace_segment,
+			&udg->trace_data_buf[index],
+			udg->trace_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read trace segment data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->trace_data_buf,
+			udg->trace_data_buf_size, trace_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy trace data\n",
+				__func__);
+		return retval;
+	}
+
+	return trace_data_size;
+}
+
+static ssize_t udg_sysfs_template_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (count < udg->template_data_size) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Not enough space (%d bytes) in buffer\n",
+				__func__, (unsigned int)count);
+		return -EINVAL;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = secure_memcpy(buf, count, udg->template_data_buf,
+			udg->template_data_size, udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return udg->template_data_size;
+}
+
+static ssize_t udg_sysfs_template_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = secure_memcpy(udg->template_data_buf, udg->template_data_size,
+			buf, count, count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy template data\n",
+				__func__);
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_data,
+			udg->template_data_buf,
+			count);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write template data\n",
+				__func__);
+		return retval;
+	}
+
+#ifdef STORE_GESTURES
+	udg_read_template_data(udg->template_index);
+	udg_read_valid_data();
+#endif
+
+	return count;
+}
+
+static ssize_t udg_sysfs_template_displacement_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short template_displacement;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	template_displacement =
+			((unsigned short)udg->tuning.template_disp_lsb << 0) |
+			((unsigned short)udg->tuning.template_disp_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", template_displacement);
+}
+
+static ssize_t udg_sysfs_template_displacement_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.template_disp_lsb = (unsigned char)(input >> 0);
+	udg->tuning.template_disp_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_rotation_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short rotation_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	rotation_invariance =
+			((unsigned short)udg->tuning.rotation_inv_lsb << 0) |
+			((unsigned short)udg->tuning.rotation_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", rotation_invariance);
+}
+
+static ssize_t udg_sysfs_rotation_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.rotation_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.rotation_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_scale_invariance_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short scale_invariance;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	scale_invariance =
+			((unsigned short)udg->tuning.scale_inv_lsb << 0) |
+			((unsigned short)udg->tuning.scale_inv_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", scale_invariance);
+}
+
+static ssize_t udg_sysfs_scale_invariance_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.scale_inv_lsb = (unsigned char)(input >> 0);
+	udg->tuning.scale_inv_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_threshold_factor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short threshold_factor;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	threshold_factor =
+			((unsigned short)udg->tuning.thres_factor_lsb << 0) |
+			((unsigned short)udg->tuning.thres_factor_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", threshold_factor);
+}
+
+static ssize_t udg_sysfs_threshold_factor_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.thres_factor_lsb = (unsigned char)(input >> 0);
+	udg->tuning.thres_factor_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_match_metric_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short match_metric_threshold;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	match_metric_threshold =
+			((unsigned short)udg->tuning.metric_thres_lsb << 0) |
+			((unsigned short)udg->tuning.metric_thres_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", match_metric_threshold);
+}
+
+static ssize_t udg_sysfs_match_metric_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.metric_thres_lsb = (unsigned char)(input >> 0);
+	udg->tuning.metric_thres_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t udg_sysfs_max_inter_stroke_time_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned short max_inter_stroke_time;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	max_inter_stroke_time =
+			((unsigned short)udg->tuning.inter_stroke_lsb << 0) |
+			((unsigned short)udg->tuning.inter_stroke_msb << 8);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", max_inter_stroke_time);
+}
+
+static ssize_t udg_sysfs_max_inter_stroke_time_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long input;
+
+	retval = sstrtoul(buf, 10, &input);
+	if (retval)
+		return retval;
+
+	retval = udg_read_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	udg->tuning.inter_stroke_lsb = (unsigned char)(input >> 0);
+	udg->tuning.inter_stroke_msb = (unsigned char)(input >> 8);
+
+	retval = udg_write_tuning_params();
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
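+/*
+ * Check whether a given subpacket of an F12 control register is present by
+ * walking the register presence map in query 5 and the subpacket presence
+ * map in query 6. Returns 1 if present, 0 if absent, or a negative errno.
+ */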
+static int udg_ctrl_subpacket(unsigned char ctrlreg,
+		unsigned char subpacket,
+		struct synaptics_rmi4_f12_query_5 *query_5)
+{
+	int retval;
+	unsigned char cnt;
+	unsigned char regnum;
+	unsigned char bitnum;
+	unsigned char q5_index;
+	unsigned char q6_index;
+	unsigned char offset;
+	unsigned char max_ctrlreg;
+	unsigned char *query_6;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	max_ctrlreg = (sizeof(query_5->data) - 1) * 8 - 1;
+
+	if (ctrlreg > max_ctrlreg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control register number (%d) over limit\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	q5_index = ctrlreg / 8 + 1;
+	bitnum = ctrlreg % 8;
+	if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Control %d is not present\n",
+				__func__, ctrlreg);
+		return -EINVAL;
+	}
+
+	query_6 = kmalloc(query_5->size_of_query6, GFP_KERNEL);
+	if (!query_6) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query 6\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 6,
+			query_6,
+			query_5->size_of_query6);
+	if (retval < 0)
+		goto exit;
+
+	q6_index = 0;
+
+	for (regnum = 0; regnum < ctrlreg; regnum++) {
+		q5_index = regnum / 8 + 1;
+		bitnum = regnum % 8;
+		if ((query_5->data[q5_index] & (1 << bitnum)) == 0x00)
+			continue;
+
+		if (query_6[q6_index] == 0x00)
+			q6_index += 3;
+		else
+			q6_index++;
+
+		while (query_6[q6_index] & ~MASK_7BIT)
+			q6_index++;
+
+		q6_index++;
+	}
+
+	cnt = 0;
+	q6_index++;
+	offset = subpacket / 7;
+	bitnum = subpacket % 7;
+
+	do {
+		if (cnt == offset) {
+			if (query_6[q6_index + cnt] & (1 << bitnum))
+				retval = 1;
+			else
+				retval = 0;
+			goto exit;
+		}
+		cnt++;
+	} while (query_6[q6_index + cnt - 1] & ~MASK_7BIT);
+
+	retval = 0;
+
+exit:
+	kfree(query_6);
+
+	return retval;
+}
+
+static int udg_read_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	secure_memcpy(udg->tuning.data,
+			sizeof(udg->tuning.data),
+			(unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			sizeof(struct udg_tuning));
+
+	return 0;
+}
+
+static int udg_write_tuning_params(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	secure_memcpy((unsigned char *)&udg->ctrl_buf[udg->ctrl_18_sub10_off],
+			sizeof(struct udg_tuning),
+			udg->tuning.data,
+			sizeof(udg->tuning.data),
+			sizeof(struct udg_tuning));
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_18,
+			udg->ctrl_buf,
+			udg->ctrl_18_sub10_off + sizeof(struct udg_tuning));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
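+/*
+ * Switch the F12_CTRL20 report flags into wakeup gesture mode, or restore
+ * the report flags saved at init time when detection is disabled.
+ */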
+static int udg_detection_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	if (enable)
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = WAKEUP_GESTURE_MODE;
+	else
+		udg->ctrl_buf[udg->ctrl_20_sub1_off] = udg->report_flags;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
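+/* Set or clear the user defined gesture enable bit in F12_CTRL27. */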
+static int udg_engine_enable(bool enable)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (enable) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] |=
+				(1 << CTRL27_UDG_ENABLE_BIT);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+	} else {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+
+		udg->ctrl_buf[udg->ctrl_27_sub5_off] &=
+				~(1 << CTRL27_UDG_ENABLE_BIT);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.ctrl_27,
+				udg->ctrl_buf,
+				udg->ctrl_27_sub5_off + 1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+static void udg_report(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	atomic_set(&udg->attn_event, 1);
+
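+	/* A gesture detected while suspended generates a KEY_WAKEUP event. */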
+	if (rmi4_data->suspend) {
+		if (rmi4_data->gesture_detection[0] == 0) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					udg->addr.data_4,
+					rmi4_data->gesture_detection,
+					sizeof(rmi4_data->gesture_detection));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read gesture detection\n",
+						__func__);
+				return;
+			}
+		}
+
+		udg->detection_status = rmi4_data->gesture_detection[0];
+		rmi4_data->gesture_detection[0] = 0;
+
+		if (udg->detection_status == DETECTION) {
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 1);
+			input_sync(udg->udg_dev);
+			input_report_key(udg->udg_dev, KEY_WAKEUP, 0);
+			input_sync(udg->udg_dev);
+			rmi4_data->suspend = false;
+		}
+	}
+
+	return;
+}
+
+static int udg_set_index(unsigned char index)
+{
+	int retval;
+	struct synaptics_rmi4_f12_control_41 control_41;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	if (index >= udg->max_num_templates)
+		return -EINVAL;
+
+	udg->template_index = index;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	control_41.template_index = udg->template_index;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.ctrl_41,
+			control_41.data,
+			sizeof(control_41.data));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+#ifdef STORE_GESTURES
+static int udg_read_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int udg_write_valid_data(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			udg->addr.template_flags,
+			udg->valid_buf,
+			sizeof(udg->valid_buf));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int udg_read_template_data(unsigned char index)
+{
+	int retval;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	udg_set_index(index);
+	storage = &(udg->storage_buf[index * udg->template_data_size]);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.template_data,
+			storage,
+			udg->template_data_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read template data\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int udg_write_template_data(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char *storage;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	for (ii = 0; ii < udg->gestures_to_store; ii++) {
+		udg_set_index(ii);
+		storage = &(udg->storage_buf[ii * udg->template_data_size]);
+
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				udg->addr.template_data,
+				storage,
+				udg->template_data_size);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write template data\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static int udg_reg_init(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char data_offset;
+	unsigned char size_of_query;
+	unsigned char ctrl_18_offset;
+	unsigned char ctrl_20_offset;
+	unsigned char ctrl_23_offset;
+	unsigned char ctrl_27_offset;
+	unsigned char ctrl_41_offset;
+	struct synaptics_rmi4_f12_query_0 query_0;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 7,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 4) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
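+	/*
+	 * F12 data registers are packed, so the address of each register is
+	 * the data base address plus the number of lower-numbered data
+	 * registers reported present in query 8.
+	 */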
+	if ((query_8.data16_is_present) &&
+			(query_8.data17_is_present) &&
+			(query_8.data18_is_present) &&
+			(query_8.data19_is_present) &&
+			(query_8.data20_is_present) &&
+			(query_8.data21_is_present)) {
+		data_offset = query_8.data0_is_present +
+				query_8.data1_is_present +
+				query_8.data2_is_present +
+				query_8.data3_is_present;
+		udg->addr.data_4 = udg->data_base_addr + data_offset;
+		data_offset = data_offset +
+				query_8.data4_is_present +
+				query_8.data5_is_present +
+				query_8.data6_is_present +
+				query_8.data7_is_present +
+				query_8.data8_is_present +
+				query_8.data9_is_present +
+				query_8.data10_is_present +
+				query_8.data11_is_present +
+				query_8.data12_is_present +
+				query_8.data13_is_present +
+				query_8.data14_is_present +
+				query_8.data15_is_present;
+		udg->addr.trace_x = udg->data_base_addr + data_offset;
+		udg->addr.trace_y = udg->addr.trace_x + 1;
+		udg->addr.trace_segment = udg->addr.trace_y + 1;
+		udg->addr.template_helper = udg->addr.trace_segment + 1;
+		udg->addr.template_data = udg->addr.template_helper + 1;
+		udg->addr.template_flags = udg->addr.template_data + 1;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing data registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 4,
+			&size_of_query,
+			sizeof(size_of_query));
+	if (retval < 0)
+		return retval;
+
+	if (size_of_query < 7) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: User defined gesture support unavailable (missing control registers)\n",
+				__func__);
+		retval = -ENODEV;
+		return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
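+	/*
+	 * Control register offsets are derived the same way, by counting the
+	 * lower-numbered control registers reported present in query 5.
+	 */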
+	ctrl_18_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present;
+
+	ctrl_20_offset = ctrl_18_offset +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present;
+
+	ctrl_23_offset = ctrl_20_offset +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	ctrl_27_offset = ctrl_23_offset +
+			query_5.ctrl23_is_present +
+			query_5.ctrl24_is_present +
+			query_5.ctrl25_is_present +
+			query_5.ctrl26_is_present;
+
+	ctrl_41_offset = ctrl_27_offset +
+			query_5.ctrl27_is_present +
+			query_5.ctrl28_is_present +
+			query_5.ctrl29_is_present +
+			query_5.ctrl30_is_present +
+			query_5.ctrl31_is_present +
+			query_5.ctrl32_is_present +
+			query_5.ctrl33_is_present +
+			query_5.ctrl34_is_present +
+			query_5.ctrl35_is_present +
+			query_5.ctrl36_is_present +
+			query_5.ctrl37_is_present +
+			query_5.ctrl38_is_present +
+			query_5.ctrl39_is_present +
+			query_5.ctrl40_is_present;
+
+	udg->addr.ctrl_18 = udg->control_base_addr + ctrl_18_offset;
+	udg->addr.ctrl_20 = udg->control_base_addr + ctrl_20_offset;
+	udg->addr.ctrl_23 = udg->control_base_addr + ctrl_23_offset;
+	udg->addr.ctrl_27 = udg->control_base_addr + ctrl_27_offset;
+	udg->addr.ctrl_41 = udg->control_base_addr + ctrl_41_offset;
+
+	udg->ctrl_18_sub10_off = 0;
+	for (ii = 0; ii < 10; ii++) {
+		retval = udg_ctrl_subpacket(18, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_18_sub10_off += ctrl_18_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_20_sub1_off = 0;
+	for (ii = 0; ii < 1; ii++) {
+		retval = udg_ctrl_subpacket(20, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_20_sub1_off += ctrl_20_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	udg->ctrl_23_sub3_off = 0;
+	for (ii = 0; ii < 3; ii++) {
+		retval = udg_ctrl_subpacket(23, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_23_sub3_off += ctrl_23_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	retval = udg_ctrl_subpacket(23, 3, &query_5);
+	if (retval == 0)
+		udg->ctrl_23_sub3_off = 0;
+	else if (retval < 0)
+		return retval;
+
+	udg->ctrl_27_sub5_off = 0;
+	for (ii = 0; ii < 5; ii++) {
+		retval = udg_ctrl_subpacket(27, ii, &query_5);
+		if (retval == 1)
+			udg->ctrl_27_sub5_off += ctrl_27_sub_size[ii];
+		else if (retval < 0)
+			return retval;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->query_base_addr + 0,
+			query_0.data,
+			sizeof(query_0.data));
+	if (retval < 0)
+		return retval;
+
+	udg->max_num_templates = query_0.max_num_templates;
+	udg->template_size =
+			((unsigned short)query_0.template_size_lsb << 0) |
+			((unsigned short)query_0.template_size_msb << 8);
+	udg->template_data_size = udg->template_size * 4 * 2 + 4 + 1;
+
+#ifdef STORE_GESTURES
+	udg->gestures_to_store = udg->max_num_templates;
+	if (GESTURES_TO_STORE < udg->gestures_to_store)
+		udg->gestures_to_store = GESTURES_TO_STORE;
+#endif
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_20,
+			udg->ctrl_buf,
+			udg->ctrl_20_sub1_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->report_flags = udg->ctrl_buf[udg->ctrl_20_sub1_off];
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			udg->addr.ctrl_23,
+			udg->ctrl_buf,
+			udg->ctrl_23_sub3_off + 1);
+	if (retval < 0)
+		return retval;
+
+	udg->object_type_enable1 = udg->ctrl_buf[0];
+	if (udg->ctrl_23_sub3_off)
+		udg->object_type_enable2 = udg->ctrl_buf[udg->ctrl_23_sub3_off];
+
+	return retval;
+}
+
+static int udg_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = udg->rmi4_data;
+
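+	/*
+	 * Walk the Page Description Table on each page, from PDT_START down,
+	 * until the F12 function descriptor is found.
+	 */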
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	udg->query_base_addr = fd.query_base_addr | (page << 8);
+	udg->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	udg->data_base_addr = fd.data_base_addr | (page << 8);
+	udg->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = udg_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize user defined gesture registers\n",
+				__func__);
+		return retval;
+	}
+
+	udg->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		udg->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= udg->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&rmi4_data->intr_mask[0],
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void synaptics_rmi4_udg_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!udg)
+		return;
+
+	if (udg->intr_mask & intr_mask)
+		udg_report();
+
+	return;
+}
+
+static int synaptics_rmi4_udg_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size;
+	unsigned char attr_count;
+	unsigned char param_count;
+
+	if (udg) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	udg = kzalloc(sizeof(*udg), GFP_KERNEL);
+	if (!udg) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for udg\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	size = 0;
+	for (ii = 0; ii < sizeof(ctrl_18_sub_size); ii++)
+		size += ctrl_18_sub_size[ii];
+	size += sizeof(struct udg_tuning);
+	udg->ctrl_buf = kzalloc(size, GFP_KERNEL);
+	if (!udg->ctrl_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for ctrl_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_udg;
+	}
+
+	udg->rmi4_data = rmi4_data;
+
+	retval = udg_scan_pdt();
+	if (retval < 0)
+		goto exit_free_ctrl_buf;
+
+	udg->template_data_buf = kzalloc(udg->template_data_size, GFP_KERNEL);
+	if (!udg->template_data_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for template_data_buf\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+
+#ifdef STORE_GESTURES
+	udg->storage_buf = kzalloc(
+			udg->template_data_size * udg->gestures_to_store,
+			GFP_KERNEL);
+	if (!udg->storage_buf) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for storage_buf\n",
+				__func__);
+		kfree(udg->template_data_buf);
+		retval = -ENOMEM;
+		goto exit_free_ctrl_buf;
+	}
+#endif
+
+	udg->udg_dev = input_allocate_device();
+	if (udg->udg_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate gesture device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_template_data_buf;
+	}
+
+	udg->udg_dev->name = GESTURE_DRIVER_NAME;
+	udg->udg_dev->phys = GESTURE_PHYS_NAME;
+	udg->udg_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	udg->udg_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	udg->udg_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(udg->udg_dev, rmi4_data);
+
+	set_bit(EV_KEY, udg->udg_dev->evbit);
+	set_bit(KEY_WAKEUP, udg->udg_dev->keybit);
+	input_set_capability(udg->udg_dev, EV_KEY, KEY_WAKEUP);
+
+	retval = input_register_device(udg->udg_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register gesture device\n",
+				__func__);
+		input_free_device(udg->udg_dev);
+		goto exit_free_template_data_buf;
+	}
+
+	udg->tuning_dir = kobject_create_and_add(TUNING_SYSFS_DIR_NAME,
+			&udg->udg_dev->dev.kobj);
+	if (!udg->tuning_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create tuning sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_unregister_input_device;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create template data bin file\n",
+				__func__);
+		goto exit_remove_sysfs_directory;
+	}
+
+	retval = sysfs_create_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create trace data bin file\n",
+				__func__);
+		goto exit_remove_bin_file;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_attrs;
+		}
+	}
+
+	for (param_count = 0; param_count < ARRAY_SIZE(params); param_count++) {
+		retval = sysfs_create_file(udg->tuning_dir,
+				&params[param_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create tuning parameters\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_remove_params;
+		}
+	}
+
+	retval = udg_engine_enable(true);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to enable gesture engine\n",
+				__func__);
+		goto exit_remove_params;
+	}
+
+	return 0;
+
+exit_remove_params:
+	/* counters are unsigned; count down explicitly to avoid wrapping */
+	while (param_count > 0) {
+		param_count--;
+		sysfs_remove_file(udg->tuning_dir,
+				&params[param_count].attr);
+	}
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+
+exit_remove_bin_file:
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+
+exit_remove_sysfs_directory:
+	kobject_put(udg->tuning_dir);
+
+exit_unregister_input_device:
+	input_unregister_device(udg->udg_dev);
+
+exit_free_template_data_buf:
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+
+exit_free_ctrl_buf:
+	kfree(udg->ctrl_buf);
+
+exit_free_udg:
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_udg_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char count;
+
+	if (!udg)
+		goto exit;
+
+	for (count = 0; count < ARRAY_SIZE(params); count++) {
+		sysfs_remove_file(udg->tuning_dir,
+				&params[count].attr);
+	}
+
+	for (count = 0; count < ARRAY_SIZE(attrs); count++) {
+		sysfs_remove_file(&udg->udg_dev->dev.kobj,
+				&attrs[count].attr);
+	}
+
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &trace_data);
+	sysfs_remove_bin_file(&udg->udg_dev->dev.kobj, &template_data);
+	kobject_put(udg->tuning_dir);
+
+	input_unregister_device(udg->udg_dev);
+#ifdef STORE_GESTURES
+	kfree(udg->storage_buf);
+#endif
+	kfree(udg->template_data_buf);
+	kfree(udg->trace_data_buf);
+	kfree(udg->ctrl_buf);
+	kfree(udg);
+	udg = NULL;
+
+exit:
+	complete(&udg_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_udg_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg) {
+		synaptics_rmi4_udg_init(rmi4_data);
+		return;
+	}
+
+	udg_scan_pdt();
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+static void synaptics_rmi4_udg_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	udg_engine_enable(true);
+#ifdef STORE_GESTURES
+	udg_write_template_data();
+	udg_write_valid_data();
+#endif
+
+	return;
+}
+
+static void synaptics_rmi4_udg_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+static void synaptics_rmi4_udg_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	rmi4_data->sleep_enable(rmi4_data, false);
+	rmi4_data->irq_enable(rmi4_data, true, false);
+	enable_irq_wake(rmi4_data->irq);
+
+	udg_engine_enable(true);
+	udg_detection_enable(true);
+
+	return;
+}
+
+static void synaptics_rmi4_udg_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+static void synaptics_rmi4_udg_l_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!udg)
+		return;
+
+	disable_irq_wake(rmi4_data->irq);
+	udg_detection_enable(false);
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn gesture_module = {
+	.fn_type = RMI_GESTURE,
+	.init = synaptics_rmi4_udg_init,
+	.remove = synaptics_rmi4_udg_remove,
+	.reset = synaptics_rmi4_udg_reset,
+	.reinit = synaptics_rmi4_udg_reinit,
+	.early_suspend = synaptics_rmi4_udg_e_suspend,
+	.suspend = synaptics_rmi4_udg_suspend,
+	.resume = synaptics_rmi4_udg_resume,
+	.late_resume = synaptics_rmi4_udg_l_resume,
+	.attn = synaptics_rmi4_udg_attn,
+};
+
+static int __init rmi4_gesture_module_init(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_gesture_module_exit(void)
+{
+	synaptics_rmi4_new_function(&gesture_module, false);
+
+	wait_for_completion(&udg_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_gesture_module_init);
+module_exit(rmi4_gesture_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX User Defined Gesture Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
new file mode 100644
index 0000000..8776d4a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -0,0 +1,606 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+#define rd_msgs  1
+
+static unsigned char *wr_buf;
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+#ifdef CONFIG_OF
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+static int synaptics_rmi4_i2c_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int count)
+{
+	static unsigned int buf_size;
+
+	if (count > buf_size) {
+		if (buf_size)
+			kfree(wr_buf);
+		wr_buf = kzalloc(count, GFP_KERNEL);
+		if (!wr_buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = count;
+	}
+
+	return 0;
+}
+
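+/*
+ * Toggle between the regular and the alternate (ub) I2C slave address; the
+ * transfer paths call this once half of the retry attempts have failed.
+ */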
+static void synaptics_rmi4_i2c_check_addr(struct synaptics_rmi4_data *rmi4_data,
+		struct i2c_client *i2c)
+{
+	if (hw_if.board_data->ub_i2c_addr == -1)
+		return;
+
+	if (hw_if.board_data->i2c_addr == i2c->addr)
+		hw_if.board_data->i2c_addr = hw_if.board_data->ub_i2c_addr;
+	else
+		hw_if.board_data->i2c_addr = i2c->addr;
+
+	return;
+}
+
+static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf[PAGE_SELECT_LEN];
+	unsigned char page;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = PAGE_SELECT_LEN;
+	msg[0].buf = buf;
+
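+	/*
+	 * RMI4 registers live in 256-byte pages; the upper byte of the 16-bit
+	 * address selects the page, and a page switch is performed by writing
+	 * the page number to the page select register (0xFF, i.e. MASK_8BIT).
+	 */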
+	page = ((addr >> 8) & MASK_8BIT);
+	buf[0] = MASK_8BIT;
+	buf[1] = page;
+
+	if (page != rmi4_data->current_page) {
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+				rmi4_data->current_page = page;
+				retval = PAGE_SELECT_LEN;
+				break;
+			}
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				msg[0].addr = hw_if.board_data->i2c_addr;
+			}
+		}
+	} else {
+		retval = PAGE_SELECT_LEN;
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval = 0;
+	unsigned char retry;
+	unsigned char buf;
+	unsigned char index = 0;
+	unsigned char xfer_msgs;
+	unsigned char remaining_msgs;
+	unsigned short i2c_addr;
+	unsigned short data_offset = 0;
+	unsigned int remaining_length = length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_adapter *adap = i2c->adapter;
+	struct i2c_msg msg[rd_msgs + 1];
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &buf;
+	msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
+	msg[rd_msgs].flags = I2C_M_RD;
+	msg[rd_msgs].len = (unsigned short)remaining_length;
+	msg[rd_msgs].buf = &data[data_offset];
+
+	buf = addr & MASK_8BIT;
+
+	remaining_msgs = rd_msgs + 1;
+
+	while (remaining_msgs) {
+		xfer_msgs = remaining_msgs;
+		for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+			retval = i2c_transfer(adap, &msg[index], xfer_msgs);
+			if (retval == xfer_msgs)
+				break;
+
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C retry %d\n",
+					__func__, retry + 1);
+			msleep(20);
+
+			if (retry == SYN_I2C_RETRY_TIMES / 2) {
+				synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+				i2c_addr = hw_if.board_data->i2c_addr;
+				msg[0].addr = i2c_addr;
+				msg[rd_msgs].addr = i2c_addr;
+			}
+		}
+
+		if (retry == SYN_I2C_RETRY_TIMES) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: I2C read over retry limit\n",
+					__func__);
+			retval = -EIO;
+			goto exit;
+		}
+
+		remaining_msgs -= xfer_msgs;
+		index += xfer_msgs;
+	}
+
+	retval = length;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[2];
+
+	retval = synaptics_rmi4_i2c_alloc_buf(rmi4_data, length + 1);
+	if (retval < 0)
+		return retval;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		retval = -EIO;
+		goto exit;
+	}
+
+	msg[0].addr = hw_if.board_data->i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = (unsigned short)(length + 1);
+	msg[0].buf = wr_buf;
+
+	wr_buf[0] = addr & MASK_8BIT;
+	retval = secure_memcpy(&wr_buf[1], length, &data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		goto exit;
+	}
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(i2c->adapter, &msg[0], 1) == 1) {
+			retval = length;
+			break;
+		}
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+
+		if (retry == SYN_I2C_RETRY_TIMES / 2) {
+			synaptics_rmi4_i2c_check_addr(rmi4_data, i2c);
+			msg[0].addr = hw_if.board_data->i2c_addr;
+		}
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: I2C write over retry limit\n",
+				__func__);
+		retval = -EIO;
+	}
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
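+/*
+ * Register access callbacks handed to the core driver: the core performs
+ * all register I/O through these hooks and never touches the I2C client
+ * directly, so other transport modules of this driver can plug in the same
+ * way.
+ */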
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev,
+				"%s: SMBus byte data commands not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			return retval;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.board_data->i2c_addr = client->addr;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(wr_buf);
+
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
new file mode 100644
index 0000000..518b805
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_proximity.c
@@ -0,0 +1,692 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define PROX_PHYS_NAME "synaptics_dsx/proximity"
+
+#define HOVER_Z_MAX (255)
+
+#define HOVERING_FINGER_EN (1 << 4)
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static struct device_attribute attrs[] = {
+	__ATTR(hover_finger_en, 0664,
+			synaptics_rmi4_hover_finger_en_show,
+			synaptics_rmi4_hover_finger_en_store),
+};
+
+struct synaptics_rmi4_f12_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+struct synaptics_rmi4_f12_query_8 {
+	union {
+		struct {
+			unsigned char size_of_query9;
+			struct {
+				unsigned char data0_is_present:1;
+				unsigned char data1_is_present:1;
+				unsigned char data2_is_present:1;
+				unsigned char data3_is_present:1;
+				unsigned char data4_is_present:1;
+				unsigned char data5_is_present:1;
+				unsigned char data6_is_present:1;
+				unsigned char data7_is_present:1;
+			} __packed;
+		};
+		unsigned char data[2];
+	};
+};
+
+struct prox_finger_data {
+	union {
+		struct {
+			unsigned char object_type_and_status;
+			unsigned char x_lsb;
+			unsigned char x_msb;
+			unsigned char y_lsb;
+			unsigned char y_msb;
+			unsigned char z;
+		} __packed;
+		unsigned char proximity_data[6];
+	};
+};
+
+struct synaptics_rmi4_prox_handle {
+	bool hover_finger_present;
+	bool hover_finger_en;
+	unsigned char intr_mask;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short hover_finger_en_addr;
+	unsigned short hover_finger_data_addr;
+	struct input_dev *prox_dev;
+	struct prox_finger_data *finger_data;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+static struct synaptics_rmi4_prox_handle *prox;
+
+DECLARE_COMPLETION(prox_remove_complete);
+
+static void prox_hover_finger_lift(void)
+{
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 0);
+	input_sync(prox->prox_dev);
+	prox->hover_finger_present = false;
+
+	return;
+}
+
+static void prox_hover_finger_report(void)
+{
+	int retval;
+	int x;
+	int y;
+	int z;
+	struct prox_finger_data *data;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	data = prox->finger_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_data_addr,
+			data->proximity_data,
+			sizeof(data->proximity_data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read hovering finger data\n",
+				__func__);
+		return;
+	}
+
+	if (data->object_type_and_status != F12_HOVERING_FINGER_STATUS) {
+		if (prox->hover_finger_present)
+			prox_hover_finger_lift();
+
+		return;
+	}
+
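+	/*
+	 * Assemble the 16-bit hover coordinates and report ABS_DISTANCE as
+	 * HOVER_Z_MAX minus the raw z value from the hover data registers.
+	 */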
+	x = (data->x_msb << 8) | (data->x_lsb);
+	y = (data->y_msb << 8) | (data->y_lsb);
+	z = HOVER_Z_MAX - data->z;
+
+	input_report_key(prox->prox_dev, BTN_TOUCH, 0);
+	input_report_key(prox->prox_dev, BTN_TOOL_FINGER, 1);
+	input_report_abs(prox->prox_dev, ABS_X, x);
+	input_report_abs(prox->prox_dev, ABS_Y, y);
+	input_report_abs(prox->prox_dev, ABS_DISTANCE, z);
+
+	input_sync(prox->prox_dev);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: x = %d y = %d z = %d\n",
+			__func__, x, y, z);
+
+	prox->hover_finger_present = true;
+
+	return;
+}
+
+static int prox_set_hover_finger_en(void)
+{
+	int retval;
+	unsigned char object_report_enable;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read from object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	if (prox->hover_finger_en)
+		object_report_enable |= HOVERING_FINGER_EN;
+	else
+		object_report_enable &= ~HOVERING_FINGER_EN;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			prox->hover_finger_en_addr,
+			&object_report_enable,
+			sizeof(object_report_enable));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write to object report enable register\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static void prox_set_params(void)
+{
+	input_set_abs_params(prox->prox_dev, ABS_X, 0,
+			prox->rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_Y, 0,
+			prox->rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(prox->prox_dev, ABS_DISTANCE, 0,
+			HOVER_Z_MAX, 0, 0);
+
+	return;
+}
+
+static int prox_reg_init(void)
+{
+	int retval;
+	unsigned char ctrl_23_offset;
+	unsigned char data_1_offset;
+	struct synaptics_rmi4_f12_query_5 query_5;
+	struct synaptics_rmi4_f12_query_8 query_8;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 5,
+			query_5.data,
+			sizeof(query_5.data));
+	if (retval < 0)
+		return retval;
+
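+	/*
+	 * F12 control registers are packed contiguously and only the ones
+	 * flagged as present in query 5 occupy space, so the offset of CTRL23
+	 * (the object report enable register holding the hovering finger bit)
+	 * is the number of present control registers before it.
+	 */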
+	ctrl_23_offset = query_5.ctrl0_is_present +
+			query_5.ctrl1_is_present +
+			query_5.ctrl2_is_present +
+			query_5.ctrl3_is_present +
+			query_5.ctrl4_is_present +
+			query_5.ctrl5_is_present +
+			query_5.ctrl6_is_present +
+			query_5.ctrl7_is_present +
+			query_5.ctrl8_is_present +
+			query_5.ctrl9_is_present +
+			query_5.ctrl10_is_present +
+			query_5.ctrl11_is_present +
+			query_5.ctrl12_is_present +
+			query_5.ctrl13_is_present +
+			query_5.ctrl14_is_present +
+			query_5.ctrl15_is_present +
+			query_5.ctrl16_is_present +
+			query_5.ctrl17_is_present +
+			query_5.ctrl18_is_present +
+			query_5.ctrl19_is_present +
+			query_5.ctrl20_is_present +
+			query_5.ctrl21_is_present +
+			query_5.ctrl22_is_present;
+
+	prox->hover_finger_en_addr = prox->control_base_addr + ctrl_23_offset;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			prox->query_base_addr + 8,
+			query_8.data,
+			sizeof(query_8.data));
+	if (retval < 0)
+		return retval;
+
+	data_1_offset = query_8.data0_is_present;
+	prox->hover_finger_data_addr = prox->data_base_addr + data_1_offset;
+
+	return retval;
+}
+
+static int prox_scan_pdt(void)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char page;
+	unsigned char intr_count = 0;
+	unsigned char intr_off;
+	unsigned char intr_src;
+	unsigned short addr;
+	struct synaptics_rmi4_fn_desc fd;
+	struct synaptics_rmi4_data *rmi4_data = prox->rmi4_data;
+
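+	/*
+	 * Walk the Page Description Table of each page: function descriptors
+	 * are laid out downward from PDT_START, and an entry with a zero
+	 * function number terminates the table.  Only F12, which provides the
+	 * hovering finger data, is of interest here.
+	 */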
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&fd,
+					sizeof(fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (fd.fn_number) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Found F%02x\n",
+						__func__, fd.fn_number);
+				switch (fd.fn_number) {
+				case SYNAPTICS_RMI4_F12:
+					goto f12_found;
+					break;
+				}
+			} else {
+				break;
+			}
+
+			intr_count += fd.intr_src_count;
+		}
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to find F12\n",
+			__func__);
+	return -EINVAL;
+
+f12_found:
+	prox->query_base_addr = fd.query_base_addr | (page << 8);
+	prox->control_base_addr = fd.ctrl_base_addr | (page << 8);
+	prox->data_base_addr = fd.data_base_addr | (page << 8);
+	prox->command_base_addr = fd.cmd_base_addr | (page << 8);
+
+	retval = prox_reg_init();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize proximity registers\n",
+				__func__);
+		return retval;
+	}
+
+	prox->intr_mask = 0;
+	intr_src = fd.intr_src_count;
+	intr_off = intr_count % 8;
+	for (ii = intr_off;
+			ii < (intr_src + intr_off);
+			ii++) {
+		prox->intr_mask |= 1 << ii;
+	}
+
+	rmi4_data->intr_mask[0] |= prox->intr_mask;
+
+	addr = rmi4_data->f01_ctrl_base_addr + 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			addr,
+			&(rmi4_data->intr_mask[0]),
+			sizeof(rmi4_data->intr_mask[0]));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set interrupt enable bit\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static ssize_t synaptics_rmi4_hover_finger_en_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	if (!prox)
+		return -ENODEV;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			prox->hover_finger_en);
+}
+
+static ssize_t synaptics_rmi4_hover_finger_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data;
+
+	if (!prox)
+		return -ENODEV;
+
+	rmi4_data = prox->rmi4_data;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		prox->hover_finger_en = true;
+	else if (input == 0)
+		prox->hover_finger_en = false;
+	else
+		return -EINVAL;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to change hovering finger enable setting\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+int synaptics_rmi4_prox_hover_finger_en(bool enable)
+{
+	int retval;
+
+	if (!prox)
+		return -ENODEV;
+
+	prox->hover_finger_en = enable;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+EXPORT_SYMBOL(synaptics_rmi4_prox_hover_finger_en);
+
+static void synaptics_rmi4_prox_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!prox)
+		return;
+
+	if (prox->intr_mask & intr_mask)
+		prox_hover_finger_report();
+
+	return;
+}
+
+static int synaptics_rmi4_prox_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (prox) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	prox = kzalloc(sizeof(*prox), GFP_KERNEL);
+	if (!prox) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for prox\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	prox->finger_data = kzalloc(sizeof(*(prox->finger_data)), GFP_KERNEL);
+	if (!prox->finger_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for finger_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_prox;
+	}
+
+	prox->rmi4_data = rmi4_data;
+
+	retval = prox_scan_pdt();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->hover_finger_en = true;
+
+	retval = prox_set_hover_finger_en();
+	if (retval < 0)
+		goto exit_free_finger_data;
+
+	prox->prox_dev = input_allocate_device();
+	if (prox->prox_dev == NULL) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to allocate proximity device\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit_free_finger_data;
+	}
+
+	prox->prox_dev->name = PROXIMITY_DRIVER_NAME;
+	prox->prox_dev->phys = PROX_PHYS_NAME;
+	prox->prox_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT;
+	prox->prox_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION;
+	prox->prox_dev->dev.parent = rmi4_data->pdev->dev.parent;
+	input_set_drvdata(prox->prox_dev, rmi4_data);
+
+	set_bit(EV_KEY, prox->prox_dev->evbit);
+	set_bit(EV_ABS, prox->prox_dev->evbit);
+	set_bit(BTN_TOUCH, prox->prox_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, prox->prox_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, prox->prox_dev->propbit);
+#endif
+
+	prox_set_params();
+
+	retval = input_register_device(prox->prox_dev);
+	if (retval) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to register proximity device\n",
+				__func__);
+		goto exit_free_input_device;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			goto exit_free_sysfs;
+		}
+	}
+
+	return 0;
+
+exit_free_sysfs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	prox->prox_dev = NULL;
+
+exit_free_input_device:
+	if (prox->prox_dev)
+		input_free_device(prox->prox_dev);
+
+exit_free_finger_data:
+	kfree(prox->finger_data);
+
+exit_free_prox:
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_prox_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!prox)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	input_unregister_device(prox->prox_dev);
+	kfree(prox->finger_data);
+	kfree(prox);
+	prox = NULL;
+
+exit:
+	complete(&prox_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_prox_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox) {
+		synaptics_rmi4_prox_init(rmi4_data);
+		return;
+	}
+
+	prox_hover_finger_lift();
+
+	prox_scan_pdt();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+static void synaptics_rmi4_prox_reinit(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	prox_set_hover_finger_en();
+
+	return;
+}
+
+static void synaptics_rmi4_prox_e_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+static void synaptics_rmi4_prox_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!prox)
+		return;
+
+	prox_hover_finger_lift();
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn proximity_module = {
+	.fn_type = RMI_PROXIMITY,
+	.init = synaptics_rmi4_prox_init,
+	.remove = synaptics_rmi4_prox_remove,
+	.reset = synaptics_rmi4_prox_reset,
+	.reinit = synaptics_rmi4_prox_reinit,
+	.early_suspend = synaptics_rmi4_prox_e_suspend,
+	.suspend = synaptics_rmi4_prox_suspend,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_prox_attn,
+};
+
+static int __init rmi4_proximity_module_init(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_proximity_module_exit(void)
+{
+	synaptics_rmi4_new_function(&proximity_module, false);
+
+	wait_for_completion(&prox_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_proximity_module_init);
+module_exit(rmi4_proximity_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Proximity Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
new file mode 100644
index 0000000..61cf979
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_dev.c
@@ -0,0 +1,1064 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define SYSFS_FOLDER_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+#define RMIDEV_MAJOR_NUM 0
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+struct rmidev_handle {
+	dev_t dev_no;
+	pid_t pid;
+	unsigned char intr_mask;
+	unsigned char *tmpbuf;
+	unsigned int tmpbuf_size;
+	struct device dev;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+	struct siginfo interrupt_signal;
+	struct siginfo terminate_signal;
+	struct task_struct *task;
+	void *data;
+	bool concurrent;
+};
+
+struct rmidev_data {
+	int ref_count;
+	struct cdev main_dev;
+	struct class *device_class;
+	struct mutex file_mutex;
+	struct rmidev_handle *rmi_dev;
+};
+
+static struct bin_attribute attr_data = {
+	.attr = {
+		.name = "data",
+		.mode = 0664,
+	},
+	.size = 0,
+	.read = rmidev_sysfs_data_show,
+	.write = rmidev_sysfs_data_store,
+};
+
+static struct device_attribute attrs[] = {
+	__ATTR(open, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_open_store),
+	__ATTR(release, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_release_store),
+	__ATTR(attn_state, 0444,
+			rmidev_sysfs_attn_state_show,
+			synaptics_rmi4_store_error),
+	__ATTR(pid, 0664,
+			rmidev_sysfs_pid_show,
+			rmidev_sysfs_pid_store),
+	__ATTR(term, 0220,
+			synaptics_rmi4_show_error,
+			rmidev_sysfs_term_store),
+	__ATTR(intr_mask, 0664,
+			rmidev_sysfs_intr_mask_show,
+			rmidev_sysfs_intr_mask_store),
+	__ATTR(concurrent, 0664,
+			rmidev_sysfs_concurrent_show,
+			rmidev_sysfs_concurrent_store),
+};
+
+static int rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+DECLARE_COMPLETION(rmidev_remove_complete);
+
+static irqreturn_t rmidev_sysfs_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *rmi4_data = data;
+
+	sysfs_notify(&rmi4_data->input_dev->dev.kobj,
+			SYSFS_FOLDER_NAME, "attn_state");
+
+	return IRQ_HANDLED;
+}
+
+static int rmidev_sysfs_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+		bool enable)
+{
+	int retval = 0;
+	unsigned char intr_status[MAX_INTR_REGISTERS];
+	unsigned long irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+			IRQF_ONESHOT;
+
+	mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	if (enable) {
+		if (rmi4_data->irq_enabled) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Interrupt already enabled\n",
+					__func__);
+			goto exit;
+		}
+
+		/* Clear interrupts first */
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				rmi4_data->f01_data_base_addr + 1,
+				intr_status,
+				rmi4_data->num_of_intr_regs);
+		if (retval < 0)
+			goto exit;
+
+		retval = request_threaded_irq(rmi4_data->irq, NULL,
+				rmidev_sysfs_irq, irq_flags,
+				PLATFORM_DRIVER_NAME, rmi4_data);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create irq thread\n",
+					__func__);
+			goto exit;
+		}
+
+		rmi4_data->irq_enabled = true;
+	} else {
+		if (rmi4_data->irq_enabled) {
+			disable_irq(rmi4_data->irq);
+			free_irq(rmi4_data->irq, rmi4_data);
+			rmi4_data->irq_enabled = false;
+		}
+	}
+
+exit:
+	mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
+
+	return retval;
+}
+
+static ssize_t rmidev_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned char intr_status = 0;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	if (!rmidev->concurrent)
+		goto exit;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto exit;
+
+	if (length <= 1)
+		goto exit;
+
+	intr_status = buf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+exit:
+	return length;
+}
+
+static ssize_t rmidev_sysfs_data_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int length = (unsigned int)count;
+	unsigned short address = (unsigned short)pos;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (length > (REG_ADDR_LIMIT - address)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Out of register map limit\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (length) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				address,
+				(unsigned char *)buf,
+				length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to write data\n",
+					__func__);
+			return retval;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return length;
+}
+
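+/*
+ * Writing 1 to the "open" attribute hands the device over to user space:
+ * the core driver's attention interrupt is released and replaced with a
+ * minimal handler that only notifies "attn_state", so a host-side tool can
+ * poll attention and drive register traffic through the "data" bin file.
+ * Writing 1 to "release" undoes this and resets the device.
+ */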
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	rmidev_sysfs_irq_enable(rmi4_data, true);
+
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	return count;
+}
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	rmidev_sysfs_irq_enable(rmi4_data, false);
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	return count;
+}
+
+static ssize_t rmidev_sysfs_attn_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int attn_state;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	attn_state = gpio_get_value(bdata->irq_gpio);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", attn_state);
+}
+
+static ssize_t rmidev_sysfs_pid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", rmidev->pid);
+}
+
+static ssize_t rmidev_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->pid = input;
+
+	if (rmidev->pid) {
+		rmidev->task = pid_task(find_vpid(rmidev->pid), PIDTYPE_PID);
+		if (!rmidev->task) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to locate PID of data logging tool\n",
+					__func__);
+			return -EINVAL;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t rmidev_sysfs_term_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 1)
+		return -EINVAL;
+
+	if (rmidev->pid)
+		send_sig_info(SIGTERM, &rmidev->terminate_signal, rmidev->task);
+
+	return count;
+}
+
+static ssize_t rmidev_sysfs_intr_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%02x\n", rmidev->intr_mask);
+}
+
+static ssize_t rmidev_sysfs_intr_mask_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->intr_mask = (unsigned char)input;
+
+	return count;
+}
+
+static ssize_t rmidev_sysfs_concurrent_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", rmidev->concurrent);
+}
+
+static ssize_t rmidev_sysfs_concurrent_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	rmidev->concurrent = input > 0 ? true : false;
+
+	return count;
+}
+
+static int rmidev_allocate_buffer(int count)
+{
+	if (count + 1 > rmidev->tmpbuf_size) {
+		if (rmidev->tmpbuf_size)
+			kfree(rmidev->tmpbuf);
+		rmidev->tmpbuf = kzalloc(count + 1, GFP_KERNEL);
+		if (!rmidev->tmpbuf) {
+			dev_err(rmidev->rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buffer\n",
+					__func__);
+			rmidev->tmpbuf_size = 0;
+			return -ENOMEM;
+		}
+		rmidev->tmpbuf_size = count + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * rmidev_llseek - set register address to access for RMI device
+ *
+ * @filp: pointer to file structure
+ * @off:
+ *	if whence == SEEK_SET,
+ *		off: 16-bit RMI register address
+ *	if whence == SEEK_CUR,
+ *		off: offset from current position
+ *	if whence == SEEK_END,
+ *		off: offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ */
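+/*
+ * A minimal user-space usage sketch, assuming the character node created
+ * below (e.g. /dev/rmi/rmi0):
+ *
+ *	fd = open("/dev/rmi/rmi0", O_RDWR);
+ *	lseek(fd, reg_addr, SEEK_SET);	- position at a 16-bit register address
+ *	read(fd, buf, len);		- read len bytes starting at reg_addr
+ */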
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+	loff_t newpos;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	switch (whence) {
+	case SEEK_SET:
+		newpos = off;
+		break;
+	case SEEK_CUR:
+		newpos = filp->f_pos + off;
+		break;
+	case SEEK_END:
+		newpos = REG_ADDR_LIMIT + off;
+		break;
+	default:
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: New position 0x%04x is invalid\n",
+				__func__, (unsigned int)newpos);
+		newpos = -EINVAL;
+		goto clean_up;
+	}
+
+	filp->f_pos = newpos;
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return newpos;
+}
+
+/*
+ * rmidev_read: read register data from RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to read
+ * @f_pos: starting RMI register address
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	unsigned char intr_status = 0;
+	unsigned short address;
+	struct rmidev_data *dev_data = filp->private_data;
+	struct synaptics_rmi4_fn *fhandler;
+	struct synaptics_rmi4_device_info *rmi;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	rmi = &(rmi4_data->rmi4_mod_info);
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	address = (unsigned short)(*f_pos);
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0)
+		goto clean_up;
+
+	retval = synaptics_rmi4_reg_read(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval < 0)
+		goto clean_up;
+
+	if (copy_to_user(buf, rmidev->tmpbuf, count))
+		retval = -EFAULT;
+	else
+		*f_pos += retval;
+
+	if (!rmidev->concurrent)
+		goto clean_up;
+
+	if (address != rmi4_data->f01_data_base_addr)
+		goto clean_up;
+
+	if (count <= 1)
+		goto clean_up;
+
+	intr_status = rmidev->tmpbuf[1];
+
+	if (!list_empty(&rmi->support_fn_list)) {
+		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+			if (fhandler->num_of_data_sources) {
+				if (fhandler->intr_mask & intr_status) {
+					rmi4_data->report_touch(rmi4_data,
+							fhandler);
+				}
+			}
+		}
+	}
+
+clean_up:
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+/*
+ * rmidev_write: write register data to RMI device
+ *
+ * @filp: pointer to file structure
+ * @buf: pointer to user space buffer
+ * @count: number of bytes to write
+ * @f_pos: starting RMI register address
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	ssize_t retval;
+	struct rmidev_data *dev_data = filp->private_data;
+
+	if (IS_ERR(dev_data)) {
+		pr_err("%s: Pointer of char device data is invalid", __func__);
+		return -EBADF;
+	}
+
+	if (count == 0)
+		return 0;
+
+	if (count > (REG_ADDR_LIMIT - *f_pos))
+		count = REG_ADDR_LIMIT - *f_pos;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	retval = rmidev_allocate_buffer(count);
+	if (retval < 0) {
+		mutex_unlock(&(dev_data->file_mutex));
+		return retval;
+	}
+
+	if (copy_from_user(rmidev->tmpbuf, buf, count)) {
+		mutex_unlock(&(dev_data->file_mutex));
+		return -EFAULT;
+	}
+
+	retval = synaptics_rmi4_reg_write(rmidev->rmi4_data,
+			*f_pos,
+			rmidev->tmpbuf,
+			count);
+	if (retval >= 0)
+		*f_pos += retval;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+	int retval = 0;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	if (rmi4_data->sensor_sleep) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Sensor sleeping\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	rmi4_data->stay_awake = true;
+
+	filp->private_data = dev_data;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	rmi4_data->irq_enable(rmi4_data, false, false);
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Attention interrupt disabled\n",
+			__func__);
+
+	if (dev_data->ref_count < 1)
+		dev_data->ref_count++;
+	else
+		retval = -EACCES;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return retval;
+}
+
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+	struct rmidev_data *dev_data =
+			container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+	if (!dev_data)
+		return -EACCES;
+
+	mutex_lock(&(dev_data->file_mutex));
+
+	dev_data->ref_count--;
+	if (dev_data->ref_count < 0)
+		dev_data->ref_count = 0;
+
+	rmi4_data->reset_device(rmi4_data, false);
+
+	rmi4_data->stay_awake = false;
+
+	mutex_unlock(&(dev_data->file_mutex));
+
+	return 0;
+}
+
+static const struct file_operations rmidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = rmidev_llseek,
+	.read = rmidev_read,
+	.write = rmidev_write,
+	.open = rmidev_open,
+	.release = rmidev_release,
+};
+
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+	dev_t devno;
+	struct synaptics_rmi4_data *rmi4_data = rmidev->rmi4_data;
+
+	if (dev_data) {
+		devno = dev_data->main_dev.dev;
+
+		if (dev_data->device_class)
+			device_destroy(dev_data->device_class, devno);
+
+		cdev_del(&dev_data->main_dev);
+
+		unregister_chrdev_region(devno, 1);
+
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: rmidev device removed\n",
+				__func__);
+	}
+
+	return;
+}
+
+static char *rmi_char_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	*mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+	return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+static int rmidev_create_device_class(void)
+{
+	if (rmidev_device_class != NULL)
+		return 0;
+
+	rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+	if (IS_ERR(rmidev_device_class)) {
+		pr_err("%s: Failed to create /dev/%s\n",
+				__func__, CHAR_DEVICE_NAME);
+		return -ENODEV;
+	}
+
+	rmidev_device_class->devnode = rmi_char_devnode;
+
+	return 0;
+}
+
+static void rmidev_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!rmidev)
+		return;
+
+	if (rmidev->pid && (rmidev->intr_mask & intr_mask))
+		send_sig_info(SIGIO, &rmidev->interrupt_signal, rmidev->task);
+
+	return;
+}
+
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	dev_t dev_no;
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	struct device *device_ptr;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (rmidev) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+	if (!rmidev) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for rmidev\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_rmidev;
+	}
+
+	rmidev->rmi4_data = rmi4_data;
+
+	memset(&rmidev->interrupt_signal, 0, sizeof(rmidev->interrupt_signal));
+	rmidev->interrupt_signal.si_signo = SIGIO;
+	rmidev->interrupt_signal.si_code = SI_USER;
+
+	memset(&rmidev->terminate_signal, 0, sizeof(rmidev->terminate_signal));
+	rmidev->terminate_signal.si_signo = SIGTERM;
+	rmidev->terminate_signal.si_code = SI_USER;
+
+	retval = rmidev_create_device_class();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create device class\n",
+				__func__);
+		goto err_device_class;
+	}
+
+	if (rmidev_major_num) {
+		dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+		retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+	} else {
+		retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to allocate char device region\n",
+					__func__);
+			goto err_device_region;
+		}
+
+		rmidev_major_num = MAJOR(dev_no);
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Major number of rmidev = %d\n",
+				__func__, rmidev_major_num);
+	}
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for dev_data\n",
+				__func__);
+		retval = -ENOMEM;
+		goto err_dev_data;
+	}
+
+	mutex_init(&dev_data->file_mutex);
+	dev_data->rmi_dev = rmidev;
+	rmidev->data = dev_data;
+
+	cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+	retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to add rmi char device\n",
+				__func__);
+		goto err_char_device;
+	}
+
+	rmidev->dev_no = dev_no;
+	dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+	dev_data->device_class = rmidev_device_class;
+
+	device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+			NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+	if (IS_ERR(device_ptr)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create rmi char device\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_char_device;
+	}
+
+	retval = gpio_export(bdata->irq_gpio, false);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to export attention gpio\n",
+				__func__);
+	} else {
+		retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+				"attn", bdata->irq_gpio);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s Failed to create gpio symlink\n",
+					__func__);
+		} else {
+			dev_dbg(rmi4_data->pdev->dev.parent,
+					"%s: Exported attention gpio %d\n",
+					__func__, bdata->irq_gpio);
+		}
+	}
+
+	rmidev->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!rmidev->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto err_sysfs_dir;
+	}
+
+	retval = sysfs_create_bin_file(rmidev->sysfs_dir,
+			&attr_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto err_sysfs_bin;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(rmidev->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto err_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+err_sysfs_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+err_sysfs_bin:
+	kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+err_char_device:
+	rmidev_device_cleanup(dev_data);
+	kfree(dev_data);
+
+err_dev_data:
+	unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+err_device_class:
+	kfree(rmidev);
+	rmidev = NULL;
+
+err_rmidev:
+	return retval;
+}
+
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+	struct rmidev_data *dev_data;
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	if (!rmidev)
+		goto exit;
+
+	rmidev_major_num = RMIDEV_MAJOR_NUM;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+	sysfs_remove_bin_file(rmidev->sysfs_dir, &attr_data);
+
+	kobject_put(rmidev->sysfs_dir);
+
+	sysfs_remove_link(&(rmi4_data->input_dev->dev.kobj), "attn");
+	gpio_unexport(bdata->irq_gpio);
+
+	dev_data = rmidev->data;
+	if (dev_data) {
+		rmidev_device_cleanup(dev_data);
+		kfree(dev_data);
+	}
+
+	unregister_chrdev_region(rmidev->dev_no, 1);
+
+	if (rmidev_device_class != NULL) {
+		class_destroy(rmidev_device_class);
+		rmidev_device_class = NULL;
+	}
+
+	kfree(rmidev->tmpbuf);
+
+	kfree(rmidev);
+	rmidev = NULL;
+
+exit:
+	complete(&rmidev_remove_complete);
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn rmidev_module = {
+	.fn_type = RMI_DEV,
+	.init = rmidev_init_device,
+	.remove = rmidev_remove_device,
+	.reset = NULL,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = rmidev_attn,
+};
+
+static int __init rmidev_module_init(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, true);
+
+	return 0;
+}
+
+static void __exit rmidev_module_exit(void)
+{
+	synaptics_rmi4_new_function(&rmidev_module, false);
+
+	wait_for_completion(&rmidev_remove_complete);
+
+	return;
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX RMI Dev Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
new file mode 100644
index 0000000..244e97e
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_rmi_hid_i2c.c
@@ -0,0 +1,1006 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYN_I2C_RETRY_TIMES 10
+
+#define REPORT_ID_GET_BLOB 0x07
+#define REPORT_ID_WRITE 0x09
+#define REPORT_ID_READ_ADDRESS 0x0a
+#define REPORT_ID_READ_DATA 0x0b
+#define REPORT_ID_SET_RMI_MODE 0x0f
+
+#define PREFIX_USAGE_PAGE_1BYTE 0x05
+#define PREFIX_USAGE_PAGE_2BYTES 0x06
+#define PREFIX_USAGE 0x09
+#define PREFIX_REPORT_ID 0x85
+#define PREFIX_REPORT_COUNT_1BYTE 0x95
+#define PREFIX_REPORT_COUNT_2BYTES 0x96
+
+#define USAGE_GET_BLOB 0xc5
+#define USAGE_WRITE 0x02
+#define USAGE_READ_ADDRESS 0x03
+#define USAGE_READ_DATA 0x04
+#define USAGE_SET_MODE 0x06
+
+#define FEATURE_REPORT_TYPE 0x03
+
+#define VENDOR_DEFINED_PAGE 0xff00
+
+#define BLOB_REPORT_SIZE 256
+
+#define RESET_COMMAND 0x01
+#define GET_REPORT_COMMAND 0x02
+#define SET_REPORT_COMMAND 0x03
+#define SET_POWER_COMMAND 0x08
+
+#define FINGER_MODE 0x00
+#define RMI_MODE 0x02
+
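+/*
+ * The defines above describe the HID-over-I2C transport: the report
+ * descriptor is presumably parsed for the vendor-defined usage page (0xff00)
+ * to discover the report IDs used for register reads and writes, and a
+ * SET_REPORT feature command switches the controller from normal HID finger
+ * reporting (FINGER_MODE) to raw RMI register access (RMI_MODE).
+ */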
+struct hid_report_info {
+	unsigned char get_blob_id;
+	unsigned char write_id;
+	unsigned char read_addr_id;
+	unsigned char read_data_id;
+	unsigned char set_mode_id;
+	unsigned int blob_size;
+};
+
+static struct hid_report_info hid_report;
+
+struct hid_device_descriptor {
+	unsigned short device_descriptor_length;
+	unsigned short format_version;
+	unsigned short report_descriptor_length;
+	unsigned short report_descriptor_index;
+	unsigned short input_register_index;
+	unsigned short input_report_max_length;
+	unsigned short output_register_index;
+	unsigned short output_report_max_length;
+	unsigned short command_register_index;
+	unsigned short data_register_index;
+	unsigned short vendor_id;
+	unsigned short product_id;
+	unsigned short version_id;
+	unsigned int reserved;
+};
+
+static struct hid_device_descriptor hid_dd;
+
+struct i2c_rw_buffer {
+	unsigned char *read;
+	unsigned char *write;
+	unsigned int read_size;
+	unsigned int write_size;
+};
+
+static struct i2c_rw_buffer buffer;
+
+#ifdef CONFIG_OF
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,dev-dscrptr-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,dev-dscrptr-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,dev-dscrptr-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->device_descriptor_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->device_descriptor_addr = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
+static int do_i2c_transfer(struct i2c_client *client, struct i2c_msg *msg)
+{
+	unsigned char retry;
+
+	for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+		if (i2c_transfer(client->adapter, msg, 1) == 1)
+			break;
+		dev_err(&client->dev,
+				"%s: I2C retry %d\n",
+				__func__, retry + 1);
+		msleep(20);
+	}
+
+	if (retry == SYN_I2C_RETRY_TIMES) {
+		dev_err(&client->dev,
+				"%s: I2C transfer over retry limit\n",
+				__func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int check_buffer(unsigned char **buffer, unsigned int *buffer_size,
+		unsigned int length)
+{
+	if (*buffer_size < length) {
+		if (*buffer_size)
+			kfree(*buffer);
+		*buffer = kzalloc(length, GFP_KERNEL);
+		if (!(*buffer))
+			return -ENOMEM;
+		*buffer_size = length;
+	}
+
+	return 0;
+}
+
+static int generic_read(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.len = length,
+		}
+	};
+
+	check_buffer(&buffer.read, &buffer.read_size, length);
+	msg[0].buf = buffer.read;
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
+static int generic_write(struct i2c_client *client, unsigned short length)
+{
+	int retval;
+	struct i2c_msg msg[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.len = length,
+			.buf = buffer.write,
+		}
+	};
+
+	retval = do_i2c_transfer(client, msg);
+
+	return retval;
+}
+
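+/*
+ * Advance the parsing index past one short item in the HID report
+ * descriptor: the low two bits of the item prefix encode the size of the
+ * data that follows the one-byte prefix (0, 1, 2 or 4 bytes).
+ */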
+static void traverse_report_descriptor(unsigned int *index)
+{
+	unsigned char size;
+	unsigned char *buf = buffer.read;
+
+	size = buf[*index] & MASK_2BIT;
+	switch (size) {
+	case 0: /* 0 bytes */
+		*index += 1;
+		break;
+	case 1: /* 1 byte */
+		*index += 2;
+		break;
+	case 2: /* 2 bytes */
+		*index += 3;
+		break;
+	case 3: /* 4 bytes */
+		*index += 5;
+		break;
+	default:
+		break;
+	}
+
+	return;
+}
+
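+/*
+ * Starting from the item that declared the get-blob report ID, look for the
+ * next Report Count item to determine the size of the blob report.
+ */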
+static void find_blob_size(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		if (buf[ii] == PREFIX_REPORT_COUNT_1BYTE) {
+			hid_report.blob_size = buf[ii + 1];
+			return;
+		} else if (buf[ii] == PREFIX_REPORT_COUNT_2BYTES) {
+			hid_report.blob_size = buf[ii + 1] | (buf[ii + 2] << 8);
+			return;
+		}
+		traverse_report_descriptor(&ii);
+	}
+
+	return;
+}
+
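+/*
+ * Inspect one item of the report descriptor and record the report IDs of
+ * the vendor-defined usages (get blob, write, read address, read data, set
+ * mode). The most recently seen report ID and usage page are kept in static
+ * variables between calls.
+ */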
+static void find_reports(unsigned int index)
+{
+	unsigned int ii = index;
+	unsigned char *buf = buffer.read;
+	static unsigned int report_id_index;
+	static unsigned char report_id;
+	static unsigned short usage_page;
+
+	if (buf[ii] == PREFIX_REPORT_ID) {
+		report_id = buf[ii + 1];
+		report_id_index = ii;
+		return;
+	}
+
+	if (buf[ii] == PREFIX_USAGE_PAGE_1BYTE) {
+		usage_page = buf[ii + 1];
+		return;
+	} else if (buf[ii] == PREFIX_USAGE_PAGE_2BYTES) {
+		usage_page = buf[ii + 1] | (buf[ii + 2] << 8);
+		return;
+	}
+
+	if ((usage_page == VENDOR_DEFINED_PAGE) && (buf[ii] == PREFIX_USAGE)) {
+		switch (buf[ii + 1]) {
+		case USAGE_GET_BLOB:
+			hid_report.get_blob_id = report_id;
+			find_blob_size(report_id_index);
+			break;
+		case USAGE_WRITE:
+			hid_report.write_id = report_id;
+			break;
+		case USAGE_READ_ADDRESS:
+			hid_report.read_addr_id = report_id;
+			break;
+		case USAGE_READ_DATA:
+			hid_report.read_data_id = report_id;
+			break;
+		case USAGE_SET_MODE:
+			hid_report.set_mode_id = report_id;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+}
+
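+/*
+ * Read the report descriptor from the device, seed the report IDs and blob
+ * size with their default values, then walk the descriptor to pick up the
+ * values actually reported by the firmware.
+ */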
+static int parse_report_descriptor(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned int ii = 0;
+	unsigned char *buf;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	buffer.write[0] = hid_dd.report_descriptor_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.report_descriptor_index >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		return retval;
+	retval = generic_read(i2c, hid_dd.report_descriptor_length);
+	if (retval < 0)
+		return retval;
+
+	buf = buffer.read;
+
+	hid_report.get_blob_id = REPORT_ID_GET_BLOB;
+	hid_report.write_id = REPORT_ID_WRITE;
+	hid_report.read_addr_id = REPORT_ID_READ_ADDRESS;
+	hid_report.read_data_id = REPORT_ID_READ_DATA;
+	hid_report.set_mode_id = REPORT_ID_SET_RMI_MODE;
+	hid_report.blob_size = BLOB_REPORT_SIZE;
+
+	while (ii < hid_dd.report_descriptor_length) {
+		find_reports(ii);
+		traverse_report_descriptor(&ii);
+	}
+
+	return 0;
+}
+
+static int switch_to_rmi(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 11);
+
+	/* set rmi mode */
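+	/*
+	 * SET_REPORT feature request issued through the command register:
+	 * bytes 0-1 = command register, byte 2 = report type/ID, byte 3 =
+	 * SET_REPORT, byte 4 = report ID, bytes 5-6 = data register,
+	 * bytes 7-8 = payload length (4), bytes 9-10 = report ID and mode.
+	 */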
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = SET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+	buffer.write[7] = 0x04;
+	buffer.write[8] = 0x00;
+	buffer.write[9] = hid_report.set_mode_id;
+	buffer.write[10] = RMI_MODE;
+
+	retval = generic_write(i2c, 11);
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
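+/*
+ * Issue a GET_REPORT request for the set-mode feature report and return the
+ * mode byte from the response, allowing callers to verify that the device
+ * is still in RMI register access mode.
+ */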
+static int check_report_mode(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned short report_size;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 7);
+
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.set_mode_id;
+	buffer.write[3] = GET_REPORT_COMMAND;
+	buffer.write[4] = hid_report.set_mode_id;
+	buffer.write[5] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[6] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, 2);
+	if (retval < 0)
+		goto exit;
+
+	report_size = (buffer.read[1] << 8) | buffer.read[0];
+
+	retval = generic_write(i2c, 7);
+	if (retval < 0)
+		goto exit;
+
+	retval = generic_read(i2c, report_size);
+	if (retval < 0)
+		goto exit;
+
+	retval = buffer.read[3];
+	dev_dbg(rmi4_data->pdev->dev.parent,
+			"%s: Report mode = %d\n",
+			__func__, retval);
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+static int hid_i2c_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, 6);
+
+	/* read device descriptor */
+	buffer.write[0] = bdata->device_descriptor_addr & MASK_8BIT;
+	buffer.write[1] = bdata->device_descriptor_addr >> 8;
+	retval = generic_write(i2c, 2);
+	if (retval < 0)
+		goto exit;
+	retval = generic_read(i2c, sizeof(hid_dd));
+	if (retval < 0)
+		goto exit;
+	retval = secure_memcpy((unsigned char *)&hid_dd,
+			sizeof(struct hid_device_descriptor),
+			buffer.read,
+			buffer.read_size,
+			sizeof(hid_dd));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy device descriptor data\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = parse_report_descriptor(rmi4_data);
+	if (retval < 0)
+		goto exit;
+
+	/* set power */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = SET_POWER_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
+	/* reset */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = 0x00;
+	buffer.write[3] = RESET_COMMAND;
+	retval = generic_write(i2c, 4);
+	if (retval < 0)
+		goto exit;
+
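+	/*
+	 * Wait for the interrupt line to go low, then read the report
+	 * generated by the reset.
+	 */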
+	while (gpio_get_value(bdata->irq_gpio))
+		msleep(20);
+
+	retval = generic_read(i2c, hid_dd.input_report_max_length);
+	if (retval < 0)
+		goto exit;
+
+	/* get blob */
+	buffer.write[0] = hid_dd.command_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.command_register_index >> 8;
+	buffer.write[2] = (FEATURE_REPORT_TYPE << 4) | hid_report.get_blob_id;
+	buffer.write[3] = 0x02;
+	buffer.write[4] = hid_dd.data_register_index & MASK_8BIT;
+	buffer.write[5] = hid_dd.data_register_index >> 8;
+
+	retval = generic_write(i2c, 6);
+	if (retval < 0)
+		goto exit;
+
+	msleep(20);
+
+	retval = generic_read(i2c, hid_report.blob_size + 3);
+	if (retval < 0)
+		goto exit;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to initialize HID/I2C interface\n",
+				__func__);
+		return retval;
+	}
+
+	retval = switch_to_rmi(rmi4_data);
+
+	return retval;
+}
+
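+/*
+ * Register read over the HID/I2C interface: send an output report carrying
+ * the read-address request (RMI address and length), then fetch input
+ * reports until one of the expected maximum length arrives and copy its
+ * payload to the caller. If the transfer fails and the device has dropped
+ * out of RMI mode, the interface is re-initialized once and the read is
+ * retried.
+ */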
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char retry;
+	unsigned char recover = 1;
+	unsigned short report_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+			.len = hid_dd.output_report_max_length + 2,
+		},
+		{
+			.addr = i2c->addr,
+			.flags = I2C_M_RD,
+			.len = (unsigned short)(length + 4),
+		},
+	};
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size,
+			hid_dd.output_report_max_length + 2);
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.read_addr_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+
+	check_buffer(&buffer.read, &buffer.read_size, length + 4);
+	msg[1].buf = buffer.read;
+
+	retval = do_i2c_transfer(i2c, &msg[0]);
+	if (retval != 0)
+		goto exit;
+
+	retry = 0;
+	do {
+		retval = do_i2c_transfer(i2c, &msg[1]);
+		if (retval == 0)
+			retval = length;
+		else
+			goto exit;
+
+		report_length = (buffer.read[1] << 8) | buffer.read[0];
+		if (report_length == hid_dd.input_report_max_length) {
+			retval = secure_memcpy(&data[0], length,
+					&buffer.read[4], buffer.read_size - 4,
+					length);
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to copy data\n",
+						__func__);
+			} else {
+				retval = length;
+			}
+			goto exit;
+		}
+
+		msleep(20);
+		retry++;
+	} while (retry < SYN_I2C_RETRY_TIMES);
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to receive read report\n",
+			__func__);
+	retval = -EIO;
+
+exit:
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
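+/*
+ * Register write over the HID/I2C interface: a single output report whose
+ * 10-byte header holds the output register, report length, write report ID,
+ * RMI address and payload length, followed by the data itself. As with
+ * reads, a failed transfer triggers a one-shot re-initialization if the
+ * device is no longer in RMI mode.
+ */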
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char recover = 1;
+	unsigned int msg_length;
+	struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+	struct i2c_msg msg[] = {
+		{
+			.addr = i2c->addr,
+			.flags = 0,
+		}
+	};
+
+	if ((length + 10) < (hid_dd.output_report_max_length + 2))
+		msg_length = hid_dd.output_report_max_length + 2;
+	else
+		msg_length = length + 10;
+
+recover:
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	check_buffer(&buffer.write, &buffer.write_size, msg_length);
+	msg[0].len = (unsigned short)msg_length;
+	msg[0].buf = buffer.write;
+	buffer.write[0] = hid_dd.output_register_index & MASK_8BIT;
+	buffer.write[1] = hid_dd.output_register_index >> 8;
+	buffer.write[2] = hid_dd.output_report_max_length & MASK_8BIT;
+	buffer.write[3] = hid_dd.output_report_max_length >> 8;
+	buffer.write[4] = hid_report.write_id;
+	buffer.write[5] = 0x00;
+	buffer.write[6] = addr & MASK_8BIT;
+	buffer.write[7] = addr >> 8;
+	buffer.write[8] = (unsigned char)length;
+	buffer.write[9] = (unsigned char)(length >> 8);
+	retval = secure_memcpy(&buffer.write[10], buffer.write_size - 10,
+			&data[0], length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+	} else {
+		retval = do_i2c_transfer(i2c, msg);
+		if (retval == 0)
+			retval = length;
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	if ((retval != length) && (recover == 1)) {
+		recover = 0;
+		if (check_report_mode(rmi4_data) != RMI_MODE) {
+			retval = hid_i2c_init(rmi4_data);
+			if (retval == 0)
+				goto recover;
+		}
+	}
+
+	return retval;
+}
+
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_I2C,
+	.read = synaptics_rmi4_i2c_read,
+	.write = synaptics_rmi4_i2c_write,
+};
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_i2c_device;
+
+static void synaptics_rmi4_i2c_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_i2c_device);
+
+	return;
+}
+
+static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev,
+				"%s: I2C transfers not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_i2c_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_i2c_device) {
+		dev_err(&client->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_i2c_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (client->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&client->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&client->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		retval = parse_dt(&client->dev, hw_if.board_data);
+		if (retval < 0)
+			return retval;
+	}
+#else
+	hw_if.board_data = client->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+	hw_if.bl_hw_init = switch_to_rmi;
+	hw_if.ui_hw_init = hid_i2c_init;
+
+	synaptics_dsx_i2c_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_i2c_device->id = 0;
+	synaptics_dsx_i2c_device->num_resources = 0;
+	synaptics_dsx_i2c_device->dev.parent = &client->dev;
+	synaptics_dsx_i2c_device->dev.platform_data = &hw_if;
+	synaptics_dsx_i2c_device->dev.release = synaptics_rmi4_i2c_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_i2c_device);
+	if (retval) {
+		dev_err(&client->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int synaptics_rmi4_i2c_remove(struct i2c_client *client)
+{
+	if (buffer.read_size)
+		kfree(buffer.read);
+
+	if (buffer.write_size)
+		kfree(buffer.write);
+
+	platform_device_unregister(synaptics_dsx_i2c_device);
+
+	return 0;
+}
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{I2C_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-rmi-hid-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct i2c_driver synaptics_rmi4_i2c_driver = {
+	.driver = {
+		.name = I2C_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_i2c_probe,
+	.remove = synaptics_rmi4_i2c_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+int synaptics_rmi4_bus_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_i2c_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+void synaptics_rmi4_bus_exit(void)
+{
+	i2c_del_driver(&synaptics_rmi4_i2c_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX I2C Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
new file mode 100644
index 0000000..e2dafbb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_spi.c
@@ -0,0 +1,712 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SPI_READ 0x80
+#define SPI_WRITE 0x00
+
+static unsigned char *buf;
+
+static struct spi_transfer *xfer;
+
+#ifdef CONFIG_OF
+static int parse_dt(struct device *dev, struct synaptics_dsx_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	const char *name;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+
+	bdata->irq_gpio = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state",
+			&value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_on_state = value;
+		}
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,power-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->power_delay_ms = value;
+		}
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-on-state property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_on_state = value;
+		}
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-active-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_active_ms = value;
+		}
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,reset-delay-ms property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->reset_delay_ms = value;
+		}
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,byte-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->byte_delay_us = value;
+		}
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,block-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->block_delay_us = value;
+		}
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,address-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,address-delay-us",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,address-delay-us property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->addr_delay_us = value;
+		}
+	} else {
+		bdata->addr_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,max-y-for-2d", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,max-y-for-2d",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,max-y-for-2d property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->max_y_for_2d = value;
+		}
+	} else {
+		bdata->max_y_for_2d = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,ub-i2c-addr", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ub-i2c-addr",
+				&value);
+		if (retval < 0) {
+			dev_err(dev, "%s: Unable to read synaptics,ub-i2c-addr property\n",
+					__func__);
+			return retval;
+		} else {
+			bdata->ub_i2c_addr = (unsigned short)value;
+		}
+	} else {
+		bdata->ub_i2c_addr = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,cap-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->cap_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->cap_button_map->map)
+			return -ENOMEM;
+		bdata->cap_button_map->nbuttons = prop->length / sizeof(u32);
+		retval = of_property_read_u32_array(np,
+				"synaptics,cap-button-codes",
+				bdata->cap_button_map->map,
+				bdata->cap_button_map->nbuttons);
+		if (retval < 0) {
+			bdata->cap_button_map->nbuttons = 0;
+			bdata->cap_button_map->map = NULL;
+		}
+	} else {
+		bdata->cap_button_map->nbuttons = 0;
+		bdata->cap_button_map->map = NULL;
+	}
+
+	prop = of_find_property(np, "synaptics,vir-button-codes", NULL);
+	if (prop && prop->length) {
+		bdata->vir_button_map->map = devm_kzalloc(dev,
+				prop->length,
+				GFP_KERNEL);
+		if (!bdata->vir_button_map->map)
+			return -ENOMEM;
+		bdata->vir_button_map->nbuttons = prop->length / sizeof(u32);
+		bdata->vir_button_map->nbuttons /= 5;
+		retval = of_property_read_u32_array(np,
+				"synaptics,vir-button-codes",
+				bdata->vir_button_map->map,
+				bdata->vir_button_map->nbuttons * 5);
+		if (retval < 0) {
+			bdata->vir_button_map->nbuttons = 0;
+			bdata->vir_button_map->map = NULL;
+		}
+	} else {
+		bdata->vir_button_map->nbuttons = 0;
+		bdata->vir_button_map->map = NULL;
+	}
+
+	return 0;
+}
+#endif
+
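+/*
+ * Grow the shared data buffer and spi_transfer array on demand. The current
+ * sizes are tracked in static variables so the allocations can be reused
+ * across transfers.
+ */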
+static int synaptics_rmi4_spi_alloc_buf(struct synaptics_rmi4_data *rmi4_data,
+		unsigned int size, unsigned int count)
+{
+	static unsigned int buf_size;
+	static unsigned int xfer_count;
+
+	if (size > buf_size) {
+		if (buf_size)
+			kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for buf\n",
+					__func__);
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	if (count > xfer_count) {
+		if (xfer_count)
+			kfree(xfer);
+		xfer = kcalloc(count, sizeof(struct spi_transfer), GFP_KERNEL);
+		if (!xfer) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for xfer\n",
+					__func__);
+			xfer_count = 0;
+			return -ENOMEM;
+		}
+		xfer_count = count;
+	} else {
+		memset(xfer, 0, count * sizeof(struct spi_transfer));
+	}
+
+	return 0;
+}
+
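+/*
+ * Select the register page for a 16-bit RMI address by writing the page
+ * select register (0xff) before the actual transfer, honoring the optional
+ * address, per-byte and block delays configured in the board data.
+ */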
+static int synaptics_rmi4_spi_set_page(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = PAGE_SELECT_LEN + 1;
+	unsigned char page;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	page = ((addr >> 8) & MASK_8BIT);
+	if ((page >> 7) == (rmi4_data->current_page >> 7))
+		return PAGE_SELECT_LEN;
+
+	spi_message_init(&msg);
+
+	retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+			byte_count);
+	if (retval < 0)
+		return retval;
+
+	buf[0] = SPI_WRITE;
+	buf[1] = MASK_8BIT;
+	buf[2] = page;
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		rmi4_data->current_page = page;
+		retval = PAGE_SELECT_LEN;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	return retval;
+}
+
+static int synaptics_rmi4_spi_read(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	unsigned char txbuf[ADDRESS_LEN];
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	txbuf[0] = (addr >> 8) | SPI_READ;
+	txbuf[1] = addr & MASK_8BIT;
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				2);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, length,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
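+	/*
+	 * Without a per-byte delay the address goes out as one transfer and
+	 * the data comes back in a second; otherwise every byte becomes its
+	 * own single-byte transfer with the configured inter-byte delay.
+	 */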
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = ADDRESS_LEN;
+		xfer[0].tx_buf = &txbuf[0];
+		spi_message_add_tail(&xfer[0], &msg);
+		xfer[1].len = length;
+		xfer[1].rx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[1].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			if (index < ADDRESS_LEN)
+				xfer[index].tx_buf = &txbuf[index];
+			else
+				xfer[index].rx_buf = &buf[index - ADDRESS_LEN];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = secure_memcpy(data, length, buf, length, length);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+		} else {
+			retval = length;
+		}
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+static int synaptics_rmi4_spi_write(struct synaptics_rmi4_data *rmi4_data,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int index;
+	unsigned int byte_count = length + ADDRESS_LEN;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(rmi4_data->pdev->dev.parent);
+	const struct synaptics_dsx_board_data *bdata =
+			rmi4_data->hw_if->board_data;
+
+	spi_message_init(&msg);
+
+	mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	retval = synaptics_rmi4_spi_set_page(rmi4_data, addr);
+	if (retval != PAGE_SELECT_LEN) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return -EIO;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				1);
+	} else {
+		retval = synaptics_rmi4_spi_alloc_buf(rmi4_data, byte_count,
+				byte_count);
+	}
+	if (retval < 0) {
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	buf[0] = (addr >> 8) & ~SPI_READ;
+	buf[1] = addr & MASK_8BIT;
+	retval = secure_memcpy(&buf[ADDRESS_LEN],
+			byte_count - ADDRESS_LEN, data, length, length);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy data\n",
+				__func__);
+		mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+		return retval;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		xfer[0].len = byte_count;
+		xfer[0].tx_buf = &buf[0];
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(xfer, &msg);
+	} else {
+		for (index = 0; index < byte_count; index++) {
+			xfer[index].len = 1;
+			xfer[index].tx_buf = &buf[index];
+			if (index == 1)
+				xfer[index].delay_usecs = bdata->addr_delay_us;
+			else
+				xfer[index].delay_usecs = bdata->byte_delay_us;
+			spi_message_add_tail(&xfer[index], &msg);
+		}
+		if (bdata->block_delay_us)
+			xfer[index - 1].delay_usecs = bdata->block_delay_us;
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to complete SPI transfer, error = %d\n",
+				__func__, retval);
+	}
+
+	mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+	return retval;
+}
+
+static struct synaptics_dsx_bus_access bus_access = {
+	.type = BUS_SPI,
+	.read = synaptics_rmi4_spi_read,
+	.write = synaptics_rmi4_spi_write,
+};
+
+static struct synaptics_dsx_hw_interface hw_if;
+
+static struct platform_device *synaptics_dsx_spi_device;
+
+static void synaptics_rmi4_spi_dev_release(struct device *dev)
+{
+	kfree(synaptics_dsx_spi_device);
+
+	return;
+}
+
+static int synaptics_rmi4_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		dev_err(&spi->dev,
+				"%s: Full duplex not supported by host\n",
+				__func__);
+		return -EIO;
+	}
+
+	synaptics_dsx_spi_device = kzalloc(
+			sizeof(struct platform_device),
+			GFP_KERNEL);
+	if (!synaptics_dsx_spi_device) {
+		dev_err(&spi->dev,
+				"%s: Failed to allocate memory for synaptics_dsx_spi_device\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	if (spi->dev.of_node) {
+		hw_if.board_data = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_board_data),
+				GFP_KERNEL);
+		if (!hw_if.board_data) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for board data\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->cap_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->cap_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for 0D button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		hw_if.board_data->vir_button_map = devm_kzalloc(&spi->dev,
+				sizeof(struct synaptics_dsx_button_map),
+				GFP_KERNEL);
+		if (!hw_if.board_data->vir_button_map) {
+			dev_err(&spi->dev,
+					"%s: Failed to allocate memory for virtual button map\n",
+					__func__);
+			return -ENOMEM;
+		}
+		retval = parse_dt(&spi->dev, hw_if.board_data);
+		if (retval < 0)
+			return retval;
+	}
+#else
+	hw_if.board_data = spi->dev.platform_data;
+#endif
+
+	hw_if.bus_access = &bus_access;
+
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		dev_err(&spi->dev,
+				"%s: Failed to perform SPI setup\n",
+				__func__);
+		return retval;
+	}
+
+	synaptics_dsx_spi_device->name = PLATFORM_DRIVER_NAME;
+	synaptics_dsx_spi_device->id = 0;
+	synaptics_dsx_spi_device->num_resources = 0;
+	synaptics_dsx_spi_device->dev.parent = &spi->dev;
+	synaptics_dsx_spi_device->dev.platform_data = &hw_if;
+	synaptics_dsx_spi_device->dev.release = synaptics_rmi4_spi_dev_release;
+
+	retval = platform_device_register(synaptics_dsx_spi_device);
+	if (retval) {
+		dev_err(&spi->dev,
+				"%s: Failed to register platform device\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int synaptics_rmi4_spi_remove(struct spi_device *spi)
+{
+	platform_device_unregister(synaptics_dsx_spi_device);
+
+	return 0;
+}
+
+static const struct spi_device_id synaptics_rmi4_id_table[] = {
+	{SPI_DRIVER_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(spi, synaptics_rmi4_id_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id synaptics_rmi4_of_match_table[] = {
+	{
+		.compatible = "synaptics,dsx-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, synaptics_rmi4_of_match_table);
+#else
+#define synaptics_rmi4_of_match_table NULL
+#endif
+
+static struct spi_driver synaptics_rmi4_spi_driver = {
+	.driver = {
+		.name = SPI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = synaptics_rmi4_of_match_table,
+	},
+	.probe = synaptics_rmi4_spi_probe,
+	.remove = synaptics_rmi4_spi_remove,
+	.id_table = synaptics_rmi4_id_table,
+};
+
+int synaptics_rmi4_bus_init(void)
+{
+	return spi_register_driver(&synaptics_rmi4_spi_driver);
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_init);
+
+void synaptics_rmi4_bus_exit(void)
+{
+	kfree(buf);
+
+	kfree(xfer);
+
+	spi_unregister_driver(&synaptics_rmi4_spi_driver);
+
+	return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX SPI Bus Support Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
new file mode 100644
index 0000000..606e737
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -0,0 +1,5356 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "f54"
+
+#define GET_REPORT_TIMEOUT_S 3
+#define CALIBRATION_TIMEOUT_S 10
+#define COMMAND_TIMEOUT_100MS 20
+
+#define NO_SLEEP_OFF (0 << 2)
+#define NO_SLEEP_ON (1 << 2)
+
+#define STATUS_IDLE 0
+#define STATUS_BUSY 1
+#define STATUS_ERROR 2
+
+#define REPORT_INDEX_OFFSET 1
+#define REPORT_DATA_OFFSET 3
+
+#define SENSOR_RX_MAPPING_OFFSET 1
+#define SENSOR_TX_MAPPING_OFFSET 2
+
+#define COMMAND_GET_REPORT 1
+#define COMMAND_FORCE_CAL 2
+#define COMMAND_FORCE_UPDATE 4
+
+#define CONTROL_NO_AUTO_CAL 1
+
+#define CONTROL_0_SIZE 1
+#define CONTROL_1_SIZE 1
+#define CONTROL_2_SIZE 2
+#define CONTROL_3_SIZE 1
+#define CONTROL_4_6_SIZE 3
+#define CONTROL_7_SIZE 1
+#define CONTROL_8_9_SIZE 3
+#define CONTROL_10_SIZE 1
+#define CONTROL_11_SIZE 2
+#define CONTROL_12_13_SIZE 2
+#define CONTROL_14_SIZE 1
+#define CONTROL_15_SIZE 1
+#define CONTROL_16_SIZE 1
+#define CONTROL_17_SIZE 1
+#define CONTROL_18_SIZE 1
+#define CONTROL_19_SIZE 1
+#define CONTROL_20_SIZE 1
+#define CONTROL_21_SIZE 2
+#define CONTROL_22_26_SIZE 7
+#define CONTROL_27_SIZE 1
+#define CONTROL_28_SIZE 2
+#define CONTROL_29_SIZE 1
+#define CONTROL_30_SIZE 1
+#define CONTROL_31_SIZE 1
+#define CONTROL_32_35_SIZE 8
+#define CONTROL_36_SIZE 1
+#define CONTROL_37_SIZE 1
+#define CONTROL_38_SIZE 1
+#define CONTROL_39_SIZE 1
+#define CONTROL_40_SIZE 1
+#define CONTROL_41_SIZE 1
+#define CONTROL_42_SIZE 2
+#define CONTROL_43_54_SIZE 13
+#define CONTROL_55_56_SIZE 2
+#define CONTROL_57_SIZE 1
+#define CONTROL_58_SIZE 1
+#define CONTROL_59_SIZE 2
+#define CONTROL_60_62_SIZE 3
+#define CONTROL_63_SIZE 1
+#define CONTROL_64_67_SIZE 4
+#define CONTROL_68_73_SIZE 8
+#define CONTROL_70_73_SIZE 6
+#define CONTROL_74_SIZE 2
+#define CONTROL_75_SIZE 1
+#define CONTROL_76_SIZE 1
+#define CONTROL_77_78_SIZE 2
+#define CONTROL_79_83_SIZE 5
+#define CONTROL_84_85_SIZE 2
+#define CONTROL_86_SIZE 1
+#define CONTROL_87_SIZE 1
+#define CONTROL_88_SIZE 1
+#define CONTROL_89_SIZE 1
+#define CONTROL_90_SIZE 1
+#define CONTROL_91_SIZE 1
+#define CONTROL_92_SIZE 1
+#define CONTROL_93_SIZE 1
+#define CONTROL_94_SIZE 1
+#define CONTROL_95_SIZE 1
+#define CONTROL_96_SIZE 1
+#define CONTROL_97_SIZE 1
+#define CONTROL_98_SIZE 1
+#define CONTROL_99_SIZE 1
+#define CONTROL_100_SIZE 1
+#define CONTROL_101_SIZE 1
+#define CONTROL_102_SIZE 1
+#define CONTROL_103_SIZE 1
+#define CONTROL_104_SIZE 1
+#define CONTROL_105_SIZE 1
+#define CONTROL_106_SIZE 1
+#define CONTROL_107_SIZE 1
+#define CONTROL_108_SIZE 1
+#define CONTROL_109_SIZE 1
+#define CONTROL_110_SIZE 1
+#define CONTROL_111_SIZE 1
+#define CONTROL_112_SIZE 1
+#define CONTROL_113_SIZE 1
+#define CONTROL_114_SIZE 1
+#define CONTROL_115_SIZE 1
+#define CONTROL_116_SIZE 1
+#define CONTROL_117_SIZE 1
+#define CONTROL_118_SIZE 1
+#define CONTROL_119_SIZE 1
+#define CONTROL_120_SIZE 1
+#define CONTROL_121_SIZE 1
+#define CONTROL_122_SIZE 1
+#define CONTROL_123_SIZE 1
+#define CONTROL_124_SIZE 1
+#define CONTROL_125_SIZE 1
+#define CONTROL_126_SIZE 1
+#define CONTROL_127_SIZE 1
+#define CONTROL_128_SIZE 1
+#define CONTROL_129_SIZE 1
+#define CONTROL_130_SIZE 1
+#define CONTROL_131_SIZE 1
+#define CONTROL_132_SIZE 1
+#define CONTROL_133_SIZE 1
+#define CONTROL_134_SIZE 1
+#define CONTROL_135_SIZE 1
+#define CONTROL_136_SIZE 1
+#define CONTROL_137_SIZE 1
+#define CONTROL_138_SIZE 1
+#define CONTROL_139_SIZE 1
+#define CONTROL_140_SIZE 1
+#define CONTROL_141_SIZE 1
+#define CONTROL_142_SIZE 1
+#define CONTROL_143_SIZE 1
+#define CONTROL_144_SIZE 1
+#define CONTROL_145_SIZE 1
+#define CONTROL_146_SIZE 1
+#define CONTROL_147_SIZE 1
+#define CONTROL_148_SIZE 1
+#define CONTROL_149_SIZE 1
+#define CONTROL_150_SIZE 1
+#define CONTROL_151_SIZE 1
+#define CONTROL_152_SIZE 1
+#define CONTROL_153_SIZE 1
+#define CONTROL_154_SIZE 1
+#define CONTROL_155_SIZE 1
+#define CONTROL_156_SIZE 1
+#define CONTROL_157_158_SIZE 2
+#define CONTROL_163_SIZE 1
+#define CONTROL_165_SIZE 1
+#define CONTROL_166_SIZE 1
+#define CONTROL_167_SIZE 1
+#define CONTROL_168_SIZE 1
+#define CONTROL_169_SIZE 1
+#define CONTROL_171_SIZE 1
+#define CONTROL_172_SIZE 1
+#define CONTROL_173_SIZE 1
+#define CONTROL_174_SIZE 1
+#define CONTROL_175_SIZE 1
+#define CONTROL_176_SIZE 1
+#define CONTROL_177_178_SIZE 2
+#define CONTROL_179_SIZE 1
+#define CONTROL_182_SIZE 1
+#define CONTROL_183_SIZE 1
+#define CONTROL_185_SIZE 1
+#define CONTROL_186_SIZE 1
+#define CONTROL_187_SIZE 1
+#define CONTROL_188_SIZE 1
+
+#define HIGH_RESISTANCE_DATA_SIZE 6
+#define FULL_RAW_CAP_MIN_MAX_DATA_SIZE 4
+#define TRX_OPEN_SHORT_DATA_SIZE 7
+
+#define concat(a, b) a##b
+
+#define attrify(propname) (&dev_attr_##propname.attr)
+
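+/*
+ * Helpers that declare a test reporting sysfs attribute together with
+ * prototypes for its show and/or store handlers.
+ */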
+#define show_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0444,\
+		concat(test_sysfs, _##propname##_show),\
+		synaptics_rmi4_store_error);
+
+#define store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0220,\
+		synaptics_rmi4_show_error,\
+		concat(test_sysfs, _##propname##_store));
+
+#define show_store_prototype(propname)\
+static ssize_t concat(test_sysfs, _##propname##_show)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		char *buf);\
+\
+static ssize_t concat(test_sysfs, _##propname##_store)(\
+		struct device *dev,\
+		struct device_attribute *attr,\
+		const char *buf, size_t count);\
+\
+static struct device_attribute dev_attr_##propname =\
+		__ATTR(propname, 0664,\
+		concat(test_sysfs, _##propname##_show),\
+		concat(test_sysfs, _##propname##_store));
+
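+/*
+ * Read-modify-write helper that clears the CBC transmit carrier selection
+ * field of the given F54 control register.
+ */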
+#define disable_cbc(ctrl_num)\
+do {\
+	retval = synaptics_rmi4_reg_read(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+	f54->control.ctrl_num->cbc_tx_carrier_selection = 0;\
+	retval = synaptics_rmi4_reg_write(rmi4_data,\
+			f54->control.ctrl_num->address,\
+			f54->control.ctrl_num->data,\
+			sizeof(f54->control.ctrl_num->data));\
+	if (retval < 0) {\
+		dev_err(rmi4_data->pdev->dev.parent,\
+				"%s: Failed to disable CBC (" #ctrl_num ")\n",\
+				__func__);\
+		return retval;\
+	} \
+} while (0)
+
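+/* Report types that can be requested through the F54 test reporting interface */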
+enum f54_report_types {
+	F54_8BIT_IMAGE = 1,
+	F54_16BIT_IMAGE = 2,
+	F54_RAW_16BIT_IMAGE = 3,
+	F54_HIGH_RESISTANCE = 4,
+	F54_TX_TO_TX_SHORTS = 5,
+	F54_RX_TO_RX_SHORTS_1 = 7,
+	F54_TRUE_BASELINE = 9,
+	F54_FULL_RAW_CAP_MIN_MAX = 13,
+	F54_RX_OPENS_1 = 14,
+	F54_TX_OPENS = 15,
+	F54_TX_TO_GND_SHORTS = 16,
+	F54_RX_TO_RX_SHORTS_2 = 17,
+	F54_RX_OPENS_2 = 18,
+	F54_FULL_RAW_CAP = 19,
+	F54_FULL_RAW_CAP_NO_RX_COUPLING = 20,
+	F54_SENSOR_SPEED = 22,
+	F54_ADC_RANGE = 23,
+	F54_TRX_OPENS = 24,
+	F54_TRX_TO_GND_SHORTS = 25,
+	F54_TRX_SHORTS = 26,
+	F54_ABS_RAW_CAP = 38,
+	F54_ABS_DELTA_CAP = 40,
+	F54_ABS_HYBRID_DELTA_CAP = 59,
+	F54_ABS_HYBRID_RAW_CAP = 63,
+	F54_AMP_FULL_RAW_CAP = 78,
+	F54_AMP_RAW_ADC = 83,
+	F54_FULL_RAW_CAP_TDDI = 92,
+	INVALID_REPORT_TYPE = -1,
+};
+
+enum f54_afe_cal {
+	F54_AFE_CAL,
+	F54_AFE_IS_CAL,
+};
+
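+/*
+ * F54 query registers, accessible both as packed bitfields and as the raw
+ * register bytes.
+ */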
+struct f54_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char f54_query2_b0__1:2;
+			unsigned char has_baseline:1;
+			unsigned char has_image8:1;
+			unsigned char f54_query2_b4__5:2;
+			unsigned char has_image16:1;
+			unsigned char f54_query2_b7:1;
+
+			/* queries 3.0 and 3.1 */
+			unsigned short clock_rate;
+
+			/* query 4 */
+			unsigned char touch_controller_family;
+
+			/* query 5 */
+			unsigned char has_pixel_touch_threshold_adjustment:1;
+			unsigned char f54_query5_b1__7:7;
+
+			/* query 6 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_interference_metric:1;
+			unsigned char has_sense_frequency_control:1;
+			unsigned char has_firmware_noise_mitigation:1;
+			unsigned char has_ctrl11:1;
+			unsigned char has_two_byte_report_rate:1;
+			unsigned char has_one_byte_report_rate:1;
+			unsigned char has_relaxation_control:1;
+
+			/* query 7 */
+			unsigned char curve_compensation_mode:2;
+			unsigned char f54_query7_b2__7:6;
+
+			/* query 8 */
+			unsigned char f54_query8_b0:1;
+			unsigned char has_iir_filter:1;
+			unsigned char has_cmn_removal:1;
+			unsigned char has_cmn_maximum:1;
+			unsigned char has_touch_hysteresis:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char has_per_frequency_noise_control:1;
+			unsigned char has_enhanced_stretch:1;
+
+			/* query 9 */
+			unsigned char has_force_fast_relaxation:1;
+			unsigned char has_multi_metric_state_machine:1;
+			unsigned char has_signal_clarity:1;
+			unsigned char has_variance_metric:1;
+			unsigned char has_0d_relaxation_control:1;
+			unsigned char has_0d_acquisition_control:1;
+			unsigned char has_status:1;
+			unsigned char has_slew_metric:1;
+
+			/* query 10 */
+			unsigned char has_h_blank:1;
+			unsigned char has_v_blank:1;
+			unsigned char has_long_h_blank:1;
+			unsigned char has_startup_fast_relaxation:1;
+			unsigned char has_esd_control:1;
+			unsigned char has_noise_mitigation2:1;
+			unsigned char has_noise_state:1;
+			unsigned char has_energy_ratio_relaxation:1;
+
+			/* query 11 */
+			unsigned char has_excessive_noise_reporting:1;
+			unsigned char has_slew_option:1;
+			unsigned char has_two_overhead_bursts:1;
+			unsigned char has_query13:1;
+			unsigned char has_one_overhead_burst:1;
+			unsigned char f54_query11_b5:1;
+			unsigned char has_ctrl88:1;
+			unsigned char has_query15:1;
+
+			/* query 12 */
+			unsigned char number_of_sensing_frequencies:4;
+			unsigned char f54_query12_b4__7:4;
+		} __packed;
+		unsigned char data[14];
+	};
+};
+
+struct f54_query_13 {
+	union {
+		struct {
+			unsigned char has_ctrl86:1;
+			unsigned char has_ctrl87:1;
+			unsigned char has_ctrl87_sub0:1;
+			unsigned char has_ctrl87_sub1:1;
+			unsigned char has_ctrl87_sub2:1;
+			unsigned char has_cidim:1;
+			unsigned char has_noise_mitigation_enhancement:1;
+			unsigned char has_rail_im:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_15 {
+	union {
+		struct {
+			unsigned char has_ctrl90:1;
+			unsigned char has_transmit_strength:1;
+			unsigned char has_ctrl87_sub3:1;
+			unsigned char has_query16:1;
+			unsigned char has_query20:1;
+			unsigned char has_query21:1;
+			unsigned char has_query22:1;
+			unsigned char has_query25:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_16 {
+	union {
+		struct {
+			unsigned char has_query17:1;
+			unsigned char has_data17:1;
+			unsigned char has_ctrl92:1;
+			unsigned char has_ctrl93:1;
+			unsigned char has_ctrl94_query18:1;
+			unsigned char has_ctrl95_query19:1;
+			unsigned char has_ctrl99:1;
+			unsigned char has_ctrl100:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_21 {
+	union {
+		struct {
+			unsigned char has_abs_rx:1;
+			unsigned char has_abs_tx:1;
+			unsigned char has_ctrl91:1;
+			unsigned char has_ctrl96:1;
+			unsigned char has_ctrl97:1;
+			unsigned char has_ctrl98:1;
+			unsigned char has_data19:1;
+			unsigned char has_query24_data18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_22 {
+	union {
+		struct {
+			unsigned char has_packed_image:1;
+			unsigned char has_ctrl101:1;
+			unsigned char has_dynamic_sense_display_ratio:1;
+			unsigned char has_query23:1;
+			unsigned char has_ctrl103_query26:1;
+			unsigned char has_ctrl104:1;
+			unsigned char has_ctrl105:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_23 {
+	union {
+		struct {
+			unsigned char has_ctrl102:1;
+			unsigned char has_ctrl102_sub1:1;
+			unsigned char has_ctrl102_sub2:1;
+			unsigned char has_ctrl102_sub4:1;
+			unsigned char has_ctrl102_sub5:1;
+			unsigned char has_ctrl102_sub9:1;
+			unsigned char has_ctrl102_sub10:1;
+			unsigned char has_ctrl102_sub11:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_25 {
+	union {
+		struct {
+			unsigned char has_ctrl106:1;
+			unsigned char has_ctrl102_sub12:1;
+			unsigned char has_ctrl107:1;
+			unsigned char has_ctrl108:1;
+			unsigned char has_ctrl109:1;
+			unsigned char has_data20:1;
+			unsigned char f54_query25_b6:1;
+			unsigned char has_query27:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_27 {
+	union {
+		struct {
+			unsigned char has_ctrl110:1;
+			unsigned char has_data21:1;
+			unsigned char has_ctrl111:1;
+			unsigned char has_ctrl112:1;
+			unsigned char has_ctrl113:1;
+			unsigned char has_data22:1;
+			unsigned char has_ctrl114:1;
+			unsigned char has_query29:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_29 {
+	union {
+		struct {
+			unsigned char has_ctrl115:1;
+			unsigned char has_ground_ring_options:1;
+			unsigned char has_lost_bursts_tuning:1;
+			unsigned char has_aux_exvcom2_select:1;
+			unsigned char has_ctrl116:1;
+			unsigned char has_data23:1;
+			unsigned char has_ctrl117:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl118:1;
+			unsigned char has_ctrl119:1;
+			unsigned char has_ctrl120:1;
+			unsigned char has_ctrl121:1;
+			unsigned char has_ctrl122_query31:1;
+			unsigned char has_ctrl123:1;
+			unsigned char has_ctrl124:1;
+			unsigned char has_query32:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_32 {
+	union {
+		struct {
+			unsigned char has_ctrl125:1;
+			unsigned char has_ctrl126:1;
+			unsigned char has_ctrl127:1;
+			unsigned char has_abs_charge_pump_disable:1;
+			unsigned char has_query33:1;
+			unsigned char has_data24:1;
+			unsigned char has_query34:1;
+			unsigned char has_query35:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_33 {
+	union {
+		struct {
+			unsigned char has_ctrl128:1;
+			unsigned char has_ctrl129:1;
+			unsigned char has_ctrl130:1;
+			unsigned char has_ctrl131:1;
+			unsigned char has_ctrl132:1;
+			unsigned char has_ctrl133:1;
+			unsigned char has_ctrl134:1;
+			unsigned char has_query36:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_35 {
+	union {
+		struct {
+			unsigned char has_data25:1;
+			unsigned char has_ctrl135:1;
+			unsigned char has_ctrl136:1;
+			unsigned char has_ctrl137:1;
+			unsigned char has_ctrl138:1;
+			unsigned char has_ctrl139:1;
+			unsigned char has_data26:1;
+			unsigned char has_ctrl140:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_36 {
+	union {
+		struct {
+			unsigned char has_ctrl141:1;
+			unsigned char has_ctrl142:1;
+			unsigned char has_query37:1;
+			unsigned char has_ctrl143:1;
+			unsigned char has_ctrl144:1;
+			unsigned char has_ctrl145:1;
+			unsigned char has_ctrl146:1;
+			unsigned char has_query38:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_38 {
+	union {
+		struct {
+			unsigned char has_ctrl147:1;
+			unsigned char has_ctrl148:1;
+			unsigned char has_ctrl149:1;
+			unsigned char has_ctrl150:1;
+			unsigned char has_ctrl151:1;
+			unsigned char has_ctrl152:1;
+			unsigned char has_ctrl153:1;
+			unsigned char has_query39:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_39 {
+	union {
+		struct {
+			unsigned char has_ctrl154:1;
+			unsigned char has_ctrl155:1;
+			unsigned char has_ctrl156:1;
+			unsigned char has_ctrl160:1;
+			unsigned char has_ctrl157_ctrl158:1;
+			unsigned char f54_query39_b5__6:2;
+			unsigned char has_query40:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_40 {
+	union {
+		struct {
+			unsigned char has_ctrl169:1;
+			unsigned char has_ctrl163_query41:1;
+			unsigned char f54_query40_b2:1;
+			unsigned char has_ctrl165_query42:1;
+			unsigned char has_ctrl166:1;
+			unsigned char has_ctrl167:1;
+			unsigned char has_ctrl168:1;
+			unsigned char has_query43:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_43 {
+	union {
+		struct {
+			unsigned char f54_query43_b0__1:2;
+			unsigned char has_ctrl171:1;
+			unsigned char has_ctrl172_query44_query45:1;
+			unsigned char has_ctrl173:1;
+			unsigned char has_ctrl174:1;
+			unsigned char has_ctrl175:1;
+			unsigned char has_query46:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_46 {
+	union {
+		struct {
+			unsigned char has_ctrl176:1;
+			unsigned char has_ctrl177_ctrl178:1;
+			unsigned char has_ctrl179:1;
+			unsigned char f54_query46_b3:1;
+			unsigned char has_data27:1;
+			unsigned char has_data28:1;
+			unsigned char f54_query46_b6:1;
+			unsigned char has_query47:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_47 {
+	union {
+		struct {
+			unsigned char f54_query47_b0:1;
+			unsigned char has_ctrl182:1;
+			unsigned char has_ctrl183:1;
+			unsigned char f54_query47_b3:1;
+			unsigned char has_ctrl185:1;
+			unsigned char has_ctrl186:1;
+			unsigned char has_ctrl187:1;
+			unsigned char has_query49:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_49 {
+	union {
+		struct {
+			unsigned char f54_query49_b0__1:2;
+			unsigned char has_ctrl188:1;
+			unsigned char has_data31:1;
+			unsigned char f54_query49_b4__6:3;
+			unsigned char has_query50:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_50 {
+	union {
+		struct {
+			unsigned char f54_query50_b0__6:7;
+			unsigned char has_query51:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_51 {
+	union {
+		struct {
+			unsigned char f54_query51_b0__4:5;
+			unsigned char has_query53_query54_ctrl198:1;
+			unsigned char has_ctrl199:1;
+			unsigned char has_query55:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_55 {
+	union {
+		struct {
+			unsigned char has_query56:1;
+			unsigned char has_data33_data34:1;
+			unsigned char has_alt_report_rate:1;
+			unsigned char has_ctrl200:1;
+			unsigned char has_ctrl201_ctrl202:1;
+			unsigned char has_ctrl203:1;
+			unsigned char has_ctrl204:1;
+			unsigned char has_query57:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_57 {
+	union {
+		struct {
+			unsigned char has_ctrl205:1;
+			unsigned char has_ctrl206:1;
+			unsigned char has_usb_bulk_read:1;
+			unsigned char has_ctrl207:1;
+			unsigned char has_ctrl208:1;
+			unsigned char has_ctrl209:1;
+			unsigned char has_ctrl210:1;
+			unsigned char has_query58:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_58 {
+	union {
+		struct {
+			unsigned char has_query59:1;
+			unsigned char has_query60:1;
+			unsigned char has_ctrl211:1;
+			unsigned char has_ctrl212:1;
+			unsigned char has_hybrid_abs_tx_axis_filtering:1;
+			unsigned char has_hybrid_abs_tx_interpolation:1;
+			unsigned char has_ctrl213:1;
+			unsigned char has_query61:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_61 {
+	union {
+		struct {
+			unsigned char has_ctrl214:1;
+			unsigned char has_ctrl215_query62_query63:1;
+			unsigned char f54_query_61_b2:1;
+			unsigned char has_ctrl216:1;
+			unsigned char has_ctrl217:1;
+			unsigned char has_misc_host_ctrl:1;
+			unsigned char hybrid_abs_buttons:1;
+			unsigned char has_query64:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_64 {
+	union {
+		struct {
+			unsigned char has_ctrl101_sub1:1;
+			unsigned char has_ctrl220:1;
+			unsigned char has_ctrl221:1;
+			unsigned char has_ctrl222:1;
+			unsigned char has_ctrl219_sub1:1;
+			unsigned char has_ctrl103_sub3:1;
+			unsigned char has_ctrl224_ctrl226_ctrl227:1;
+			unsigned char has_query65:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_65 {
+	union {
+		struct {
+			unsigned char f54_query_65_b0__1:2;
+			unsigned char has_ctrl101_sub2:1;
+			unsigned char f54_query_65_b3__4:2;
+			unsigned char has_query66_ctrl231:1;
+			unsigned char has_ctrl232:1;
+			unsigned char has_query67:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_67 {
+	union {
+		struct {
+			unsigned char has_abs_doze_spatial_filter_en:1;
+			unsigned char has_abs_doze_avg_filter_enhancement_en:1;
+			unsigned char has_single_display_pulse:1;
+			unsigned char f54_query_67_b3__4:2;
+			unsigned char has_ctrl235_ctrl236:1;
+			unsigned char f54_query_67_b6:1;
+			unsigned char has_query68:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_68 {
+	union {
+		struct {
+			unsigned char f54_query_68_b0:1;
+			unsigned char has_ctrl238:1;
+			unsigned char has_ctrl238_sub1:1;
+			unsigned char has_ctrl238_sub2:1;
+			unsigned char has_ctrl239:1;
+			unsigned char has_freq_filter_bw_ext:1;
+			unsigned char is_tddi_hic:1;
+			unsigned char has_query69:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_query_69 {
+	union {
+		struct {
+			unsigned char has_ctrl240_sub0:1;
+			unsigned char has_ctrl240_sub1_sub2:1;
+			unsigned char has_ctrl240_sub3:1;
+			unsigned char has_ctrl240_sub4:1;
+			unsigned char f54_query_69_b4__7:4;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f54_data_31 {
+	union {
+		struct {
+			unsigned char is_calibration_crc:1;
+			unsigned char calibration_crc:1;
+			unsigned char short_test_row_number:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_7 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl7_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_41 {
+	union {
+		struct {
+			unsigned char no_signal_clarity:1;
+			unsigned char f54_ctrl41_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_57 {
+	union {
+		struct {
+			unsigned char cbc_cap:3;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char f54_ctrl57_b5__7:3;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_86 {
+	union {
+		struct {
+			unsigned char enable_high_noise_state:1;
+			unsigned char dynamic_sense_display_ratio:2;
+			unsigned char f54_ctrl86_b3__7:5;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_88 {
+	union {
+		struct {
+			unsigned char tx_low_reference_polarity:1;
+			unsigned char tx_high_reference_polarity:1;
+			unsigned char abs_low_reference_polarity:1;
+			unsigned char abs_polarity:1;
+			unsigned char cbc_polarity:1;
+			unsigned char cbc_tx_carrier_selection:1;
+			unsigned char charge_pump_enable:1;
+			unsigned char cbc_abs_auto_servo:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_110 {
+	union {
+		struct {
+			unsigned char active_stylus_rx_feedback_cap;
+			unsigned char active_stylus_rx_feedback_cap_reference;
+			unsigned char active_stylus_low_reference;
+			unsigned char active_stylus_high_reference;
+			unsigned char active_stylus_gain_control;
+			unsigned char active_stylus_gain_control_reference;
+			unsigned char active_stylus_timing_mode;
+			unsigned char active_stylus_discovery_bursts;
+			unsigned char active_stylus_detection_bursts;
+			unsigned char active_stylus_discovery_noise_multiplier;
+			unsigned char active_stylus_detection_envelope_min;
+			unsigned char active_stylus_detection_envelope_max;
+			unsigned char active_stylus_lose_count;
+		} __packed;
+		struct {
+			unsigned char data[13];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_149 {
+	union {
+		struct {
+			unsigned char trans_cbc_global_cap_enable:1;
+			unsigned char f54_ctrl149_b1__7:7;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control_188 {
+	union {
+		struct {
+			unsigned char start_calibration:1;
+			unsigned char start_is_calibration:1;
+			unsigned char frequency:2;
+			unsigned char start_production_test:1;
+			unsigned char short_test_calibration:1;
+			unsigned char f54_ctrl188_b7:1;
+		} __packed;
+		struct {
+			unsigned char data[1];
+			unsigned short address;
+		} __packed;
+	};
+};
+
+struct f54_control {
+	struct f54_control_7 *reg_7;
+	struct f54_control_41 *reg_41;
+	struct f54_control_57 *reg_57;
+	struct f54_control_86 *reg_86;
+	struct f54_control_88 *reg_88;
+	struct f54_control_110 *reg_110;
+	struct f54_control_149 *reg_149;
+	struct f54_control_188 *reg_188;
+};
+
+struct synaptics_rmi4_f54_handle {
+	bool no_auto_cal;
+	bool skip_preparation;
+	unsigned char status;
+	unsigned char intr_mask;
+	unsigned char intr_reg_num;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char *report_data;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	unsigned short fifoindex;
+	unsigned int report_size;
+	unsigned int data_buffer_size;
+	unsigned int data_pos;
+	enum f54_report_types report_type;
+	struct f54_query query;
+	struct f54_query_13 query_13;
+	struct f54_query_15 query_15;
+	struct f54_query_16 query_16;
+	struct f54_query_21 query_21;
+	struct f54_query_22 query_22;
+	struct f54_query_23 query_23;
+	struct f54_query_25 query_25;
+	struct f54_query_27 query_27;
+	struct f54_query_29 query_29;
+	struct f54_query_30 query_30;
+	struct f54_query_32 query_32;
+	struct f54_query_33 query_33;
+	struct f54_query_35 query_35;
+	struct f54_query_36 query_36;
+	struct f54_query_38 query_38;
+	struct f54_query_39 query_39;
+	struct f54_query_40 query_40;
+	struct f54_query_43 query_43;
+	struct f54_query_46 query_46;
+	struct f54_query_47 query_47;
+	struct f54_query_49 query_49;
+	struct f54_query_50 query_50;
+	struct f54_query_51 query_51;
+	struct f54_query_55 query_55;
+	struct f54_query_57 query_57;
+	struct f54_query_58 query_58;
+	struct f54_query_61 query_61;
+	struct f54_query_64 query_64;
+	struct f54_query_65 query_65;
+	struct f54_query_67 query_67;
+	struct f54_query_68 query_68;
+	struct f54_query_69 query_69;
+	struct f54_data_31 data_31;
+	struct f54_control control;
+	struct mutex status_mutex;
+	struct kobject *sysfs_dir;
+	struct hrtimer watchdog;
+	struct work_struct timeout_work;
+	struct work_struct test_report_work;
+	struct workqueue_struct *test_report_workqueue;
+	struct synaptics_rmi4_data *rmi4_data;
+};
+
+struct f55_query {
+	union {
+		struct {
+			/* query 0 */
+			unsigned char num_of_rx_electrodes;
+
+			/* query 1 */
+			unsigned char num_of_tx_electrodes;
+
+			/* query 2 */
+			unsigned char has_sensor_assignment:1;
+			unsigned char has_edge_compensation:1;
+			unsigned char curve_compensation_mode:2;
+			unsigned char has_ctrl6:1;
+			unsigned char has_alternate_transmitter_assignment:1;
+			unsigned char has_single_layer_multi_touch:1;
+			unsigned char has_query5:1;
+		} __packed;
+		unsigned char data[3];
+	};
+};
+
+struct f55_query_3 {
+	union {
+		struct {
+			unsigned char has_ctrl8:1;
+			unsigned char has_ctrl9:1;
+			unsigned char has_oncell_pattern_support:1;
+			unsigned char has_data0:1;
+			unsigned char has_single_wide_pattern_support:1;
+			unsigned char has_mirrored_tx_pattern_support:1;
+			unsigned char has_discrete_pattern_support:1;
+			unsigned char has_query9:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_5 {
+	union {
+		struct {
+			unsigned char has_corner_compensation:1;
+			unsigned char has_ctrl12:1;
+			unsigned char has_trx_configuration:1;
+			unsigned char has_ctrl13:1;
+			unsigned char f55_query5_b4:1;
+			unsigned char has_ctrl14:1;
+			unsigned char has_basis_function:1;
+			unsigned char has_query17:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_17 {
+	union {
+		struct {
+			unsigned char f55_query17_b0:1;
+			unsigned char has_ctrl16:1;
+			unsigned char has_ctrl18_ctrl19:1;
+			unsigned char has_ctrl17:1;
+			unsigned char has_ctrl20:1;
+			unsigned char has_ctrl21:1;
+			unsigned char has_ctrl22:1;
+			unsigned char has_query18:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_18 {
+	union {
+		struct {
+			unsigned char has_ctrl23:1;
+			unsigned char has_ctrl24:1;
+			unsigned char has_query19:1;
+			unsigned char has_ctrl25:1;
+			unsigned char has_ctrl26:1;
+			unsigned char has_ctrl27_query20:1;
+			unsigned char has_ctrl28_query21:1;
+			unsigned char has_query22:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_22 {
+	union {
+		struct {
+			unsigned char has_ctrl29:1;
+			unsigned char has_query23:1;
+			unsigned char has_guard_disable:1;
+			unsigned char has_ctrl30:1;
+			unsigned char has_ctrl31:1;
+			unsigned char has_ctrl32:1;
+			unsigned char has_query24_through_query27:1;
+			unsigned char has_query28:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_23 {
+	union {
+		struct {
+			unsigned char amp_sensor_enabled:1;
+			unsigned char image_transposed:1;
+			unsigned char first_column_at_left_side:1;
+			unsigned char size_of_column2mux:5;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_28 {
+	union {
+		struct {
+			unsigned char f55_query28_b0__4:5;
+			unsigned char has_ctrl37:1;
+			unsigned char has_query29:1;
+			unsigned char has_query30:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_30 {
+	union {
+		struct {
+			unsigned char has_ctrl38:1;
+			unsigned char has_query31_query32:1;
+			unsigned char has_ctrl39:1;
+			unsigned char has_ctrl40:1;
+			unsigned char has_ctrl41:1;
+			unsigned char has_ctrl42:1;
+			unsigned char has_ctrl43_ctrl44:1;
+			unsigned char has_query33:1;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_query_33 {
+	union {
+		struct {
+			unsigned char has_extended_amp_pad:1;
+			unsigned char has_extended_amp_btn:1;
+			unsigned char has_ctrl45_ctrl46:1;
+			unsigned char f55_query33_b3:1;
+			unsigned char has_ctrl47_sub0_sub1:1;
+			unsigned char f55_query33_b5__7:3;
+		} __packed;
+		unsigned char data[1];
+	};
+};
+
+struct f55_control_43 {
+	union {
+		struct {
+			unsigned char swap_sensor_side:1;
+			unsigned char f55_ctrl43_b1__7:7;
+			unsigned char afe_l_mux_size:4;
+			unsigned char afe_r_mux_size:4;
+		} __packed;
+		unsigned char data[2];
+	};
+};
+
+struct synaptics_rmi4_f55_handle {
+	bool amp_sensor;
+	bool extended_amp;
+	bool has_force;
+	unsigned char size_of_column2mux;
+	unsigned char afe_mux_offset;
+	unsigned char force_tx_offset;
+	unsigned char force_rx_offset;
+	unsigned char *tx_assignment;
+	unsigned char *rx_assignment;
+	unsigned char *force_tx_assignment;
+	unsigned char *force_rx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct f55_query query;
+	struct f55_query_3 query_3;
+	struct f55_query_5 query_5;
+	struct f55_query_17 query_17;
+	struct f55_query_18 query_18;
+	struct f55_query_22 query_22;
+	struct f55_query_23 query_23;
+	struct f55_query_28 query_28;
+	struct f55_query_30 query_30;
+	struct f55_query_33 query_33;
+};
+
+struct f21_query_2 {
+	union {
+		struct {
+			unsigned char size_of_query3;
+			struct {
+				unsigned char query0_is_present:1;
+				unsigned char query1_is_present:1;
+				unsigned char query2_is_present:1;
+				unsigned char query3_is_present:1;
+				unsigned char query4_is_present:1;
+				unsigned char query5_is_present:1;
+				unsigned char query6_is_present:1;
+				unsigned char query7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char query8_is_present:1;
+				unsigned char query9_is_present:1;
+				unsigned char query10_is_present:1;
+				unsigned char query11_is_present:1;
+				unsigned char query12_is_present:1;
+				unsigned char query13_is_present:1;
+				unsigned char query14_is_present:1;
+				unsigned char query15_is_present:1;
+			} __packed;
+		};
+		unsigned char data[3];
+	};
+};
+
+struct f21_query_5 {
+	union {
+		struct {
+			unsigned char size_of_query6;
+			struct {
+				unsigned char ctrl0_is_present:1;
+				unsigned char ctrl1_is_present:1;
+				unsigned char ctrl2_is_present:1;
+				unsigned char ctrl3_is_present:1;
+				unsigned char ctrl4_is_present:1;
+				unsigned char ctrl5_is_present:1;
+				unsigned char ctrl6_is_present:1;
+				unsigned char ctrl7_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl8_is_present:1;
+				unsigned char ctrl9_is_present:1;
+				unsigned char ctrl10_is_present:1;
+				unsigned char ctrl11_is_present:1;
+				unsigned char ctrl12_is_present:1;
+				unsigned char ctrl13_is_present:1;
+				unsigned char ctrl14_is_present:1;
+				unsigned char ctrl15_is_present:1;
+			} __packed;
+			struct {
+				unsigned char ctrl16_is_present:1;
+				unsigned char ctrl17_is_present:1;
+				unsigned char ctrl18_is_present:1;
+				unsigned char ctrl19_is_present:1;
+				unsigned char ctrl20_is_present:1;
+				unsigned char ctrl21_is_present:1;
+				unsigned char ctrl22_is_present:1;
+				unsigned char ctrl23_is_present:1;
+			} __packed;
+		};
+		unsigned char data[4];
+	};
+};
+
+struct f21_query_11 {
+	union {
+		struct {
+			unsigned char has_high_resolution_force:1;
+			unsigned char has_force_sensing_txrx_mapping:1;
+			unsigned char f21_query11_00_b2__7:6;
+			unsigned char f21_query11_00_reserved;
+			unsigned char max_number_of_force_sensors;
+			unsigned char max_number_of_force_txs;
+			unsigned char max_number_of_force_rxs;
+			unsigned char f21_query11_01_reserved;
+		} __packed;
+		unsigned char data[6];
+	};
+};
+
+struct synaptics_rmi4_f21_handle {
+	bool has_force;
+	unsigned char tx_assigned;
+	unsigned char rx_assigned;
+	unsigned char max_num_of_tx;
+	unsigned char max_num_of_rx;
+	unsigned char max_num_of_txrx;
+	unsigned char *force_txrx_assignment;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+};
+
+show_prototype(num_of_mapped_tx)
+show_prototype(num_of_mapped_rx)
+show_prototype(tx_mapping)
+show_prototype(rx_mapping)
+show_prototype(num_of_mapped_force_tx)
+show_prototype(num_of_mapped_force_rx)
+show_prototype(force_tx_mapping)
+show_prototype(force_rx_mapping)
+show_prototype(report_size)
+show_prototype(status)
+store_prototype(do_preparation)
+store_prototype(force_cal)
+store_prototype(get_report)
+store_prototype(resume_touch)
+store_prototype(do_afe_calibration)
+show_store_prototype(report_type)
+show_store_prototype(fifoindex)
+show_store_prototype(no_auto_cal)
+show_store_prototype(read_report)
+
+static struct attribute *attrs[] = {
+	attrify(num_of_mapped_tx),
+	attrify(num_of_mapped_rx),
+	attrify(tx_mapping),
+	attrify(rx_mapping),
+	attrify(num_of_mapped_force_tx),
+	attrify(num_of_mapped_force_rx),
+	attrify(force_tx_mapping),
+	attrify(force_rx_mapping),
+	attrify(report_size),
+	attrify(status),
+	attrify(do_preparation),
+	attrify(force_cal),
+	attrify(get_report),
+	attrify(resume_touch),
+	attrify(do_afe_calibration),
+	attrify(report_type),
+	attrify(fifoindex),
+	attrify(no_auto_cal),
+	attrify(read_report),
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static struct bin_attribute test_report_data = {
+	.attr = {
+		.name = "report_data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = test_sysfs_data_read,
+};
+
+static struct synaptics_rmi4_f54_handle *f54;
+static struct synaptics_rmi4_f55_handle *f55;
+static struct synaptics_rmi4_f21_handle *f21;
+
+DECLARE_COMPLETION(test_remove_complete);
+
+static bool test_report_type_valid(enum f54_report_types report_type)
+{
+	switch (report_type) {
+	case F54_8BIT_IMAGE:
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_HIGH_RESISTANCE:
+	case F54_TX_TO_TX_SHORTS:
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+	case F54_RX_OPENS_1:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+	case F54_FULL_RAW_CAP_TDDI:
+		return true;
+	default:
+		f54->report_type = INVALID_REPORT_TYPE;
+		f54->report_size = 0;
+		return false;
+	}
+}
+
+static void test_set_report_size(void)
+{
+	int retval;
+	unsigned char tx = f54->tx_assigned;
+	unsigned char rx = f54->rx_assigned;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		f54->report_size = tx * rx;
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_AMP_RAW_ADC:
+	case F54_FULL_RAW_CAP_TDDI:
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_HIGH_RESISTANCE:
+		f54->report_size = HIGH_RESISTANCE_DATA_SIZE;
+		break;
+	case F54_TX_TO_TX_SHORTS:
+	case F54_TX_OPENS:
+	case F54_TX_TO_GND_SHORTS:
+		f54->report_size = (tx + 7) / 8;
+		break;
+	case F54_RX_TO_RX_SHORTS_1:
+	case F54_RX_OPENS_1:
+		if (rx < tx)
+			f54->report_size = 2 * rx * rx;
+		else
+			f54->report_size = 2 * tx * rx;
+		break;
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		f54->report_size = FULL_RAW_CAP_MIN_MAX_DATA_SIZE;
+		break;
+	case F54_RX_TO_RX_SHORTS_2:
+	case F54_RX_OPENS_2:
+		if (rx <= tx)
+			f54->report_size = 0;
+		else
+			f54->report_size = 2 * rx * (rx - tx);
+		break;
+	case F54_ADC_RANGE:
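+		/* With signal clarity in use, pad the Tx count to a multiple of 4 */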
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					f54->control.reg_41->data,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_dbg(rmi4_data->pdev->dev.parent,
+						"%s: Failed to read control reg_41\n",
+						__func__);
+				f54->report_size = 0;
+				break;
+			}
+			if (!f54->control.reg_41->no_signal_clarity) {
+				if (tx % 4)
+					tx += 4 - (tx % 4);
+			}
+		}
+		f54->report_size = 2 * tx * rx;
+		break;
+	case F54_TRX_OPENS:
+	case F54_TRX_TO_GND_SHORTS:
+	case F54_TRX_SHORTS:
+		f54->report_size = TRX_OPEN_SHORT_DATA_SIZE;
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		tx += f21->tx_assigned;
+		rx += f21->rx_assigned;
+		f54->report_size = 4 * (tx + rx);
+		break;
+	default:
+		f54->report_size = 0;
+	}
+}
+
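+/*
+ * When set is true, mask every other function's interrupts so that only the
+ * F54 data interrupt fires while a report is captured; when set is false,
+ * restore the interrupt masks saved in rmi4_data->intr_mask.
+ */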
+static int test_set_interrupt(bool set)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char zero = 0x00;
+	unsigned char *intr_mask;
+	unsigned short f01_ctrl_reg;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	intr_mask = rmi4_data->intr_mask;
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (!set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&zero,
+				sizeof(zero));
+		if (retval < 0)
+			return retval;
+	}
+
+	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+		if (intr_mask[ii] != 0x00) {
+			f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+			if (set) {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&zero,
+						sizeof(zero));
+				if (retval < 0)
+					return retval;
+			} else {
+				retval = synaptics_rmi4_reg_write(rmi4_data,
+						f01_ctrl_reg,
+						&(intr_mask[ii]),
+						sizeof(intr_mask[ii]));
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+
+	f01_ctrl_reg = rmi4_data->f01_ctrl_base_addr + 1 + f54->intr_reg_num;
+
+	if (set) {
+		retval = synaptics_rmi4_reg_write(rmi4_data,
+				f01_ctrl_reg,
+				&f54->intr_mask,
+				1);
+		if (retval < 0)
+			return retval;
+	}
+
+	return 0;
+}
+
+static int test_wait_for_command_completion(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char timeout_count;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	timeout_count = 0;
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&value,
+				sizeof(value));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+			return retval;
+		}
+
+		if (value == 0x00)
+			break;
+
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < COMMAND_TIMEOUT_100MS);
+
+	if (timeout_count == COMMAND_TIMEOUT_100MS) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Timed out waiting for command completion\n",
+				__func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int test_do_command(unsigned char command)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write command\n",
+				__func__);
+		return retval;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int test_do_preparation(void)
+{
+	int retval;
+	unsigned char value;
+	unsigned char zero = 0x00;
+	unsigned char device_ctrl;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl |= NO_SLEEP_ON;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set no sleep\n",
+				__func__);
+		return retval;
+	}
+
+	if (f54->skip_preparation)
+		return 0;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 1;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
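+		/*
+		 * For all other report types, disable CBC (2D, 0D and global)
+		 * and signal clarity, then force an update and a calibration.
+		 */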
+		if (f54->query.touch_controller_family == 1)
+			disable_cbc(reg_7);
+		else if (f54->query.has_ctrl88)
+			disable_cbc(reg_88);
+
+		if (f54->query.has_0d_acquisition_control)
+			disable_cbc(reg_57);
+
+		if ((f54->query.has_query15) &&
+				(f54->query_15.has_query25) &&
+				(f54->query_25.has_query27) &&
+				(f54->query_27.has_query29) &&
+				(f54->query_29.has_query30) &&
+				(f54->query_30.has_query32) &&
+				(f54->query_32.has_query33) &&
+				(f54->query_33.has_query36) &&
+				(f54->query_36.has_query38) &&
+				(f54->query_38.has_ctrl149)) {
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_149->address,
+					&zero,
+					sizeof(f54->control.reg_149->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable global CBC\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		if (f54->query.has_signal_clarity) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+			value |= 0x01;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_41->address,
+					&value,
+					sizeof(f54->control.reg_41->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to disable signal clarity\n",
+						__func__);
+				return retval;
+			}
+		}
+
+		retval = test_do_command(COMMAND_FORCE_UPDATE);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force update\n",
+					__func__);
+			return retval;
+		}
+
+		retval = test_do_command(COMMAND_FORCE_CAL);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to do force cal\n",
+					__func__);
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+static int test_do_afe_calibration(enum f54_afe_cal mode)
+{
+	int retval;
+	unsigned char timeout = CALIBRATION_TIMEOUT_S;
+	unsigned char timeout_count = 0;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL)
+		f54->control.reg_188->start_calibration = 1;
+	else if (mode == F54_AFE_IS_CAL)
+		f54->control.reg_188->start_is_calibration = 1;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control.reg_188->address,
+			f54->control.reg_188->data,
+			sizeof(f54->control.reg_188->data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to start calibration\n",
+				__func__);
+		return retval;
+	}
+
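+	/* Poll once per second until the calibration bit clears or the timeout expires */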
+	do {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_188->address,
+				f54->control.reg_188->data,
+				sizeof(f54->control.reg_188->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to complete calibration\n",
+					__func__);
+			return retval;
+		}
+
+		if (mode == F54_AFE_CAL) {
+			if (!f54->control.reg_188->start_calibration)
+				break;
+		} else if (mode == F54_AFE_IS_CAL) {
+			if (!f54->control.reg_188->start_is_calibration)
+				break;
+		}
+
+		if (timeout_count == timeout) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Timed out waiting for calibration completion\n",
+					__func__);
+			return -EBUSY;
+		}
+
+		timeout_count++;
+		msleep(1000);
+	} while (true);
+
+	/* check CRC */
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_31.address,
+			f54->data_31.data,
+			sizeof(f54->data_31.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read calibration CRC\n",
+				__func__);
+		return retval;
+	}
+
+	if (mode == F54_AFE_CAL) {
+		if (f54->data_31.calibration_crc == 0)
+			return 0;
+	} else if (mode == F54_AFE_IS_CAL) {
+		if (f54->data_31.is_calibration_crc == 0)
+			return 0;
+	}
+
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to read calibration CRC\n",
+			__func__);
+
+	return -EINVAL;
+}
+
+static int test_check_for_idle_status(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	switch (f54->status) {
+	case STATUS_IDLE:
+		retval = 0;
+		break;
+	case STATUS_BUSY:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status busy\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	case STATUS_ERROR:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Status error\n",
+				__func__);
+		retval = -EINVAL;
+		break;
+	default:
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid status (%d)\n",
+				__func__, f54->status);
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
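+/*
+ * Watchdog handler for report collection: if the get-report command is still
+ * pending in the command register the report type is not supported by the
+ * firmware; if the command has completed without an interrupt being serviced,
+ * read the report from the queued work instead.
+ */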
+static void test_timeout_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char command;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status == STATUS_BUSY) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->command_base_addr,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read command register\n",
+					__func__);
+		} else if (command & COMMAND_GET_REPORT) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Report type not supported by FW\n",
+					__func__);
+		} else {
+			queue_work(f54->test_report_workqueue,
+					&f54->test_report_work);
+			goto exit;
+		}
+		f54->status = STATUS_ERROR;
+		f54->report_size = 0;
+	}
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+}
+
+static enum hrtimer_restart test_get_report_timeout(struct hrtimer *timer)
+{
+	schedule_work(&(f54->timeout_work));
+
+	return HRTIMER_NORESTART;
+}
+
+static ssize_t test_sysfs_num_of_mapped_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->tx_assigned);
+}
+
+static ssize_t test_sysfs_num_of_mapped_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->rx_assigned);
+}
+
+static ssize_t test_sysfs_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		tx_num = f55->tx_assignment[ii];
+		if (tx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", tx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if (!f55)
+		return -EINVAL;
+
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		rx_num = f55->rx_assignment[ii];
+		if (rx_num == 0xff)
+			cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+		else
+			cnt = snprintf(buf, PAGE_SIZE - count, "%02u ", rx_num);
+		buf += cnt;
+		count += cnt;
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_num_of_mapped_force_tx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f21->tx_assigned);
+}
+
+static ssize_t test_sysfs_num_of_mapped_force_rx_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f21->rx_assigned);
+}
+
+static ssize_t test_sysfs_force_tx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char tx_num;
+	unsigned char tx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		tx_electrodes = f55->query.num_of_tx_electrodes;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f55->force_tx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21->has_force) {
+		tx_electrodes = f21->max_num_of_tx;
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			tx_num = f21->force_txrx_assignment[ii];
+			if (tx_num == 0xff) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			} else {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						tx_num);
+			}
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_force_rx_mapping_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int cnt;
+	int count = 0;
+	unsigned char ii;
+	unsigned char offset;
+	unsigned char rx_num;
+	unsigned char rx_electrodes;
+
+	if ((!f55 || !f55->has_force) && (!f21 || !f21->has_force))
+		return -EINVAL;
+
+	if (f55 && f55->has_force) {
+		rx_electrodes = f55->query.num_of_rx_electrodes;
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			rx_num = f55->force_rx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	} else if (f21->has_force) {
+		offset = f21->max_num_of_tx;
+		rx_electrodes = f21->max_num_of_rx;
+
+		for (ii = offset; ii < (rx_electrodes + offset); ii++) {
+			rx_num = f21->force_txrx_assignment[ii];
+			if (rx_num == 0xff)
+				cnt = snprintf(buf, PAGE_SIZE - count, "xx ");
+			else
+				cnt = snprintf(buf, PAGE_SIZE - count, "%02u ",
+						rx_num);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
+static ssize_t test_sysfs_report_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_size);
+}
+
+static ssize_t test_sysfs_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", f54->status);
+
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_do_preparation_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_preparation();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do preparation\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_force_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	retval = test_do_command(COMMAND_FORCE_CAL);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to do force cal\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_get_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char command;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid(f54->report_type)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Invalid report type\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	test_set_interrupt(true);
+
+	command = (unsigned char)COMMAND_GET_REPORT;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->command_base_addr,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write get report command\n",
+				__func__);
+		goto exit;
+	}
+
+	f54->status = STATUS_BUSY;
+	f54->report_size = 0;
+	f54->data_pos = 0;
+
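+	/* Arm the watchdog to catch a report request that never completes */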
+	hrtimer_start(&f54->watchdog,
+			ktime_set(GET_REPORT_TIMEOUT_S, 0),
+			HRTIMER_MODE_REL);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_resume_touch_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char device_ctrl;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting != 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	device_ctrl = device_ctrl & ~NO_SLEEP_ON;
+	device_ctrl |= rmi4_data->no_sleep_setting;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			rmi4_data->f01_ctrl_base_addr,
+			&device_ctrl,
+			sizeof(device_ctrl));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to restore no sleep setting\n",
+				__func__);
+		return retval;
+	}
+
+	test_set_interrupt(false);
+
+	if (f54->skip_preparation)
+		return count;
+
+	switch (f54->report_type) {
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_SENSOR_SPEED:
+	case F54_ADC_RANGE:
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		break;
+	case F54_AMP_RAW_ADC:
+		if (f54->query_49.has_ctrl188) {
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+			f54->control.reg_188->start_production_test = 0;
+			retval = synaptics_rmi4_reg_write(rmi4_data,
+					f54->control.reg_188->address,
+					f54->control.reg_188->data,
+					sizeof(f54->control.reg_188->data));
+			if (retval < 0) {
+				dev_err(rmi4_data->pdev->dev.parent,
+						"%s: Failed to set start production test\n",
+						__func__);
+				return retval;
+			}
+		}
+		break;
+	default:
+		rmi4_data->reset_device(rmi4_data, false);
+	}
+
+	return count;
+}
+
+static ssize_t test_sysfs_do_afe_calibration_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (!f54->query_49.has_ctrl188) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: F54_ANALOG_Ctrl188 not found\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (setting == 0 || setting == 1)
+		retval = test_do_afe_calibration((enum f54_afe_cal)setting);
+	else
+		return -EINVAL;
+
+	if (retval)
+		return retval;
+	else
+		return count;
+}
+
+static ssize_t test_sysfs_report_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->report_type);
+}
+
+static ssize_t test_sysfs_report_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!test_report_type_valid((enum f54_report_types)setting)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type not supported by driver\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	f54->report_type = (enum f54_report_types)setting;
+	data = (unsigned char)setting;
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report type\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
+static ssize_t test_sysfs_fifoindex_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned char data[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report index\n",
+				__func__);
+		return retval;
+	}
+
+	batohs(&f54->fifoindex, data);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->fifoindex);
+}
+
+static ssize_t test_sysfs_fifoindex_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data[2];
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	f54->fifoindex = setting;
+
+	hstoba(data, (unsigned short)setting);
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report index\n",
+				__func__);
+		return retval;
+	}
+
+	return count;
+}
+
+static ssize_t test_sysfs_no_auto_cal_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n", f54->no_auto_cal);
+}
+
+static ssize_t test_sysfs_no_auto_cal_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char data;
+	unsigned long setting;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = sstrtoul(buf, 10, &setting);
+	if (retval)
+		return retval;
+
+	if (setting > 1)
+		return -EINVAL;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	if (setting)
+		data |= CONTROL_NO_AUTO_CAL;
+	else
+		data &= ~CONTROL_NO_AUTO_CAL;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->control_base_addr,
+			&data,
+			sizeof(data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write no auto cal setting\n",
+				__func__);
+		return retval;
+	}
+
+	f54->no_auto_cal = (setting == 1);
+
+	return count;
+}
+
+static ssize_t test_sysfs_read_report_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int ii;
+	unsigned int jj;
+	int cnt;
+	int count = 0;
+	int tx_num = f54->tx_assigned;
+	int rx_num = f54->rx_assigned;
+	char *report_data_8;
+	short *report_data_16;
+	int *report_data_32;
+	unsigned short *report_data_u16;
+	unsigned int *report_data_u32;
+
+	switch (f54->report_type) {
+	case F54_8BIT_IMAGE:
+		report_data_8 = (char *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii, *report_data_8);
+			report_data_8++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_AMP_RAW_ADC:
+		report_data_u16 = (unsigned short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_u16);
+				report_data_u16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_u16);
+			report_data_u16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_16BIT_IMAGE:
+	case F54_RAW_16BIT_IMAGE:
+	case F54_TRUE_BASELINE:
+	case F54_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_NO_RX_COUPLING:
+	case F54_SENSOR_SPEED:
+	case F54_AMP_FULL_RAW_CAP:
+	case F54_FULL_RAW_CAP_TDDI:
+		report_data_16 = (short *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx = %d\nrx = %d\n",
+				tx_num, rx_num);
+		buf += cnt;
+		count += cnt;
+
+		for (ii = 0; ii < tx_num; ii++) {
+			for (jj = 0; jj < (rx_num - 1); jj++) {
+				cnt = snprintf(buf, PAGE_SIZE - count, "%-4d ",
+						*report_data_16);
+				report_data_16++;
+				buf += cnt;
+				count += cnt;
+			}
+			cnt = snprintf(buf, PAGE_SIZE - count, "%-4d\n",
+					*report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_HIGH_RESISTANCE:
+	case F54_FULL_RAW_CAP_MIN_MAX:
+		report_data_16 = (short *)f54->report_data;
+		for (ii = 0; ii < f54->report_size; ii += 2) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: %d\n",
+					ii / 2, *report_data_16);
+			report_data_16++;
+			buf += cnt;
+			count += cnt;
+		}
+		break;
+	case F54_ABS_RAW_CAP:
+	case F54_ABS_HYBRID_RAW_CAP:
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_u32 = (unsigned int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5u",
+					*report_data_u32);
+			report_data_u32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	case F54_ABS_DELTA_CAP:
+	case F54_ABS_HYBRID_DELTA_CAP:
+		tx_num += f21->tx_assigned;
+		rx_num += f21->rx_assigned;
+		report_data_32 = (int *)f54->report_data;
+		cnt = snprintf(buf, PAGE_SIZE - count, "rx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < rx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "tx ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "     %2d", ii);
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+
+		cnt = snprintf(buf, PAGE_SIZE - count, "   ");
+		buf += cnt;
+		count += cnt;
+		for (ii = 0; ii < tx_num; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "  %5d",
+					*report_data_32);
+			report_data_32++;
+			buf += cnt;
+			count += cnt;
+		}
+		cnt = snprintf(buf, PAGE_SIZE - count, "\n");
+		buf += cnt;
+		count += cnt;
+		break;
+	default:
+		for (ii = 0; ii < f54->report_size; ii++) {
+			cnt = snprintf(buf, PAGE_SIZE - count, "%03d: 0x%02x\n",
+					ii, f54->report_data[ii]);
+			buf += cnt;
+			count += cnt;
+		}
+	}
+
+	snprintf(buf, PAGE_SIZE - count, "\n");
+	count++;
+
+	return count;
+}
+
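+/*
+ * Writing a report type here runs the full capture sequence: set the report
+ * type, do the preparation steps, issue the get-report command, poll until
+ * the handler leaves the busy state, then resume normal touch reporting.
+ * The device is reset on any failure.
+ */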
+static ssize_t test_sysfs_read_report_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned char timeout = GET_REPORT_TIMEOUT_S * 10;
+	unsigned char timeout_count;
+	const char cmd[] = {'1', 0};
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = test_sysfs_report_type_store(dev, attr, buf, count);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_do_preparation_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	retval = test_sysfs_get_report_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	timeout_count = 0;
+	do {
+		if (f54->status != STATUS_BUSY)
+			break;
+		msleep(100);
+		timeout_count++;
+	} while (timeout_count < timeout);
+
+	if ((f54->status != STATUS_IDLE) || (f54->report_size == 0)) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report\n",
+				__func__);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = test_sysfs_resume_touch_store(dev, attr, cmd, 1);
+	if (retval < 0)
+		goto exit;
+
+	return count;
+
+exit:
+	rmi4_data->reset_device(rmi4_data, false);
+
+	return retval;
+}
+
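+/*
+ * Bin file read handler for the captured report data. Copies from the report
+ * buffer starting at the current read position and clamps the transfer to
+ * the remaining report size.
+ */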
+static ssize_t test_sysfs_data_read(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int read_size;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	retval = test_check_for_idle_status();
+	if (retval < 0)
+		goto exit;
+
+	if (!f54->report_data) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report type %d data not available\n",
+				__func__, f54->report_type);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if ((f54->data_pos + count) > f54->report_size)
+		read_size = f54->report_size - f54->data_pos;
+	else
+		read_size = min_t(unsigned int, count, f54->report_size);
+
+	retval = secure_memcpy(buf, count, f54->report_data + f54->data_pos,
+			f54->data_buffer_size - f54->data_pos, read_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to copy report data\n",
+				__func__);
+		goto exit;
+	}
+	f54->data_pos += read_size;
+	retval = read_size;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	return retval;
+}
+
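+/*
+ * Deferred work scheduled from the attention handler. Waits for the
+ * get-report command to complete, sizes and (re)allocates the report buffer,
+ * resets the report index, and reads the report data from the F54 data
+ * registers.
+ */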
+static void test_report_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char report_index[2];
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	mutex_lock(&f54->status_mutex);
+
+	if (f54->status != STATUS_BUSY) {
+		retval = f54->status;
+		goto exit;
+	}
+
+	retval = test_wait_for_command_completion();
+	if (retval < 0) {
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	test_set_report_size();
+	if (f54->report_size == 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Report data size = 0\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	if (f54->data_buffer_size < f54->report_size) {
+		if (f54->data_buffer_size)
+			kfree(f54->report_data);
+		f54->report_data = kzalloc(f54->report_size, GFP_KERNEL);
+		if (!f54->report_data) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to alloc mem for data buffer\n",
+					__func__);
+			f54->data_buffer_size = 0;
+			retval = STATUS_ERROR;
+			goto exit;
+		}
+		f54->data_buffer_size = f54->report_size;
+	}
+
+	report_index[0] = 0;
+	report_index[1] = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			f54->data_base_addr + REPORT_INDEX_OFFSET,
+			report_index,
+			sizeof(report_index));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to write report data index\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->data_base_addr + REPORT_DATA_OFFSET,
+			f54->report_data,
+			f54->report_size);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read report data\n",
+				__func__);
+		retval = STATUS_ERROR;
+		goto exit;
+	}
+
+	retval = STATUS_IDLE;
+
+exit:
+	mutex_unlock(&f54->status_mutex);
+
+	if (retval == STATUS_ERROR)
+		f54->report_size = 0;
+
+	f54->status = retval;
+
+	return;
+}
+
+static void test_remove_sysfs(void)
+{
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+	kobject_put(f54->sysfs_dir);
+
+	return;
+}
+
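+/*
+ * Creates the sysfs folder under the input device along with the report data
+ * bin file and the attribute group.
+ */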
+static int test_set_sysfs(void)
+{
+	int retval;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	f54->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!f54->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		goto exit_directory;
+	}
+
+	retval = sysfs_create_bin_file(f54->sysfs_dir, &test_report_data);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs bin file\n",
+				__func__);
+		goto exit_bin_file;
+	}
+
+	retval = sysfs_create_group(f54->sysfs_dir, &attr_group);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs attributes\n",
+				__func__);
+		goto exit_attributes;
+	}
+
+	return 0;
+
+exit_attributes:
+	sysfs_remove_group(f54->sysfs_dir, &attr_group);
+	sysfs_remove_bin_file(f54->sysfs_dir, &test_report_data);
+
+exit_bin_file:
+	kobject_put(f54->sysfs_dir);
+
+exit_directory:
+	return -ENODEV;
+}
+
+static void test_free_control_mem(void)
+{
+	struct f54_control control = f54->control;
+
+	kfree(control.reg_7);
+	kfree(control.reg_41);
+	kfree(control.reg_57);
+	kfree(control.reg_86);
+	kfree(control.reg_88);
+	kfree(control.reg_110);
+	kfree(control.reg_149);
+	kfree(control.reg_188);
+
+	return;
+}
+
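+/*
+ * Walks the F54 data register presence bits in register order; only the
+ * address of data register 31 is recorded for later use.
+ */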
+static void test_set_data(void)
+{
+	unsigned short reg_addr;
+
+	reg_addr = f54->data_base_addr + REPORT_DATA_OFFSET + 1;
+
+	/* data 4 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr++;
+
+	/* data 5 reserved */
+
+	/* data 6 */
+	if (f54->query.has_interference_metric)
+		reg_addr += 2;
+
+	/* data 7 */
+	if (f54->query.has_one_byte_report_rate |
+			f54->query.has_two_byte_report_rate)
+		reg_addr++;
+	if (f54->query.has_two_byte_report_rate)
+		reg_addr++;
+
+	/* data 8 */
+	if (f54->query.has_variance_metric)
+		reg_addr += 2;
+
+	/* data 9 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 10 */
+	if (f54->query.has_multi_metric_state_machine |
+			f54->query.has_noise_state)
+		reg_addr++;
+
+	/* data 11 */
+	if (f54->query.has_status)
+		reg_addr++;
+
+	/* data 12 */
+	if (f54->query.has_slew_metric)
+		reg_addr += 2;
+
+	/* data 13 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += 2;
+
+	/* data 14 */
+	if (f54->query_13.has_cidim)
+		reg_addr++;
+
+	/* data 15 */
+	if (f54->query_13.has_rail_im)
+		reg_addr++;
+
+	/* data 16 */
+	if (f54->query_13.has_noise_mitigation_enhancement)
+		reg_addr++;
+
+	/* data 17 */
+	if (f54->query_16.has_data17)
+		reg_addr++;
+
+	/* data 18 */
+	if (f54->query_21.has_query24_data18)
+		reg_addr++;
+
+	/* data 19 */
+	if (f54->query_21.has_data19)
+		reg_addr++;
+
+	/* data 20 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr++;
+
+	/* data 21 */
+	if (f54->query_27.has_data21)
+		reg_addr++;
+
+	/* data 22 */
+	if (f54->query_27.has_data22)
+		reg_addr++;
+
+	/* data 23 */
+	if (f54->query_29.has_data23)
+		reg_addr++;
+
+	/* data 24 */
+	if (f54->query_32.has_data24)
+		reg_addr++;
+
+	/* data 25 */
+	if (f54->query_35.has_data25)
+		reg_addr++;
+
+	/* data 26 */
+	if (f54->query_35.has_data26)
+		reg_addr++;
+
+	/* data 27 */
+	if (f54->query_46.has_data27)
+		reg_addr++;
+
+	/* data 28 */
+	if (f54->query_46.has_data28)
+		reg_addr++;
+
+	/* data 29 30 reserved */
+
+	/* data 31 */
+	if (f54->query_49.has_data31) {
+		f54->data_31.address = reg_addr;
+		reg_addr++;
+	}
+
+	return;
+}
+
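+/*
+ * Walks the F54 control register map in register order, advancing the
+ * running address by the size of every register reported as present and
+ * allocating storage only for the control registers accessed directly
+ * (7, 41, 57, 86, 88, 110, 149 and 188).
+ */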
+static int test_set_controls(void)
+{
+	int retval;
+	unsigned char length;
+	unsigned char num_of_sensing_freqs;
+	unsigned short reg_addr = f54->control_base_addr;
+	struct f54_control *control = &f54->control;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	num_of_sensing_freqs = f54->query.number_of_sensing_frequencies;
+
+	/* control 0 */
+	reg_addr += CONTROL_0_SIZE;
+
+	/* control 1 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_1_SIZE;
+
+	/* control 2 */
+	reg_addr += CONTROL_2_SIZE;
+
+	/* control 3 */
+	if (f54->query.has_pixel_touch_threshold_adjustment)
+		reg_addr += CONTROL_3_SIZE;
+
+	/* controls 4 5 6 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_4_6_SIZE;
+
+	/* control 7 */
+	if (f54->query.touch_controller_family == 1) {
+		control->reg_7 = kzalloc(sizeof(*(control->reg_7)),
+				GFP_KERNEL);
+		if (!control->reg_7)
+			goto exit_no_mem;
+		control->reg_7->address = reg_addr;
+		reg_addr += CONTROL_7_SIZE;
+	}
+
+	/* controls 8 9 */
+	if ((f54->query.touch_controller_family == 0) ||
+			(f54->query.touch_controller_family == 1))
+		reg_addr += CONTROL_8_9_SIZE;
+
+	/* control 10 */
+	if (f54->query.has_interference_metric)
+		reg_addr += CONTROL_10_SIZE;
+
+	/* control 11 */
+	if (f54->query.has_ctrl11)
+		reg_addr += CONTROL_11_SIZE;
+
+	/* controls 12 13 */
+	if (f54->query.has_relaxation_control)
+		reg_addr += CONTROL_12_13_SIZE;
+
+	/* controls 14 15 16 */
+	if (f54->query.has_sensor_assignment) {
+		reg_addr += CONTROL_14_SIZE;
+		reg_addr += CONTROL_15_SIZE * f54->query.num_of_rx_electrodes;
+		reg_addr += CONTROL_16_SIZE * f54->query.num_of_tx_electrodes;
+	}
+
+	/* controls 17 18 19 */
+	if (f54->query.has_sense_frequency_control) {
+		reg_addr += CONTROL_17_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_18_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_19_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 20 */
+	reg_addr += CONTROL_20_SIZE;
+
+	/* control 21 */
+	if (f54->query.has_sense_frequency_control)
+		reg_addr += CONTROL_21_SIZE;
+
+	/* controls 22 23 24 25 26 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_22_26_SIZE;
+
+	/* control 27 */
+	if (f54->query.has_iir_filter)
+		reg_addr += CONTROL_27_SIZE;
+
+	/* control 28 */
+	if (f54->query.has_firmware_noise_mitigation)
+		reg_addr += CONTROL_28_SIZE;
+
+	/* control 29 */
+	if (f54->query.has_cmn_removal)
+		reg_addr += CONTROL_29_SIZE;
+
+	/* control 30 */
+	if (f54->query.has_cmn_maximum)
+		reg_addr += CONTROL_30_SIZE;
+
+	/* control 31 */
+	if (f54->query.has_touch_hysteresis)
+		reg_addr += CONTROL_31_SIZE;
+
+	/* controls 32 33 34 35 */
+	if (f54->query.has_edge_compensation)
+		reg_addr += CONTROL_32_35_SIZE;
+
+	/* control 36 */
+	if ((f54->query.curve_compensation_mode == 1) ||
+			(f54->query.curve_compensation_mode == 2)) {
+		if (f54->query.curve_compensation_mode == 1) {
+			length = max(f54->query.num_of_rx_electrodes,
+					f54->query.num_of_tx_electrodes);
+		} else if (f54->query.curve_compensation_mode == 2) {
+			length = f54->query.num_of_rx_electrodes;
+		}
+		reg_addr += CONTROL_36_SIZE * length;
+	}
+
+	/* control 37 */
+	if (f54->query.curve_compensation_mode == 2)
+		reg_addr += CONTROL_37_SIZE * f54->query.num_of_tx_electrodes;
+
+	/* controls 38 39 40 */
+	if (f54->query.has_per_frequency_noise_control) {
+		reg_addr += CONTROL_38_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_39_SIZE * num_of_sensing_freqs;
+		reg_addr += CONTROL_40_SIZE * num_of_sensing_freqs;
+	}
+
+	/* control 41 */
+	if (f54->query.has_signal_clarity) {
+		control->reg_41 = kzalloc(sizeof(*(control->reg_41)),
+				GFP_KERNEL);
+		if (!control->reg_41)
+			goto exit_no_mem;
+		control->reg_41->address = reg_addr;
+		reg_addr += CONTROL_41_SIZE;
+	}
+
+	/* control 42 */
+	if (f54->query.has_variance_metric)
+		reg_addr += CONTROL_42_SIZE;
+
+	/* controls 43 44 45 46 47 48 49 50 51 52 53 54 */
+	if (f54->query.has_multi_metric_state_machine)
+		reg_addr += CONTROL_43_54_SIZE;
+
+	/* controls 55 56 */
+	if (f54->query.has_0d_relaxation_control)
+		reg_addr += CONTROL_55_56_SIZE;
+
+	/* control 57 */
+	if (f54->query.has_0d_acquisition_control) {
+		control->reg_57 = kzalloc(sizeof(*(control->reg_57)),
+				GFP_KERNEL);
+		if (!control->reg_57)
+			goto exit_no_mem;
+		control->reg_57->address = reg_addr;
+		reg_addr += CONTROL_57_SIZE;
+	}
+
+	/* control 58 */
+	if (f54->query.has_0d_acquisition_control)
+		reg_addr += CONTROL_58_SIZE;
+
+	/* control 59 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_59_SIZE;
+
+	/* controls 60 61 62 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_60_62_SIZE;
+
+	/* control 63 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank) ||
+			(f54->query.has_slew_metric) ||
+			(f54->query.has_slew_option) ||
+			(f54->query.has_noise_mitigation2))
+		reg_addr += CONTROL_63_SIZE;
+
+	/* controls 64 65 66 67 */
+	if (f54->query.has_h_blank)
+		reg_addr += CONTROL_64_67_SIZE * 7;
+	else if ((f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank))
+		reg_addr += CONTROL_64_67_SIZE;
+
+	/* controls 68 69 70 71 72 73 */
+	if ((f54->query.has_h_blank) ||
+			(f54->query.has_v_blank) ||
+			(f54->query.has_long_h_blank)) {
+		if (f54->query_68.is_tddi_hic)
+			reg_addr += CONTROL_70_73_SIZE;
+		else
+			reg_addr += CONTROL_68_73_SIZE;
+	}
+
+	/* control 74 */
+	if (f54->query.has_slew_metric)
+		reg_addr += CONTROL_74_SIZE;
+
+	/* control 75 */
+	if (f54->query.has_enhanced_stretch)
+		reg_addr += CONTROL_75_SIZE * num_of_sensing_freqs;
+
+	/* control 76 */
+	if (f54->query.has_startup_fast_relaxation)
+		reg_addr += CONTROL_76_SIZE;
+
+	/* controls 77 78 */
+	if (f54->query.has_esd_control)
+		reg_addr += CONTROL_77_78_SIZE;
+
+	/* controls 79 80 81 82 83 */
+	if (f54->query.has_noise_mitigation2)
+		reg_addr += CONTROL_79_83_SIZE;
+
+	/* controls 84 85 */
+	if (f54->query.has_energy_ratio_relaxation)
+		reg_addr += CONTROL_84_85_SIZE;
+
+	/* control 86 */
+	if (f54->query_13.has_ctrl86) {
+		control->reg_86 = kzalloc(sizeof(*(control->reg_86)),
+				GFP_KERNEL);
+		if (!control->reg_86)
+			goto exit_no_mem;
+		control->reg_86->address = reg_addr;
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->control.reg_86->address,
+				f54->control.reg_86->data,
+				sizeof(f54->control.reg_86->data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read sense display ratio\n",
+					__func__);
+			return retval;
+		}
+		reg_addr += CONTROL_86_SIZE;
+	}
+
+	/* control 87 */
+	if (f54->query_13.has_ctrl87)
+		reg_addr += CONTROL_87_SIZE;
+
+	/* control 88 */
+	if (f54->query.has_ctrl88) {
+		control->reg_88 = kzalloc(sizeof(*(control->reg_88)),
+				GFP_KERNEL);
+		if (!control->reg_88)
+			goto exit_no_mem;
+		control->reg_88->address = reg_addr;
+		reg_addr += CONTROL_88_SIZE;
+	}
+
+	/* control 89 */
+	if (f54->query_13.has_cidim ||
+			f54->query_13.has_noise_mitigation_enhancement ||
+			f54->query_13.has_rail_im)
+		reg_addr += CONTROL_89_SIZE;
+
+	/* control 90 */
+	if (f54->query_15.has_ctrl90)
+		reg_addr += CONTROL_90_SIZE;
+
+	/* control 91 */
+	if (f54->query_21.has_ctrl91)
+		reg_addr += CONTROL_91_SIZE;
+
+	/* control 92 */
+	if (f54->query_16.has_ctrl92)
+		reg_addr += CONTROL_92_SIZE;
+
+	/* control 93 */
+	if (f54->query_16.has_ctrl93)
+		reg_addr += CONTROL_93_SIZE;
+
+	/* control 94 */
+	if (f54->query_16.has_ctrl94_query18)
+		reg_addr += CONTROL_94_SIZE;
+
+	/* control 95 */
+	if (f54->query_16.has_ctrl95_query19)
+		reg_addr += CONTROL_95_SIZE;
+
+	/* control 96 */
+	if (f54->query_21.has_ctrl96)
+		reg_addr += CONTROL_96_SIZE;
+
+	/* control 97 */
+	if (f54->query_21.has_ctrl97)
+		reg_addr += CONTROL_97_SIZE;
+
+	/* control 98 */
+	if (f54->query_21.has_ctrl98)
+		reg_addr += CONTROL_98_SIZE;
+
+	/* control 99 */
+	if (f54->query.touch_controller_family == 2)
+		reg_addr += CONTROL_99_SIZE;
+
+	/* control 100 */
+	if (f54->query_16.has_ctrl100)
+		reg_addr += CONTROL_100_SIZE;
+
+	/* control 101 */
+	if (f54->query_22.has_ctrl101)
+		reg_addr += CONTROL_101_SIZE;
+
+	/* control 102 */
+	if (f54->query_23.has_ctrl102)
+		reg_addr += CONTROL_102_SIZE;
+
+	/* control 103 */
+	if (f54->query_22.has_ctrl103_query26) {
+		f54->skip_preparation = true;
+		reg_addr += CONTROL_103_SIZE;
+	}
+
+	/* control 104 */
+	if (f54->query_22.has_ctrl104)
+		reg_addr += CONTROL_104_SIZE;
+
+	/* control 105 */
+	if (f54->query_22.has_ctrl105)
+		reg_addr += CONTROL_105_SIZE;
+
+	/* control 106 */
+	if (f54->query_25.has_ctrl106)
+		reg_addr += CONTROL_106_SIZE;
+
+	/* control 107 */
+	if (f54->query_25.has_ctrl107)
+		reg_addr += CONTROL_107_SIZE;
+
+	/* control 108 */
+	if (f54->query_25.has_ctrl108)
+		reg_addr += CONTROL_108_SIZE;
+
+	/* control 109 */
+	if (f54->query_25.has_ctrl109)
+		reg_addr += CONTROL_109_SIZE;
+
+	/* control 110 */
+	if (f54->query_27.has_ctrl110) {
+		control->reg_110 = kzalloc(sizeof(*(control->reg_110)),
+				GFP_KERNEL);
+		if (!control->reg_110)
+			goto exit_no_mem;
+		control->reg_110->address = reg_addr;
+		reg_addr += CONTROL_110_SIZE;
+	}
+
+	/* control 111 */
+	if (f54->query_27.has_ctrl111)
+		reg_addr += CONTROL_111_SIZE;
+
+	/* control 112 */
+	if (f54->query_27.has_ctrl112)
+		reg_addr += CONTROL_112_SIZE;
+
+	/* control 113 */
+	if (f54->query_27.has_ctrl113)
+		reg_addr += CONTROL_113_SIZE;
+
+	/* control 114 */
+	if (f54->query_27.has_ctrl114)
+		reg_addr += CONTROL_114_SIZE;
+
+	/* control 115 */
+	if (f54->query_29.has_ctrl115)
+		reg_addr += CONTROL_115_SIZE;
+
+	/* control 116 */
+	if (f54->query_29.has_ctrl116)
+		reg_addr += CONTROL_116_SIZE;
+
+	/* control 117 */
+	if (f54->query_29.has_ctrl117)
+		reg_addr += CONTROL_117_SIZE;
+
+	/* control 118 */
+	if (f54->query_30.has_ctrl118)
+		reg_addr += CONTROL_118_SIZE;
+
+	/* control 119 */
+	if (f54->query_30.has_ctrl119)
+		reg_addr += CONTROL_119_SIZE;
+
+	/* control 120 */
+	if (f54->query_30.has_ctrl120)
+		reg_addr += CONTROL_120_SIZE;
+
+	/* control 121 */
+	if (f54->query_30.has_ctrl121)
+		reg_addr += CONTROL_121_SIZE;
+
+	/* control 122 */
+	if (f54->query_30.has_ctrl122_query31)
+		reg_addr += CONTROL_122_SIZE;
+
+	/* control 123 */
+	if (f54->query_30.has_ctrl123)
+		reg_addr += CONTROL_123_SIZE;
+
+	/* control 124 */
+	if (f54->query_30.has_ctrl124)
+		reg_addr += CONTROL_124_SIZE;
+
+	/* control 125 */
+	if (f54->query_32.has_ctrl125)
+		reg_addr += CONTROL_125_SIZE;
+
+	/* control 126 */
+	if (f54->query_32.has_ctrl126)
+		reg_addr += CONTROL_126_SIZE;
+
+	/* control 127 */
+	if (f54->query_32.has_ctrl127)
+		reg_addr += CONTROL_127_SIZE;
+
+	/* control 128 */
+	if (f54->query_33.has_ctrl128)
+		reg_addr += CONTROL_128_SIZE;
+
+	/* control 129 */
+	if (f54->query_33.has_ctrl129)
+		reg_addr += CONTROL_129_SIZE;
+
+	/* control 130 */
+	if (f54->query_33.has_ctrl130)
+		reg_addr += CONTROL_130_SIZE;
+
+	/* control 131 */
+	if (f54->query_33.has_ctrl131)
+		reg_addr += CONTROL_131_SIZE;
+
+	/* control 132 */
+	if (f54->query_33.has_ctrl132)
+		reg_addr += CONTROL_132_SIZE;
+
+	/* control 133 */
+	if (f54->query_33.has_ctrl133)
+		reg_addr += CONTROL_133_SIZE;
+
+	/* control 134 */
+	if (f54->query_33.has_ctrl134)
+		reg_addr += CONTROL_134_SIZE;
+
+	/* control 135 */
+	if (f54->query_35.has_ctrl135)
+		reg_addr += CONTROL_135_SIZE;
+
+	/* control 136 */
+	if (f54->query_35.has_ctrl136)
+		reg_addr += CONTROL_136_SIZE;
+
+	/* control 137 */
+	if (f54->query_35.has_ctrl137)
+		reg_addr += CONTROL_137_SIZE;
+
+	/* control 138 */
+	if (f54->query_35.has_ctrl138)
+		reg_addr += CONTROL_138_SIZE;
+
+	/* control 139 */
+	if (f54->query_35.has_ctrl139)
+		reg_addr += CONTROL_139_SIZE;
+
+	/* control 140 */
+	if (f54->query_35.has_ctrl140)
+		reg_addr += CONTROL_140_SIZE;
+
+	/* control 141 */
+	if (f54->query_36.has_ctrl141)
+		reg_addr += CONTROL_141_SIZE;
+
+	/* control 142 */
+	if (f54->query_36.has_ctrl142)
+		reg_addr += CONTROL_142_SIZE;
+
+	/* control 143 */
+	if (f54->query_36.has_ctrl143)
+		reg_addr += CONTROL_143_SIZE;
+
+	/* control 144 */
+	if (f54->query_36.has_ctrl144)
+		reg_addr += CONTROL_144_SIZE;
+
+	/* control 145 */
+	if (f54->query_36.has_ctrl145)
+		reg_addr += CONTROL_145_SIZE;
+
+	/* control 146 */
+	if (f54->query_36.has_ctrl146)
+		reg_addr += CONTROL_146_SIZE;
+
+	/* control 147 */
+	if (f54->query_38.has_ctrl147)
+		reg_addr += CONTROL_147_SIZE;
+
+	/* control 148 */
+	if (f54->query_38.has_ctrl148)
+		reg_addr += CONTROL_148_SIZE;
+
+	/* control 149 */
+	if (f54->query_38.has_ctrl149) {
+		control->reg_149 = kzalloc(sizeof(*(control->reg_149)),
+				GFP_KERNEL);
+		if (!control->reg_149)
+			goto exit_no_mem;
+		control->reg_149->address = reg_addr;
+		reg_addr += CONTROL_149_SIZE;
+	}
+
+	/* control 150 */
+	if (f54->query_38.has_ctrl150)
+		reg_addr += CONTROL_150_SIZE;
+
+	/* control 151 */
+	if (f54->query_38.has_ctrl151)
+		reg_addr += CONTROL_151_SIZE;
+
+	/* control 152 */
+	if (f54->query_38.has_ctrl152)
+		reg_addr += CONTROL_152_SIZE;
+
+	/* control 153 */
+	if (f54->query_38.has_ctrl153)
+		reg_addr += CONTROL_153_SIZE;
+
+	/* control 154 */
+	if (f54->query_39.has_ctrl154)
+		reg_addr += CONTROL_154_SIZE;
+
+	/* control 155 */
+	if (f54->query_39.has_ctrl155)
+		reg_addr += CONTROL_155_SIZE;
+
+	/* control 156 */
+	if (f54->query_39.has_ctrl156)
+		reg_addr += CONTROL_156_SIZE;
+
+	/* controls 157 158 */
+	if (f54->query_39.has_ctrl157_ctrl158)
+		reg_addr += CONTROL_157_158_SIZE;
+
+	/* controls 159 to 162 reserved */
+
+	/* control 163 */
+	if (f54->query_40.has_ctrl163_query41)
+		reg_addr += CONTROL_163_SIZE;
+
+	/* control 164 reserved */
+
+	/* control 165 */
+	if (f54->query_40.has_ctrl165_query42)
+		reg_addr += CONTROL_165_SIZE;
+
+	/* control 166 */
+	if (f54->query_40.has_ctrl166)
+		reg_addr += CONTROL_166_SIZE;
+
+	/* control 167 */
+	if (f54->query_40.has_ctrl167)
+		reg_addr += CONTROL_167_SIZE;
+
+	/* control 168 */
+	if (f54->query_40.has_ctrl168)
+		reg_addr += CONTROL_168_SIZE;
+
+	/* control 169 */
+	if (f54->query_40.has_ctrl169)
+		reg_addr += CONTROL_169_SIZE;
+
+	/* control 170 reserved */
+
+	/* control 171 */
+	if (f54->query_43.has_ctrl171)
+		reg_addr += CONTROL_171_SIZE;
+
+	/* control 172 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		reg_addr += CONTROL_172_SIZE;
+
+	/* control 173 */
+	if (f54->query_43.has_ctrl173)
+		reg_addr += CONTROL_173_SIZE;
+
+	/* control 174 */
+	if (f54->query_43.has_ctrl174)
+		reg_addr += CONTROL_174_SIZE;
+
+	/* control 175 */
+	if (f54->query_43.has_ctrl175)
+		reg_addr += CONTROL_175_SIZE;
+
+	/* control 176 */
+	if (f54->query_46.has_ctrl176)
+		reg_addr += CONTROL_176_SIZE;
+
+	/* controls 177 178 */
+	if (f54->query_46.has_ctrl177_ctrl178)
+		reg_addr += CONTROL_177_178_SIZE;
+
+	/* control 179 */
+	if (f54->query_46.has_ctrl179)
+		reg_addr += CONTROL_179_SIZE;
+
+	/* controls 180 to 181 reserved */
+
+	/* control 182 */
+	if (f54->query_47.has_ctrl182)
+		reg_addr += CONTROL_182_SIZE;
+
+	/* control 183 */
+	if (f54->query_47.has_ctrl183)
+		reg_addr += CONTROL_183_SIZE;
+
+	/* control 184 reserved */
+
+	/* control 185 */
+	if (f54->query_47.has_ctrl185)
+		reg_addr += CONTROL_185_SIZE;
+
+	/* control 186 */
+	if (f54->query_47.has_ctrl186)
+		reg_addr += CONTROL_186_SIZE;
+
+	/* control 187 */
+	if (f54->query_47.has_ctrl187)
+		reg_addr += CONTROL_187_SIZE;
+
+	/* control 188 */
+	if (f54->query_49.has_ctrl188) {
+		control->reg_188 = kzalloc(sizeof(*(control->reg_188)),
+				GFP_KERNEL);
+		if (!control->reg_188)
+			goto exit_no_mem;
+		control->reg_188->address = reg_addr;
+		reg_addr += CONTROL_188_SIZE;
+	}
+
+	return 0;
+
+exit_no_mem:
+	dev_err(rmi4_data->pdev->dev.parent,
+			"%s: Failed to alloc mem for control registers\n",
+			__func__);
+	return -ENOMEM;
+}
+
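+/*
+ * Reads the base F54 query block, then follows the chain of presence bits to
+ * read each extended query register, bumping the offset only for registers
+ * that exist on the device.
+ */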
+static int test_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f54->query_base_addr,
+			f54->query.data,
+			sizeof(f54->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f54->query.data);
+
+	/* query 12 */
+	if (f54->query.has_sense_frequency_control == 0)
+		offset -= 1;
+
+	/* query 13 */
+	if (f54->query.has_query13) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_13.data,
+				sizeof(f54->query_13.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 14 */
+	if (f54->query_13.has_ctrl87)
+		offset += 1;
+
+	/* query 15 */
+	if (f54->query.has_query15) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_15.data,
+				sizeof(f54->query_15.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 16 */
+	if (f54->query_15.has_query16) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_16.data,
+				sizeof(f54->query_16.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 17 */
+	if (f54->query_16.has_query17)
+		offset += 1;
+
+	/* query 18 */
+	if (f54->query_16.has_ctrl94_query18)
+		offset += 1;
+
+	/* query 19 */
+	if (f54->query_16.has_ctrl95_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f54->query_15.has_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f54->query_15.has_query21) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_21.data,
+				sizeof(f54->query_21.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 22 */
+	if (f54->query_15.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_22.data,
+				sizeof(f54->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f54->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_23.data,
+				sizeof(f54->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 24 */
+	if (f54->query_21.has_query24_data18)
+		offset += 1;
+
+	/* query 25 */
+	if (f54->query_15.has_query25) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_25.data,
+				sizeof(f54->query_25.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 26 */
+	if (f54->query_22.has_ctrl103_query26)
+		offset += 1;
+
+	/* query 27 */
+	if (f54->query_25.has_query27) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_27.data,
+				sizeof(f54->query_27.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 28 */
+	if (f54->query_22.has_query28)
+		offset += 1;
+
+	/* query 29 */
+	if (f54->query_27.has_query29) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_29.data,
+				sizeof(f54->query_29.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 30 */
+	if (f54->query_29.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_30.data,
+				sizeof(f54->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 31 */
+	if (f54->query_30.has_ctrl122_query31)
+		offset += 1;
+
+	/* query 32 */
+	if (f54->query_30.has_query32) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_32.data,
+				sizeof(f54->query_32.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 33 */
+	if (f54->query_32.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_33.data,
+				sizeof(f54->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 34 */
+	if (f54->query_32.has_query34)
+		offset += 1;
+
+	/* query 35 */
+	if (f54->query_32.has_query35) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_35.data,
+				sizeof(f54->query_35.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 36 */
+	if (f54->query_33.has_query36) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_36.data,
+				sizeof(f54->query_36.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 37 */
+	if (f54->query_36.has_query37)
+		offset += 1;
+
+	/* query 38 */
+	if (f54->query_36.has_query38) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_38.data,
+				sizeof(f54->query_38.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 39 */
+	if (f54->query_38.has_query39) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_39.data,
+				sizeof(f54->query_39.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 40 */
+	if (f54->query_39.has_query40) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_40.data,
+				sizeof(f54->query_40.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 41 */
+	if (f54->query_40.has_ctrl163_query41)
+		offset += 1;
+
+	/* query 42 */
+	if (f54->query_40.has_ctrl165_query42)
+		offset += 1;
+
+	/* query 43 */
+	if (f54->query_40.has_query43) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_43.data,
+				sizeof(f54->query_43.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 44 45 */
+	if (f54->query_43.has_ctrl172_query44_query45)
+		offset += 2;
+
+	/* query 46 */
+	if (f54->query_43.has_query46) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_46.data,
+				sizeof(f54->query_46.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 47 */
+	if (f54->query_46.has_query47) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_47.data,
+				sizeof(f54->query_47.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 48 reserved */
+
+	/* query 49 */
+	if (f54->query_47.has_query49) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_49.data,
+				sizeof(f54->query_49.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 50 */
+	if (f54->query_49.has_query50) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_50.data,
+				sizeof(f54->query_50.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 51 */
+	if (f54->query_50.has_query51) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_51.data,
+				sizeof(f54->query_51.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 53 54 */
+	if (f54->query_51.has_query53_query54_ctrl198)
+		offset += 2;
+
+	/* query 55 */
+	if (f54->query_51.has_query55) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_55.data,
+				sizeof(f54->query_55.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 56 */
+	if (f54->query_55.has_query56)
+		offset += 1;
+
+	/* query 57 */
+	if (f54->query_55.has_query57) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_57.data,
+				sizeof(f54->query_57.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 58 */
+	if (f54->query_57.has_query58) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_58.data,
+				sizeof(f54->query_58.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 59 */
+	if (f54->query_58.has_query59)
+		offset += 1;
+
+	/* query 60 */
+	if (f54->query_58.has_query60)
+		offset += 1;
+
+	/* query 61 */
+	if (f54->query_58.has_query61) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_61.data,
+				sizeof(f54->query_61.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 62 63 */
+	if (f54->query_61.has_ctrl215_query62_query63)
+		offset += 2;
+
+	/* query 64 */
+	if (f54->query_61.has_query64) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_64.data,
+				sizeof(f54->query_64.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 65 */
+	if (f54->query_64.has_query65) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_65.data,
+				sizeof(f54->query_65.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 66 */
+	if (f54->query_65.has_query66_ctrl231)
+		offset += 1;
+
+	/* query 67 */
+	if (f54->query_65.has_query67) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_67.data,
+				sizeof(f54->query_67.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 68 */
+	if (f54->query_67.has_query68) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_68.data,
+				sizeof(f54->query_68.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 69 */
+	if (f54->query_68.has_query69) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f54->query_base_addr + offset,
+				f54->query_69.data,
+				sizeof(f54->query_69.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	return 0;
+}
+
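+/*
+ * Records the F54 register base addresses (adjusted for the PDT page) and
+ * derives the interrupt register number and mask covering this function's
+ * interrupt sources.
+ */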
+static void test_f54_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned int intr_count,
+		unsigned char page)
+{
+	unsigned char ii;
+	unsigned char intr_offset;
+
+	f54->query_base_addr = fd->query_base_addr | (page << 8);
+	f54->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f54->data_base_addr = fd->data_base_addr | (page << 8);
+	f54->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	f54->intr_reg_num = (intr_count + 7) / 8;
+	if (f54->intr_reg_num != 0)
+		f54->intr_reg_num -= 1;
+
+	f54->intr_mask = 0;
+	intr_offset = intr_count % 8;
+	for (ii = intr_offset;
+			ii < (fd->intr_src_count + intr_offset);
+			ii++) {
+		f54->intr_mask |= 1 << ii;
+	}
+
+	return;
+}
+
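+/*
+ * Computes the offsets of the F55 control registers of interest (AFE mux
+ * sizes and force tx/rx assignments) by stepping over the preceding
+ * registers reported as present.
+ */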
+static int test_f55_set_controls(void)
+{
+	unsigned char offset = 0;
+
+	/* controls 0 1 2 */
+	if (f55->query.has_sensor_assignment)
+		offset += 3;
+
+	/* control 3 */
+	if (f55->query.has_edge_compensation)
+		offset++;
+
+	/* control 4 */
+	if (f55->query.curve_compensation_mode == 0x1 ||
+			f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 5 */
+	if (f55->query.curve_compensation_mode == 0x2)
+		offset++;
+
+	/* control 6 */
+	if (f55->query.has_ctrl6)
+		offset++;
+
+	/* control 7 */
+	if (f55->query.has_alternate_transmitter_assignment)
+		offset++;
+
+	/* control 8 */
+	if (f55->query_3.has_ctrl8)
+		offset++;
+
+	/* control 9 */
+	if (f55->query_3.has_ctrl9)
+		offset++;
+
+	/* control 10 */
+	if (f55->query_5.has_corner_compensation)
+		offset++;
+
+	/* control 11 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset++;
+
+	/* control 12 */
+	if (f55->query_5.has_ctrl12)
+		offset++;
+
+	/* control 13 */
+	if (f55->query_5.has_ctrl13)
+		offset++;
+
+	/* control 14 */
+	if (f55->query_5.has_ctrl14)
+		offset++;
+
+	/* control 15 */
+	if (f55->query_5.has_basis_function)
+		offset++;
+
+	/* control 16 */
+	if (f55->query_17.has_ctrl16)
+		offset++;
+
+	/* control 17 */
+	if (f55->query_17.has_ctrl17)
+		offset++;
+
+	/* controls 18 19 */
+	if (f55->query_17.has_ctrl18_ctrl19)
+		offset += 2;
+
+	/* control 20 */
+	if (f55->query_17.has_ctrl20)
+		offset++;
+
+	/* control 21 */
+	if (f55->query_17.has_ctrl21)
+		offset++;
+
+	/* control 22 */
+	if (f55->query_17.has_ctrl22)
+		offset++;
+
+	/* control 23 */
+	if (f55->query_18.has_ctrl23)
+		offset++;
+
+	/* control 24 */
+	if (f55->query_18.has_ctrl24)
+		offset++;
+
+	/* control 25 */
+	if (f55->query_18.has_ctrl25)
+		offset++;
+
+	/* control 26 */
+	if (f55->query_18.has_ctrl26)
+		offset++;
+
+	/* control 27 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset++;
+
+	/* control 28 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset++;
+
+	/* control 29 */
+	if (f55->query_22.has_ctrl29)
+		offset++;
+
+	/* control 30 */
+	if (f55->query_22.has_ctrl30)
+		offset++;
+
+	/* control 31 */
+	if (f55->query_22.has_ctrl31)
+		offset++;
+
+	/* control 32 */
+	if (f55->query_22.has_ctrl32)
+		offset++;
+
+	/* controls 33 34 35 36 reserved */
+
+	/* control 37 */
+	if (f55->query_28.has_ctrl37)
+		offset++;
+
+	/* control 38 */
+	if (f55->query_30.has_ctrl38)
+		offset++;
+
+	/* control 39 */
+	if (f55->query_30.has_ctrl39)
+		offset++;
+
+	/* control 40 */
+	if (f55->query_30.has_ctrl40)
+		offset++;
+
+	/* control 41 */
+	if (f55->query_30.has_ctrl41)
+		offset++;
+
+	/* control 42 */
+	if (f55->query_30.has_ctrl42)
+		offset++;
+
+	/* controls 43 44 */
+	if (f55->query_30.has_ctrl43_ctrl44) {
+		f55->afe_mux_offset = offset;
+		offset += 2;
+	}
+
+	/* controls 45 46 */
+	if (f55->query_33.has_ctrl45_ctrl46) {
+		f55->has_force = true;
+		f55->force_tx_offset = offset;
+		f55->force_rx_offset = offset + 1;
+		offset += 2;
+	}
+
+	return 0;
+}
+
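+/*
+ * Reads the chained F55 query registers and caches the AMP sensor and
+ * extended AMP capabilities used later when computing channel counts.
+ */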
+static int test_f55_set_queries(void)
+{
+	int retval;
+	unsigned char offset;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->query_base_addr,
+			f55->query.data,
+			sizeof(f55->query.data));
+	if (retval < 0)
+		return retval;
+
+	offset = sizeof(f55->query.data);
+
+	/* query 3 */
+	if (f55->query.has_single_layer_multi_touch) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_3.data,
+				sizeof(f55->query_3.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 4 */
+	if (f55->query_3.has_ctrl9)
+		offset += 1;
+
+	/* query 5 */
+	if (f55->query.has_query5) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_5.data,
+				sizeof(f55->query_5.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 6 7 */
+	if (f55->query.curve_compensation_mode == 0x3)
+		offset += 2;
+
+	/* query 8 */
+	if (f55->query_3.has_ctrl8)
+		offset += 1;
+
+	/* query 9 */
+	if (f55->query_3.has_query9)
+		offset += 1;
+
+	/* queries 10 11 12 13 14 15 16 */
+	if (f55->query_5.has_basis_function)
+		offset += 7;
+
+	/* query 17 */
+	if (f55->query_5.has_query17) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_17.data,
+				sizeof(f55->query_17.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 18 */
+	if (f55->query_17.has_query18) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_18.data,
+				sizeof(f55->query_18.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 19 */
+	if (f55->query_18.has_query19)
+		offset += 1;
+
+	/* query 20 */
+	if (f55->query_18.has_ctrl27_query20)
+		offset += 1;
+
+	/* query 21 */
+	if (f55->query_18.has_ctrl28_query21)
+		offset += 1;
+
+	/* query 22 */
+	if (f55->query_18.has_query22) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_22.data,
+				sizeof(f55->query_22.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 23 */
+	if (f55->query_22.has_query23) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_23.data,
+				sizeof(f55->query_23.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->amp_sensor = f55->query_23.amp_sensor_enabled;
+		f55->size_of_column2mux = f55->query_23.size_of_column2mux;
+	}
+
+	/* queries 24 25 26 27 reserved */
+
+	/* query 28 */
+	if (f55->query_22.has_query28) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_28.data,
+				sizeof(f55->query_28.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* query 29 */
+	if (f55->query_28.has_query29)
+		offset += 1;
+
+	/* query 30 */
+	if (f55->query_28.has_query30) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_30.data,
+				sizeof(f55->query_30.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+	}
+
+	/* queries 31 32 */
+	if (f55->query_30.has_query31_query32)
+		offset += 2;
+
+	/* query 33 */
+	if (f55->query_30.has_query33) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->query_base_addr + offset,
+				f55->query_33.data,
+				sizeof(f55->query_33.data));
+		if (retval < 0)
+			return retval;
+		offset += 1;
+
+		f55->extended_amp = f55->query_33.has_extended_amp_pad;
+	}
+
+	return 0;
+}
+
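+/*
+ * Reads the F55 sensor tx/rx assignments to count the channels actually
+ * routed, then adjusts the counts for AMP sensors, extended AMP pads and
+ * force-sensing electrodes.
+ */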
+static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char rx_electrodes;
+	unsigned char tx_electrodes;
+	struct f55_control_43 ctrl_43;
+
+	retval = test_f55_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 query registers\n",
+				__func__);
+		return;
+	}
+
+	if (!f55->query.has_sensor_assignment)
+		return;
+
+	retval = test_f55_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F55 control registers\n",
+				__func__);
+		return;
+	}
+
+	tx_electrodes = f55->query.num_of_tx_electrodes;
+	rx_electrodes = f55->query.num_of_rx_electrodes;
+
+	f55->tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+	f55->rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_TX_MAPPING_OFFSET,
+			f55->tx_assignment,
+			tx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 tx assignment\n",
+				__func__);
+		return;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f55->control_base_addr + SENSOR_RX_MAPPING_OFFSET,
+			f55->rx_assignment,
+			rx_electrodes);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F55 rx assignment\n",
+				__func__);
+		return;
+	}
+
+	f54->tx_assigned = 0;
+	for (ii = 0; ii < tx_electrodes; ii++) {
+		if (f55->tx_assignment[ii] != 0xff)
+			f54->tx_assigned++;
+	}
+
+	f54->rx_assigned = 0;
+	for (ii = 0; ii < rx_electrodes; ii++) {
+		if (f55->rx_assignment[ii] != 0xff)
+			f54->rx_assigned++;
+	}
+
+	if (f55->amp_sensor) {
+		f54->tx_assigned = f55->size_of_column2mux;
+		f54->rx_assigned /= 2;
+	}
+
+	if (f55->extended_amp) {
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->afe_mux_offset,
+				ctrl_43.data,
+				sizeof(ctrl_43.data));
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 AFE mux sizes\n",
+					__func__);
+			return;
+		}
+
+		f54->tx_assigned = ctrl_43.afe_l_mux_size +
+				ctrl_43.afe_r_mux_size;
+	}
+
+	/* force mapping */
+	if (f55->has_force) {
+		f55->force_tx_assignment = kzalloc(tx_electrodes, GFP_KERNEL);
+		f55->force_rx_assignment = kzalloc(rx_electrodes, GFP_KERNEL);
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_tx_offset,
+				f55->force_tx_assignment,
+				tx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force tx assignment\n",
+					__func__);
+			return;
+		}
+
+		retval = synaptics_rmi4_reg_read(rmi4_data,
+				f55->control_base_addr + f55->force_rx_offset,
+				f55->force_rx_assignment,
+				rx_electrodes);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to read F55 force rx assignment\n",
+					__func__);
+			return;
+		}
+
+		for (ii = 0; ii < tx_electrodes; ii++) {
+			if (f55->force_tx_assignment[ii] != 0xff)
+				f54->tx_assigned++;
+		}
+
+		for (ii = 0; ii < rx_electrodes; ii++) {
+			if (f55->force_rx_assignment[ii] != 0xff)
+				f54->rx_assigned++;
+		}
+	}
+
+	return;
+}
+
+static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f55 = kzalloc(sizeof(*f55), GFP_KERNEL);
+	if (!f55) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F55\n",
+				__func__);
+		return;
+	}
+
+	f55->query_base_addr = fd->query_base_addr | (page << 8);
+	f55->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f55->data_base_addr = fd->data_base_addr | (page << 8);
+	f55->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	return;
+}
+
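+/*
+ * Queries F21 for force-sensing capabilities and reads the force tx/rx
+ * assignment table to count the assigned force channels.
+ */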
+static void test_f21_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char size_of_query2;
+	unsigned char size_of_query5;
+	unsigned char query_11_offset;
+	unsigned char ctrl_4_offset;
+	struct f21_query_2 *query_2 = NULL;
+	struct f21_query_5 *query_5 = NULL;
+	struct f21_query_11 *query_11 = NULL;
+
+	query_2 = kzalloc(sizeof(*query_2), GFP_KERNEL);
+	if (!query_2) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_2\n",
+				__func__);
+		goto exit;
+	}
+
+	query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
+	if (!query_5) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_5\n",
+				__func__);
+		goto exit;
+	}
+
+	query_11 = kzalloc(sizeof(*query_11), GFP_KERNEL);
+	if (!query_11) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for query_11\n",
+				__func__);
+		goto exit;
+	}
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 1,
+			&size_of_query2,
+			sizeof(size_of_query2));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query2 > sizeof(query_2->data))
+		size_of_query2 = sizeof(query_2->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 2,
+			query_2->data,
+			size_of_query2);
+	if (retval < 0)
+		goto exit;
+
+	if (!query_2->query11_is_present) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force capabilities\n",
+				__func__);
+		goto exit;
+	}
+
+	query_11_offset = query_2->query0_is_present +
+			query_2->query1_is_present +
+			query_2->query2_is_present +
+			query_2->query3_is_present +
+			query_2->query4_is_present +
+			query_2->query5_is_present +
+			query_2->query6_is_present +
+			query_2->query7_is_present +
+			query_2->query8_is_present +
+			query_2->query9_is_present +
+			query_2->query10_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 11,
+			query_11->data,
+			sizeof(query_11->data));
+	if (retval < 0)
+		goto exit;
+
+	if (!query_11->has_force_sensing_txrx_mapping) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: No F21 force mapping\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->max_num_of_tx = query_11->max_number_of_force_txs;
+	f21->max_num_of_rx = query_11->max_number_of_force_rxs;
+	f21->max_num_of_txrx = f21->max_num_of_tx + f21->max_num_of_rx;
+
+	f21->force_txrx_assignment = kzalloc(f21->max_num_of_txrx, GFP_KERNEL);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 4,
+			&size_of_query5,
+			sizeof(size_of_query5));
+	if (retval < 0)
+		goto exit;
+
+	if (size_of_query5 > sizeof(query_5->data))
+		size_of_query5 = sizeof(query_5->data);
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->query_base_addr + 5,
+			query_5->data,
+			size_of_query5);
+	if (retval < 0)
+		goto exit;
+
+	ctrl_4_offset = query_5->ctrl0_is_present +
+			query_5->ctrl1_is_present +
+			query_5->ctrl2_is_present +
+			query_5->ctrl3_is_present;
+
+	retval = synaptics_rmi4_reg_read(rmi4_data,
+			f21->control_base_addr + ctrl_4_offset,
+			f21->force_txrx_assignment,
+			f21->max_num_of_txrx);
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F21 force txrx assignment\n",
+				__func__);
+		goto exit;
+	}
+
+	f21->has_force = true;
+
+	for (ii = 0; ii < f21->max_num_of_tx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->tx_assigned++;
+	}
+
+	for (ii = f21->max_num_of_tx; ii < f21->max_num_of_txrx; ii++) {
+		if (f21->force_txrx_assignment[ii] != 0xff)
+			f21->rx_assigned++;
+	}
+
+exit:
+	kfree(query_2);
+	kfree(query_5);
+	kfree(query_11);
+
+	return;
+}
+
+static void test_f21_set_regs(struct synaptics_rmi4_data *rmi4_data,
+		struct synaptics_rmi4_fn_desc *fd,
+		unsigned char page)
+{
+	f21 = kzalloc(sizeof(*f21), GFP_KERNEL);
+	if (!f21) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F21\n",
+				__func__);
+		return;
+	}
+
+	f21->query_base_addr = fd->query_base_addr | (page << 8);
+	f21->control_base_addr = fd->ctrl_base_addr | (page << 8);
+	f21->data_base_addr = fd->data_base_addr | (page << 8);
+	f21->command_base_addr = fd->cmd_base_addr | (page << 8);
+
+	return;
+}
+
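+/*
+ * Scans the Page Description Table on each page for the F54, F55 and F21
+ * function descriptors, accumulating interrupt source counts until F54
+ * (and F55, if present) are found.
+ */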
+static int test_scan_pdt(void)
+{
+	int retval;
+	unsigned char intr_count = 0;
+	unsigned char page;
+	unsigned short addr;
+	bool f54found = false;
+	bool f55found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			switch (rmi_fd.fn_number) {
+			case SYNAPTICS_RMI4_F54:
+				test_f54_set_regs(rmi4_data,
+						&rmi_fd, intr_count, page);
+				f54found = true;
+				break;
+			case SYNAPTICS_RMI4_F55:
+				test_f55_set_regs(rmi4_data,
+						&rmi_fd, page);
+				f55found = true;
+				break;
+			case SYNAPTICS_RMI4_F21:
+				test_f21_set_regs(rmi4_data,
+						&rmi_fd, page);
+				break;
+			default:
+				break;
+			}
+
+			if (f54found && f55found)
+				goto pdt_done;
+
+			intr_count += rmi_fd.intr_src_count;
+		}
+	}
+
+	if (!f54found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F54\n",
+				__func__);
+		return -EINVAL;
+	}
+
+pdt_done:
+	return 0;
+}
+
+static void synaptics_rmi4_test_attn(struct synaptics_rmi4_data *rmi4_data,
+		unsigned char intr_mask)
+{
+	if (!f54)
+		return;
+
+	if (f54->intr_mask & intr_mask)
+		queue_work(f54->test_report_workqueue, &f54->test_report_work);
+
+	return;
+}
+
+static int synaptics_rmi4_test_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (f54) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	f54 = kzalloc(sizeof(*f54), GFP_KERNEL);
+	if (!f54) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for F54\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	f54->rmi4_data = rmi4_data;
+
+	f55 = NULL;
+
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	retval = test_set_sysfs();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs entries\n",
+				__func__);
+		goto exit_sysfs;
+	}
+
+	f54->test_report_workqueue =
+			create_singlethread_workqueue("test_report_workqueue");
+	INIT_WORK(&f54->test_report_work, test_report_work);
+
+	hrtimer_init(&f54->watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	f54->watchdog.function = test_get_report_timeout;
+	INIT_WORK(&f54->timeout_work, test_timeout_work);
+
+	mutex_init(&f54->status_mutex);
+	f54->status = STATUS_IDLE;
+
+	return 0;
+
+exit_sysfs:
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	kfree(f21);
+	f21 = NULL;
+	kfree(f55);
+	f55 = NULL;
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_test_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!f54)
+		goto exit;
+
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+exit:
+	complete(&test_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_test_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+
+	if (!f54) {
+		synaptics_rmi4_test_init(rmi4_data);
+		return;
+	}
+
+	if (f21)
+		kfree(f21->force_txrx_assignment);
+
+	if (f55) {
+		kfree(f55->tx_assignment);
+		kfree(f55->rx_assignment);
+		kfree(f55->force_tx_assignment);
+		kfree(f55->force_rx_assignment);
+	}
+
+	test_free_control_mem();
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f21);
+	f21 = NULL;
+
+	retval = test_scan_pdt();
+	if (retval < 0)
+		goto exit_free_mem;
+
+	retval = test_set_queries();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to read F54 query registers\n",
+				__func__);
+		goto exit_free_mem;
+	}
+
+	f54->tx_assigned = f54->query.num_of_tx_electrodes;
+	f54->rx_assigned = f54->query.num_of_rx_electrodes;
+
+	retval = test_set_controls();
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to set up F54 control registers\n",
+				__func__);
+		goto exit_free_control;
+	}
+
+	test_set_data();
+
+	if (f55)
+		test_f55_init(rmi4_data);
+
+	if (f21)
+		test_f21_init(rmi4_data);
+
+	if (rmi4_data->external_afe_buttons)
+		f54->tx_assigned++;
+
+	f54->status = STATUS_IDLE;
+
+	return;
+
+exit_free_control:
+	test_free_control_mem();
+
+exit_free_mem:
+	hrtimer_cancel(&f54->watchdog);
+
+	cancel_work_sync(&f54->test_report_work);
+	flush_workqueue(f54->test_report_workqueue);
+	destroy_workqueue(f54->test_report_workqueue);
+
+	test_remove_sysfs();
+
+	if (f54->data_buffer_size)
+		kfree(f54->report_data);
+
+	kfree(f21);
+	f21 = NULL;
+
+	kfree(f55);
+	f55 = NULL;
+
+	kfree(f54);
+	f54 = NULL;
+
+	return;
+}
+
+static struct synaptics_rmi4_exp_fn test_module = {
+	.fn_type = RMI_TEST_REPORTING,
+	.init = synaptics_rmi4_test_init,
+	.remove = synaptics_rmi4_test_remove,
+	.reset = synaptics_rmi4_test_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+	.suspend = NULL,
+	.resume = NULL,
+	.late_resume = NULL,
+	.attn = synaptics_rmi4_test_attn,
+};
+
+static int __init rmi4_test_module_init(void)
+{
+	synaptics_rmi4_new_function(&test_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_test_module_exit(void)
+{
+	synaptics_rmi4_new_function(&test_module, false);
+
+	wait_for_completion(&test_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_test_module_init);
+module_exit(rmi4_test_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Test Reporting Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
new file mode 100644
index 0000000..b9ae0ac
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_video.c
@@ -0,0 +1,416 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_dsx_core.h"
+
+#define SYSFS_FOLDER_NAME "video"
+
+/*
+*#define RMI_DCS_SUSPEND_RESUME
+*/
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count);
+
+static int video_send_dcs_command(unsigned char command_opcode);
+
+struct f38_command {
+	union {
+		struct {
+			unsigned char command_opcode;
+			unsigned char register_access:1;
+			unsigned char gamma_page:1;
+			unsigned char f38_control1_b2__7:6;
+			unsigned char parameter_field_1;
+			unsigned char parameter_field_2;
+			unsigned char parameter_field_3;
+			unsigned char parameter_field_4;
+			unsigned char send_to_dcs:1;
+			unsigned char f38_command6_b1__7:7;
+		} __packed;
+		unsigned char data[7];
+	};
+};
+
+struct synaptics_rmi4_video_handle {
+	unsigned char param;
+	unsigned short query_base_addr;
+	unsigned short control_base_addr;
+	unsigned short data_base_addr;
+	unsigned short command_base_addr;
+	struct synaptics_rmi4_data *rmi4_data;
+	struct kobject *sysfs_dir;
+};
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+struct dcs_command {
+	unsigned char command;
+	unsigned int wait_time;
+};
+
+static struct dcs_command suspend_sequence[] = {
+	{
+		.command = 0x28,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x10,
+		.wait_time = 200,
+	},
+};
+
+static struct dcs_command resume_sequence[] = {
+	{
+		.command = 0x11,
+		.wait_time = 200,
+	},
+	{
+		.command = 0x29,
+		.wait_time = 200,
+	},
+};
+#endif
+
+static struct device_attribute attrs[] = {
+	__ATTR(dcs_write, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_dcs_write_store),
+	__ATTR(param, 0220,
+			synaptics_rmi4_show_error,
+			video_sysfs_param_store),
+};
+
+static struct synaptics_rmi4_video_handle *video;
+
+DECLARE_COMPLETION(video_remove_complete);
+
+static ssize_t video_sysfs_dcs_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	retval = video_send_dcs_command((unsigned char)input);
+	if (retval < 0)
+		return retval;
+
+	return count;
+}
+
+static ssize_t video_sysfs_param_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+
+	if (kstrtouint(buf, 16, &input))
+		return -EINVAL;
+
+	video->param = (unsigned char)input;
+
+	return count;
+}
+
+static int video_send_dcs_command(unsigned char command_opcode)
+{
+	int retval;
+	struct f38_command command;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	memset(&command, 0x00, sizeof(command));
+
+	command.command_opcode = command_opcode;
+	command.parameter_field_1 = video->param;
+	command.send_to_dcs = 1;
+
+	video->param = 0;
+
+	retval = synaptics_rmi4_reg_write(rmi4_data,
+			video->command_base_addr,
+			command.data,
+			sizeof(command.data));
+	if (retval < 0) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to send DCS command\n",
+				__func__);
+		return retval;
+	}
+
+	return 0;
+}
+
+static int video_scan_pdt(void)
+{
+	int retval;
+	unsigned char page;
+	unsigned short addr;
+	bool f38_found = false;
+	struct synaptics_rmi4_fn_desc rmi_fd;
+	struct synaptics_rmi4_data *rmi4_data = video->rmi4_data;
+
+	for (page = 0; page < PAGES_TO_SERVICE; page++) {
+		for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+			addr |= (page << 8);
+
+			retval = synaptics_rmi4_reg_read(rmi4_data,
+					addr,
+					(unsigned char *)&rmi_fd,
+					sizeof(rmi_fd));
+			if (retval < 0)
+				return retval;
+
+			addr &= ~(MASK_8BIT << 8);
+
+			if (!rmi_fd.fn_number)
+				break;
+
+			if (rmi_fd.fn_number == SYNAPTICS_RMI4_F38) {
+				f38_found = true;
+				goto f38_found;
+			}
+		}
+	}
+
+	if (!f38_found) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to find F38\n",
+				__func__);
+		return -EINVAL;
+	}
+
+f38_found:
+	video->query_base_addr = rmi_fd.query_base_addr | (page << 8);
+	video->control_base_addr = rmi_fd.ctrl_base_addr | (page << 8);
+	video->data_base_addr = rmi_fd.data_base_addr | (page << 8);
+	video->command_base_addr = rmi_fd.cmd_base_addr | (page << 8);
+
+	return 0;
+}
+
+static int synaptics_rmi4_video_init(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char attr_count;
+
+	if (video) {
+		dev_dbg(rmi4_data->pdev->dev.parent,
+				"%s: Handle already exists\n",
+				__func__);
+		return 0;
+	}
+
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
+	if (!video) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to alloc mem for video\n",
+				__func__);
+		retval = -ENOMEM;
+		goto exit;
+	}
+
+	video->rmi4_data = rmi4_data;
+
+	retval = video_scan_pdt();
+	if (retval < 0) {
+		retval = 0;
+		goto exit_scan_pdt;
+	}
+
+	video->sysfs_dir = kobject_create_and_add(SYSFS_FOLDER_NAME,
+			&rmi4_data->input_dev->dev.kobj);
+	if (!video->sysfs_dir) {
+		dev_err(rmi4_data->pdev->dev.parent,
+				"%s: Failed to create sysfs directory\n",
+				__func__);
+		retval = -ENODEV;
+		goto exit_sysfs_dir;
+	}
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+		retval = sysfs_create_file(video->sysfs_dir,
+				&attrs[attr_count].attr);
+		if (retval < 0) {
+			dev_err(rmi4_data->pdev->dev.parent,
+					"%s: Failed to create sysfs attributes\n",
+					__func__);
+			retval = -ENODEV;
+			goto exit_sysfs_attrs;
+		}
+	}
+
+	return 0;
+
+exit_sysfs_attrs:
+	while (attr_count--)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+exit_sysfs_dir:
+exit_scan_pdt:
+	kfree(video);
+	video = NULL;
+
+exit:
+	return retval;
+}
+
+static void synaptics_rmi4_video_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+	unsigned char attr_count;
+
+	if (!video)
+		goto exit;
+
+	for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+		sysfs_remove_file(video->sysfs_dir, &attrs[attr_count].attr);
+
+	kobject_put(video->sysfs_dir);
+
+	kfree(video);
+	video = NULL;
+
+exit:
+	complete(&video_remove_complete);
+
+	return;
+}
+
+static void synaptics_rmi4_video_reset(struct synaptics_rmi4_data *rmi4_data)
+{
+	if (!video)
+		synaptics_rmi4_video_init(rmi4_data);
+
+	return;
+}
+
+#ifdef RMI_DCS_SUSPEND_RESUME
+static void synaptics_rmi4_video_suspend(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(suspend_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = suspend_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(suspend_sequence[ii].wait_time);
+	}
+
+	return;
+}
+
+static void synaptics_rmi4_video_resume(struct synaptics_rmi4_data *rmi4_data)
+{
+	int retval;
+	unsigned char ii;
+	unsigned char command;
+	unsigned char num_of_cmds;
+
+	if (!video)
+		return;
+
+	num_of_cmds = ARRAY_SIZE(resume_sequence);
+
+	for (ii = 0; ii < num_of_cmds; ii++) {
+		command = resume_sequence[ii].command;
+		retval = video_send_dcs_command(command);
+		if (retval < 0)
+			return;
+		msleep(resume_sequence[ii].wait_time);
+	}
+
+	return;
+}
+#endif
+
+static struct synaptics_rmi4_exp_fn video_module = {
+	.fn_type = RMI_VIDEO,
+	.init = synaptics_rmi4_video_init,
+	.remove = synaptics_rmi4_video_remove,
+	.reset = synaptics_rmi4_video_reset,
+	.reinit = NULL,
+	.early_suspend = NULL,
+#ifdef RMI_DCS_SUSPEND_RESUME
+	.suspend = synaptics_rmi4_video_suspend,
+	.resume = synaptics_rmi4_video_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.late_resume = NULL,
+	.attn = NULL,
+};
+
+static int __init rmi4_video_module_init(void)
+{
+	synaptics_rmi4_new_function(&video_module, true);
+
+	return 0;
+}
+
+static void __exit rmi4_video_module_exit(void)
+{
+	synaptics_rmi4_new_function(&video_module, false);
+
+	wait_for_completion(&video_remove_complete);
+
+	return;
+}
+
+module_init(rmi4_video_module_init);
+module_exit(rmi4_video_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics DSX Video Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1a0b110..0c910a8 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3211,7 +3211,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
 	unsigned long start, end;
 
 	start = IOVA_PFN(region->start);
-	end   = IOVA_PFN(region->start + region->length);
+	end   = IOVA_PFN(region->start + region->length - 1);
 
 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 28ef920..d8d9011 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -55,11 +55,10 @@
 #include <linux/remote_spinlock.h>
 #include <linux/ktime.h>
 #include <trace/events/iommu.h>
-#include <soc/qcom/msm_tz_smmu.h>
-#include <soc/qcom/scm.h>
 #include <linux/notifier.h>
 
 #include <linux/amba/bus.h>
+#include <soc/qcom/msm_tz_smmu.h>
 
 #include "io-pgtable.h"
 
@@ -2736,6 +2735,39 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 	}
 }
 
+#ifdef CONFIG_MSM_TZ_SMMU
+static struct arm_smmu_device *arm_smmu_get_by_addr(void __iomem *addr)
+{
+	struct arm_smmu_device *smmu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&arm_smmu_devices_lock, flags);
+	list_for_each_entry(smmu, &arm_smmu_devices, list) {
+		unsigned long base = (unsigned long)smmu->base;
+		unsigned long mask = ~(smmu->size - 1);
+
+		if ((base & mask) == ((unsigned long)addr & mask)) {
+			spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
+			return smmu;
+		}
+	}
+	spin_unlock_irqrestore(&arm_smmu_devices_lock, flags);
+	return NULL;
+}
+
+bool arm_smmu_skip_write(void __iomem *addr)
+{
+	struct arm_smmu_device *smmu;
+
+	smmu = arm_smmu_get_by_addr(addr);
+	if (smmu &&
+	    ((unsigned long)addr & (smmu->size - 1)) >= (smmu->size >> 1))
+		return false;
+	else
+		return true;
+}
+#endif
+
 static struct arm_smmu_device *arm_smmu_get_by_list(struct device_node *np)
 {
 	struct arm_smmu_device *smmu;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c7820b3..beef59e 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -543,7 +543,10 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
 		clk_enable(data->clk_master);
 		if (sysmmu_block(data)) {
-			__sysmmu_tlb_invalidate_entry(data, iova, 1);
+			if (data->version >= MAKE_MMU_VER(5, 0))
+				__sysmmu_tlb_invalidate(data);
+			else
+				__sysmmu_tlb_invalidate_entry(data, iova, 1);
 			sysmmu_unblock(data);
 		}
 		clk_disable(data->clk_master);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 002f8a42..88bbc8c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2245,10 +2245,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
+			unsigned int pgoff = sg->offset & ~PAGE_MASK;
+
 			sg_res = aligned_nrpages(sg->offset, sg->length);
-			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
+			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) - pgoff) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3894,7 +3896,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f50e51c..d68a552 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -418,8 +418,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
 			pte |= ARM_V7S_ATTR_NS_TABLE;
 
 		__arm_v7s_set_pte(ptep, pte, 1, cfg);
-	} else {
+	} else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
 		cptep = iopte_deref(pte, lvl);
+	} else {
+		/* We require an unmap first */
+		WARN_ON(!selftest_running);
+		return -EEXIST;
 	}
 
 	/* Rinse, repeat */
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb07..68c6050 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -703,7 +703,7 @@ static struct platform_driver mtk_iommu_driver = {
 	.probe	= mtk_iommu_probe,
 	.remove	= mtk_iommu_remove,
 	.driver	= {
-		.name = "mtk-iommu",
+		.name = "mtk-iommu-v1",
 		.of_match_table = mtk_iommu_of_ids,
 		.pm = &mtk_iommu_pm_ops,
 	}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ee50a61..9c8ec67 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -278,6 +278,7 @@
 
 config MVEBU_ODMI
 	bool
+	select GENERIC_MSI_IRQ_DOMAIN
 
 config MVEBU_PIC
 	bool
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 05bbf17..1070b7b 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops crossbar_domain_ops = {
 static int __init crossbar_of_init(struct device_node *node)
 {
 	int i, size, reserved = 0;
-	u32 max = 0, entry;
+	u32 max = 0, entry, reg_size;
 	const __be32 *irqsr;
 	int ret = -ENOMEM;
 
@@ -276,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
 	if (!cb->register_offsets)
 		goto err_irq_map;
 
-	of_property_read_u32(node, "ti,reg-size", &size);
+	of_property_read_u32(node, "ti,reg-size", &reg_size);
 
-	switch (size) {
+	switch (reg_size) {
 	case 1:
 		cb->write = crossbar_writeb;
 		break;
@@ -304,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
 			continue;
 
 		cb->register_offsets[i] = reserved;
-		reserved += size;
+		reserved += reg_size;
 	}
 
 	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 01f9435..c67e813 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1066,18 +1066,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 	int nr_parts;
 	struct partition_affinity *parts;
 
-	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
 	if (!parts_node)
 		return;
 
 	nr_parts = of_get_child_count(parts_node);
 
 	if (!nr_parts)
-		return;
+		goto out_put_node;
 
 	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
 	if (WARN_ON(!parts))
-		return;
+		goto out_put_node;
 
 	for_each_child_of_node(parts_node, child_part) {
 		struct partition_affinity *part;
@@ -1144,6 +1144,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
 
 		gic_data.ppi_descs[i] = desc;
 	}
+
+out_put_node:
+	of_node_put(parts_node);
 }
 
 static void __init gic_of_setup_kvm_info(struct device_node *node)
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 823f698..dd7e38a 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
 						     sizeof(avmb1_carddef))))
 				return -EFAULT;
 			cdef.cardtype = AVM_CARDTYPE_B1;
+			cdef.cardnr = 0;
 		} else {
 			if ((retval = copy_from_user(&cdef, data,
 						     sizeof(avmb1_extcarddef))))
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b8f30cd..26e03ba 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -706,6 +706,14 @@
 	  module provides haptic feedback for user actions such as a long press
 	  on the touch screen.
 
+config LEDS_QPNP_VIBRATOR_LDO
+	tristate "Vibrator-LDO support for QPNP PMIC"
+	depends on LEDS_CLASS && MFD_SPMI_PMIC
+	help
+	  This option enables device driver support for the vibrator-ldo
+	  peripheral found on Qualcomm Technologies, Inc. QPNP PMICs.
+	  The vibrator-ldo peripheral is capable of driving ERM vibrators.
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index ba9bb8d..5514391 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -76,6 +76,7 @@
 obj-$(CONFIG_LEDS_QPNP_FLASH_V2)	+= leds-qpnp-flash-v2.o
 obj-$(CONFIG_LEDS_QPNP_WLED)		+= leds-qpnp-wled.o
 obj-$(CONFIG_LEDS_QPNP_HAPTICS)	+= leds-qpnp-haptics.o
+obj-$(CONFIG_LEDS_QPNP_VIBRATOR_LDO)	+= leds-qpnp-vibrator-ldo.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
index 1eaa652..ebdff87 100644
--- a/drivers/leds/leds-qpnp-haptics.c
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1434,8 +1435,7 @@ static ssize_t qpnp_haptics_show_duration(struct device *dev,
 		time_us = ktime_to_us(time_rem);
 	}
 
-	return snprintf(buf, PAGE_SIZE, "%lld\n", time_us / 1000);
-	return 0;
+	return snprintf(buf, PAGE_SIZE, "%lld\n", div_s64(time_us, 1000));
 }
 
 static ssize_t qpnp_haptics_store_duration(struct device *dev,
diff --git a/drivers/leds/leds-qpnp-vibrator-ldo.c b/drivers/leds/leds-qpnp-vibrator-ldo.c
new file mode 100644
index 0000000..6a14324
--- /dev/null
+++ b/drivers/leds/leds-qpnp-vibrator-ldo.c
@@ -0,0 +1,550 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+/* Vibrator-LDO register definitions */
+#define QPNP_VIB_LDO_REG_STATUS1	0x08
+#define QPNP_VIB_LDO_VREG_READY		BIT(7)
+
+#define QPNP_VIB_LDO_REG_VSET_LB	0x40
+
+#define QPNP_VIB_LDO_REG_EN_CTL		0x46
+#define QPNP_VIB_LDO_EN			BIT(7)
+
+/* Vibrator-LDO voltage settings */
+#define QPNP_VIB_LDO_VMIN_UV		1504000
+#define QPNP_VIB_LDO_VMAX_UV		3544000
+#define QPNP_VIB_LDO_VOLT_STEP_UV	8000
+
+/*
+ * Define vibration periods: default(5sec), min(50ms), max(15sec) and
+ * overdrive(30ms).
+ */
+#define QPNP_VIB_MIN_PLAY_MS		50
+#define QPNP_VIB_PLAY_MS		5000
+#define QPNP_VIB_MAX_PLAY_MS		15000
+#define QPNP_VIB_OVERDRIVE_PLAY_MS	30
+
+struct vib_ldo_chip {
+	struct led_classdev	cdev;
+	struct regmap		*regmap;
+	struct mutex		lock;
+	struct hrtimer		stop_timer;
+	struct hrtimer		overdrive_timer;
+	struct work_struct	vib_work;
+	struct work_struct	overdrive_work;
+
+	u16			base;
+	int			vmax_uV;
+	int			overdrive_volt_uV;
+	int			ldo_uV;
+	int			state;
+	u64			vib_play_ms;
+	bool			vib_enabled;
+	bool			disable_overdrive;
+};
+
+static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV)
+{
+	unsigned int val;
+	u32 vlevel;
+	u8 reg[2];
+	int ret;
+
+	if (chip->ldo_uV == new_uV)
+		return 0;
+
+	vlevel = roundup(new_uV, QPNP_VIB_LDO_VOLT_STEP_UV) / 1000;
+	reg[0] = vlevel & 0xff;
+	reg[1] = (vlevel & 0xff00) >> 8;
+	ret = regmap_bulk_write(chip->regmap,
+				chip->base + QPNP_VIB_LDO_REG_VSET_LB, reg, 2);
+	if (ret < 0) {
+		pr_err("regmap write failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	if (chip->vib_enabled) {
+		ret = regmap_read_poll_timeout(chip->regmap,
+					chip->base + QPNP_VIB_LDO_REG_STATUS1,
+					val, val & QPNP_VIB_LDO_VREG_READY,
+					100, 1000);
+		if (ret < 0) {
+			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+				val, ret);
+			return ret;
+		}
+	}
+
+	chip->ldo_uV = new_uV;
+	return ret;
+}
+
+static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable)
+{
+	unsigned int val;
+	int ret;
+
+	if (chip->vib_enabled == enable)
+		return 0;
+
+	ret = regmap_update_bits(chip->regmap,
+				chip->base + QPNP_VIB_LDO_REG_EN_CTL,
+				QPNP_VIB_LDO_EN,
+				enable ? QPNP_VIB_LDO_EN : 0);
+	if (ret < 0) {
+		pr_err("Program Vibrator LDO %s is failed, ret=%d\n",
+			enable ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	if (enable) {
+		ret = regmap_read_poll_timeout(chip->regmap,
+					chip->base + QPNP_VIB_LDO_REG_STATUS1,
+					val, val & QPNP_VIB_LDO_VREG_READY,
+					100, 1000);
+		if (ret < 0) {
+			pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n",
+				val, ret);
+			return ret;
+		}
+	}
+
+	chip->vib_enabled = enable;
+
+	return ret;
+}
+
+static int qpnp_vibrator_play_on(struct vib_ldo_chip *chip)
+{
+	int volt_uV;
+	int ret;
+
+	volt_uV = chip->vmax_uV;
+	if (!chip->disable_overdrive)
+		volt_uV = chip->overdrive_volt_uV ? chip->overdrive_volt_uV
+				: min(chip->vmax_uV * 2, QPNP_VIB_LDO_VMAX_UV);
+
+	ret = qpnp_vib_ldo_set_voltage(chip, volt_uV);
+	if (ret < 0) {
+		pr_err("set voltage = %duV failed, ret=%d\n", volt_uV, ret);
+		return ret;
+	}
+	pr_debug("voltage set to %d uV\n", volt_uV);
+
+	ret = qpnp_vib_ldo_enable(chip, true);
+	if (ret < 0) {
+		pr_err("vibration enable failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	if (!chip->disable_overdrive)
+		hrtimer_start(&chip->overdrive_timer,
+			ms_to_ktime(QPNP_VIB_OVERDRIVE_PLAY_MS),
+			HRTIMER_MODE_REL);
+
+	return ret;
+}
+
+static void qpnp_vib_work(struct work_struct *work)
+{
+	struct vib_ldo_chip *chip = container_of(work, struct vib_ldo_chip,
+						vib_work);
+	int ret = 0;
+
+	if (chip->state) {
+		if (!chip->vib_enabled)
+			ret = qpnp_vibrator_play_on(chip);
+
+		if (ret == 0)
+			hrtimer_start(&chip->stop_timer,
+				      ms_to_ktime(chip->vib_play_ms),
+				      HRTIMER_MODE_REL);
+	} else {
+		if (!chip->disable_overdrive) {
+			hrtimer_cancel(&chip->overdrive_timer);
+			cancel_work_sync(&chip->overdrive_work);
+		}
+		qpnp_vib_ldo_enable(chip, false);
+	}
+}
+
+static enum hrtimer_restart vib_stop_timer(struct hrtimer *timer)
+{
+	struct vib_ldo_chip *chip = container_of(timer, struct vib_ldo_chip,
+					     stop_timer);
+
+	chip->state = 0;
+	schedule_work(&chip->vib_work);
+	return HRTIMER_NORESTART;
+}
+
+static void qpnp_vib_overdrive_work(struct work_struct *work)
+{
+	struct vib_ldo_chip *chip = container_of(work, struct vib_ldo_chip,
+					     overdrive_work);
+	int ret;
+
+	mutex_lock(&chip->lock);
+
+	/* LDO voltage update not required if Vibration disabled */
+	if (!chip->vib_enabled)
+		goto unlock;
+
+	ret = qpnp_vib_ldo_set_voltage(chip, chip->vmax_uV);
+	if (ret < 0) {
+		pr_err("set vibration voltage = %duV failed, ret=%d\n",
+			chip->vmax_uV, ret);
+		qpnp_vib_ldo_enable(chip, false);
+		goto unlock;
+	}
+	pr_debug("voltage set to %d\n", chip->vmax_uV);
+
+unlock:
+	mutex_unlock(&chip->lock);
+}
+
+static enum hrtimer_restart vib_overdrive_timer(struct hrtimer *timer)
+{
+	struct vib_ldo_chip *chip = container_of(timer, struct vib_ldo_chip,
+					     overdrive_timer);
+	schedule_work(&chip->overdrive_work);
+	pr_debug("overdrive timer expired\n");
+	return HRTIMER_NORESTART;
+}
+
+static ssize_t qpnp_vib_show_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->vib_enabled);
+}
+
+static ssize_t qpnp_vib_store_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	/* At present, nothing to do with setting state */
+	return count;
+}
+
+static ssize_t qpnp_vib_show_duration(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	ktime_t time_rem;
+	s64 time_ms = 0;
+
+	if (hrtimer_active(&chip->stop_timer)) {
+		time_rem = hrtimer_get_remaining(&chip->stop_timer);
+		time_ms = ktime_to_ms(time_rem);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n", time_ms);
+}
+
+static ssize_t qpnp_vib_store_duration(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	/* setting 0 on duration is NOP for now */
+	if (val <= 0)
+		return count;
+
+	if (val < QPNP_VIB_MIN_PLAY_MS)
+		val = QPNP_VIB_MIN_PLAY_MS;
+
+	if (val > QPNP_VIB_MAX_PLAY_MS)
+		val = QPNP_VIB_MAX_PLAY_MS;
+
+	mutex_lock(&chip->lock);
+	chip->vib_play_ms = val;
+	mutex_unlock(&chip->lock);
+
+	return count;
+}
+
+static ssize_t qpnp_vib_show_activate(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/* For now nothing to show */
+	return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+}
+
+static ssize_t qpnp_vib_store_activate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	u32 val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0 && val != 1)
+		return count;
+
+	mutex_lock(&chip->lock);
+	hrtimer_cancel(&chip->stop_timer);
+	chip->state = val;
+	pr_debug("state = %d, time = %llums\n", chip->state, chip->vib_play_ms);
+	mutex_unlock(&chip->lock);
+	schedule_work(&chip->vib_work);
+
+	return count;
+}
+
+static ssize_t qpnp_vib_show_vmax(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->vmax_uV / 1000);
+}
+
+static ssize_t qpnp_vib_store_vmax(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct vib_ldo_chip *chip = container_of(cdev, struct vib_ldo_chip,
+						cdev);
+	int data, ret;
+
+	ret = kstrtoint(buf, 10, &data);
+	if (ret < 0)
+		return ret;
+
+	data = data * 1000; /* Convert to microvolts */
+
+	/* check against vibrator ldo min/max voltage limits */
+	data = min(data, QPNP_VIB_LDO_VMAX_UV);
+	data = max(data, QPNP_VIB_LDO_VMIN_UV);
+
+	mutex_lock(&chip->lock);
+	chip->vmax_uV = data;
+	mutex_unlock(&chip->lock);
+	return count;
+}
+
+static struct device_attribute qpnp_vib_attrs[] = {
+	__ATTR(state, 0664, qpnp_vib_show_state, qpnp_vib_store_state),
+	__ATTR(duration, 0664, qpnp_vib_show_duration, qpnp_vib_store_duration),
+	__ATTR(activate, 0664, qpnp_vib_show_activate, qpnp_vib_store_activate),
+	__ATTR(vmax_mv, 0664, qpnp_vib_show_vmax, qpnp_vib_store_vmax),
+};
+
+static int qpnp_vib_parse_dt(struct device *dev, struct vib_ldo_chip *chip)
+{
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,vib-ldo-volt-uv",
+				&chip->vmax_uV);
+	if (ret < 0) {
+		pr_err("qcom,vib-ldo-volt-uv property read failed, ret=%d\n",
+			ret);
+		return ret;
+	}
+
+	chip->disable_overdrive = of_property_read_bool(dev->of_node,
+					"qcom,disable-overdrive");
+
+	if (of_find_property(dev->of_node, "qcom,vib-overdrive-volt-uv",
+			     NULL)) {
+		ret = of_property_read_u32(dev->of_node,
+					   "qcom,vib-overdrive-volt-uv",
+					   &chip->overdrive_volt_uV);
+		if (ret < 0) {
+			pr_err("qcom,vib-overdrive-volt-uv property read failed, ret=%d\n",
+				ret);
+			return ret;
+		}
+
+		/* check against vibrator ldo min/max voltage limits */
+		chip->overdrive_volt_uV = min(chip->overdrive_volt_uV,
+						QPNP_VIB_LDO_VMAX_UV);
+		chip->overdrive_volt_uV = max(chip->overdrive_volt_uV,
+						QPNP_VIB_LDO_VMIN_UV);
+	}
+
+	return ret;
+}
+
+/* Dummy functions for brightness */
+static enum led_brightness qpnp_vib_brightness_get(struct led_classdev *cdev)
+{
+	return 0;
+}
+
+static void qpnp_vib_brightness_set(struct led_classdev *cdev,
+			enum led_brightness level)
+{
+}
+
+static int qpnp_vibrator_ldo_suspend(struct device *dev)
+{
+	struct vib_ldo_chip *chip = dev_get_drvdata(dev);
+
+	mutex_lock(&chip->lock);
+	if (!chip->disable_overdrive) {
+		hrtimer_cancel(&chip->overdrive_timer);
+		cancel_work_sync(&chip->overdrive_work);
+	}
+	hrtimer_cancel(&chip->stop_timer);
+	cancel_work_sync(&chip->vib_work);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+static SIMPLE_DEV_PM_OPS(qpnp_vibrator_ldo_pm_ops, qpnp_vibrator_ldo_suspend,
+			NULL);
+
+static int qpnp_vibrator_ldo_probe(struct platform_device *pdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct vib_ldo_chip *chip;
+	int i, ret;
+	u32 base;
+
+	ret = of_property_read_u32(of_node, "reg", &base);
+	if (ret < 0) {
+		pr_err("reg property reading failed, ret=%d\n", ret);
+		return ret;
+	}
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		pr_err("couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	ret = qpnp_vib_parse_dt(&pdev->dev, chip);
+	if (ret < 0) {
+		pr_err("couldn't parse device tree, ret=%d\n", ret);
+		return ret;
+	}
+
+	chip->base = (uint16_t)base;
+	chip->vib_play_ms = QPNP_VIB_PLAY_MS;
+	mutex_init(&chip->lock);
+	INIT_WORK(&chip->vib_work, qpnp_vib_work);
+	INIT_WORK(&chip->overdrive_work, qpnp_vib_overdrive_work);
+
+	hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	chip->stop_timer.function = vib_stop_timer;
+	hrtimer_init(&chip->overdrive_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	chip->overdrive_timer.function = vib_overdrive_timer;
+	dev_set_drvdata(&pdev->dev, chip);
+
+	chip->cdev.name = "vibrator";
+	chip->cdev.brightness_get = qpnp_vib_brightness_get;
+	chip->cdev.brightness_set = qpnp_vib_brightness_set;
+	chip->cdev.max_brightness = 100;
+	ret = devm_led_classdev_register(&pdev->dev, &chip->cdev);
+	if (ret < 0) {
+		pr_err("Error in registering led class device, ret=%d\n", ret);
+		goto fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_vib_attrs); i++) {
+		ret = sysfs_create_file(&chip->cdev.dev->kobj,
+				&qpnp_vib_attrs[i].attr);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Error in creating sysfs file, ret=%d\n",
+				ret);
+			goto sysfs_fail;
+		}
+	}
+
+	pr_info("Vibrator LDO successfully registered: uV = %d, overdrive = %s\n",
+		chip->vmax_uV,
+		chip->disable_overdrive ? "disabled" : "enabled");
+	return 0;
+
+sysfs_fail:
+	for (--i; i >= 0; i--)
+		sysfs_remove_file(&chip->cdev.dev->kobj,
+				&qpnp_vib_attrs[i].attr);
+fail:
+	mutex_destroy(&chip->lock);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return ret;
+}
+
+static int qpnp_vibrator_ldo_remove(struct platform_device *pdev)
+{
+	struct vib_ldo_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	if (!chip->disable_overdrive) {
+		hrtimer_cancel(&chip->overdrive_timer);
+		cancel_work_sync(&chip->overdrive_work);
+	}
+	hrtimer_cancel(&chip->stop_timer);
+	cancel_work_sync(&chip->vib_work);
+	mutex_destroy(&chip->lock);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id vibrator_ldo_match_table[] = {
+	{ .compatible = "qcom,qpnp-vibrator-ldo" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vibrator_ldo_match_table);
+
+static struct platform_driver qpnp_vibrator_ldo_driver = {
+	.driver	= {
+		.name		= "qcom,qpnp-vibrator-ldo",
+		.of_match_table	= vibrator_ldo_match_table,
+		.pm		= &qpnp_vibrator_ldo_pm_ops,
+	},
+	.probe	= qpnp_vibrator_ldo_probe,
+	.remove	= qpnp_vibrator_ldo_remove,
+};
+module_platform_driver(qpnp_vibrator_ldo_driver);
+
+MODULE_DESCRIPTION("QCOM QPNP Vibrator-LDO driver");
+MODULE_LICENSE("GPL v2");
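For context, a device tree node consumed by this new driver might look like the sketch below. The compatible string and property names come from vibrator_ldo_match_table, qpnp_vibrator_ldo_probe() and qpnp_vib_parse_dt() above; the node name, register offset and voltage values are illustrative only, not taken from any particular PMIC.

	qcom,vibrator@5700 {
		compatible = "qcom,qpnp-vibrator-ldo";
		reg = <0x5700>;				/* peripheral base, read in probe */
		qcom,vib-ldo-volt-uv = <1504000>;	/* steady-state LDO voltage */
		qcom,vib-overdrive-volt-uv = <3544000>;	/* optional overdrive voltage */
	};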
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index ca4abe1..537903b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -404,7 +404,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 
 	finish_wait(&ca->set->bucket_wait, &w);
 out:
-	wake_up_process(ca->alloc_thread);
+	if (ca->alloc_thread)
+		wake_up_process(ca->alloc_thread);
 
 	trace_bcache_alloc(ca, reserve);
 
@@ -476,7 +477,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 		if (b == -1)
 			goto err;
 
-		k->ptr[i] = PTR(ca->buckets[b].gen,
+		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
 				bucket_to_sector(c, b),
 				ca->sb.nr_this_dev);
 
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 243de0bf..4bf1518 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 		return false;
 
 	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
 			return false;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023..08f20b7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c)
 			continue;
 
 		ja->cur_idx = next;
-		k->ptr[n++] = PTR(0,
+		k->ptr[n++] = MAKE_PTR(0,
 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 				  ca->sb.nr_this_dev);
 	}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e0f1c6d..edb8d1a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -468,6 +468,7 @@ struct search {
 	unsigned		recoverable:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
+	unsigned		cache_missed:1;
 
 	unsigned long		start_time;
 
@@ -653,6 +654,7 @@ static inline struct search *search_alloc(struct bio *bio,
 
 	s->orig_bio		= bio;
 	s->cache_miss		= NULL;
+	s->cache_missed		= 0;
 	s->d			= d;
 	s->recoverable		= 1;
 	s->write		= op_is_write(bio_op(bio));
@@ -703,7 +705,14 @@ static void cached_dev_read_error(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (s->recoverable) {
+	/*
+	 * If the read request hit dirty data (s->read_dirty_data is true),
+	 * then recovering a failed read request from the cached device may
+	 * return stale data. So read failure recovery is only permitted
+	 * when the read request hit clean data in the cache device, or
+	 * when a cache read race happened.
+	 */
+	if (s->recoverable && !s->read_dirty_data) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
 
@@ -764,7 +773,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	bch_mark_cache_accounting(s->iop.c, s->d,
-				  !s->cache_miss, s->iop.bypass);
+				  !s->cache_missed, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
 	if (s->iop.error)
@@ -783,6 +792,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
+	s->cache_missed = 1;
+
 	if (s->cache_miss || s->iop.bypass) {
 		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f4557f5..28ce342 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2091,6 +2091,7 @@ static void bcache_exit(void)
 	if (bcache_major)
 		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);
+	mutex_destroy(&bch_register_lock);
 }
 
 static int __init bcache_init(void)
@@ -2109,14 +2110,15 @@ static int __init bcache_init(void)
 	bcache_major = register_blkdev(0, "bcache");
 	if (bcache_major < 0) {
 		unregister_reboot_notifier(&reboot);
+		mutex_destroy(&bch_register_lock);
 		return bcache_major;
 	}
 
 	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
-	    sysfs_create_files(bcache_kobj, files) ||
 	    bch_request_init() ||
-	    bch_debug_init(bcache_kobj))
+	    bch_debug_init(bcache_kobj) ||
+	    sysfs_create_files(bcache_kobj, files))
 		goto err;
 
 	return 0;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index fb02c39..f7ff408 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2084,6 +2084,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 				for (k = 0; k < page; k++) {
 					kfree(new_bp[k].map);
 				}
+				kfree(new_bp);
 
 				/* restore some fields from old_counts */
 				bitmap->counts.bp = old_counts.bp;
@@ -2134,6 +2135,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 		block += old_blocks;
 	}
 
+	if (bitmap->counts.bp != old_counts.bp) {
+		unsigned long k;
+		for (k = 0; k < old_counts.pages; k++)
+			if (!old_counts.bp[k].hijacked)
+				kfree(old_counts.bp[k].map);
+		kfree(old_counts.bp);
+	}
+
 	if (!init) {
 		int i;
 		while (block < (chunks << chunkshift)) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 8bf9667..7643f72 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -937,7 +937,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 		buffers = c->minimum_buffers;
 
 	*limit_buffers = buffers;
-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+	*threshold_buffers = mult_frac(buffers,
+				       DM_BUFIO_WRITEBACK_PERCENT, 100);
 }
 
 /*
@@ -1856,19 +1857,15 @@ static int __init dm_bufio_init(void)
 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
 
-	mem = (__u64)((totalram_pages - totalhigh_pages) *
-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
 
 	if (mem > ULONG_MAX)
 		mem = ULONG_MAX;
 
 #ifdef CONFIG_MMU
-	/*
-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
-	 * in fs/proc/internal.h
-	 */
-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
 #endif
 
 	dm_bufio_default_cache_size = mem;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1..1609d49 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -29,7 +29,6 @@ struct dm_kobject_holder {
  * DM targets must _not_ deference a mapped_device to directly access its members!
  */
 struct mapped_device {
-	struct srcu_struct io_barrier;
 	struct mutex suspend_lock;
 
 	/*
@@ -127,6 +126,8 @@ struct mapped_device {
 	struct blk_mq_tag_set *tag_set;
 	bool use_blk_mq:1;
 	bool init_tio_pdu:1;
+
+	struct srcu_struct io_barrier;
 };
 
 void dm_init_md_queue(struct mapped_device *md);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aac7161..73e7262 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/pr.h>
+#include <linux/vmalloc.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -1518,7 +1519,7 @@ static struct mapped_device *alloc_dev(int minor)
 	struct mapped_device *md;
 	void *old_md;
 
-	md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+	md = vzalloc_node(sizeof(*md), numa_node_id);
 	if (!md) {
 		DMWARN("unable to allocate device, out of memory.");
 		return NULL;
@@ -1612,7 +1613,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_minor:
 	module_put(THIS_MODULE);
 bad_module_get:
-	kfree(md);
+	kvfree(md);
 	return NULL;
 }
 
@@ -1631,7 +1632,7 @@ static void free_dev(struct mapped_device *md)
 	free_minor(minor);
 
 	module_put(THIS_MODULE);
-	kfree(md);
+	kvfree(md);
 }
 
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2521,11 +2522,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
 
 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
 
-	if (test_bit(DMF_FREEING, &md->flags) ||
-	    dm_deleting_md(md))
-		return NULL;
-
+	spin_lock(&_minor_lock);
+	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+		md = NULL;
+		goto out;
+	}
 	dm_get(md);
+out:
+	spin_unlock(&_minor_lock);
+
 	return md;
 }
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117..ba7edcd 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -974,6 +974,7 @@ static int leave(struct mddev *mddev)
 	lockres_free(cinfo->bitmap_lockres);
 	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
+	kfree(cinfo);
 	return 0;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7aea022..475a7a1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1689,8 +1689,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
 		struct r5dev *dev = &sh->dev[i];
 
 		if (dev->written || i == pd_idx || i == qd_idx) {
-			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
+			if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
 				set_bit(R5_UPTODATE, &dev->flags);
+				if (test_bit(STRIPE_EXPAND_READY, &sh->state))
+					set_bit(R5_Expanded, &dev->flags);
+			}
 			if (fua)
 				set_bit(R5_WantFUA, &dev->flags);
 			if (sync)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 749aa7f..9d92acf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
 #include "sde_rotator_util.h"
 #include "sde_rotator_trace.h"
 #include "sde_rotator_debug.h"
+#include "sde_rotator_dev.h"
 
 static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
 {
@@ -707,6 +708,16 @@ static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
 
 	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
 
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-rot-qos-cpu-mask", &data);
+	mdata->rot_pm_qos_cpu_mask = (!rc ? data : 0);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		 "qcom,mdss-rot-qos-cpu-dma-latency", &data);
+	mdata->rot_pm_qos_cpu_dma_latency = (!rc ? data : 0);
+
+	sde_rotator_pm_qos_add(mdata);
+
 	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
 
 	return 0;
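For reference, the two properties read above would be added under the rotator device node roughly as in the following sketch; the node label and values are hypothetical, only the property names come from the parsing code above.

	&mdss_rotator {
		qcom,mdss-rot-qos-cpu-mask = <0xf>;		/* CPUs covered by the PM QoS vote */
		qcom,mdss-rot-qos-cpu-dma-latency = <75>;	/* requested CPU DMA latency, in usec */
	};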
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 8eef152..0ffe219 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -23,6 +23,7 @@
 #include "sde_rotator_io_util.h"
 #include "sde_rotator_smmu.h"
 #include "sde_rotator_formats.h"
+#include <linux/pm_qos.h>
 
 /* HW Revisions for different targets */
 #define SDE_GET_MAJOR_REV(rev)	((rev) >> 28)
@@ -240,6 +241,11 @@ struct sde_rot_data_type {
 	u32 *vbif_nrt_qos;
 	u32 npriority_lvl;
 
+	struct pm_qos_request pm_qos_rot_cpu_req;
+	u32 rot_pm_qos_cpu_count;
+	u32 rot_pm_qos_cpu_mask;
+	u32 rot_pm_qos_cpu_dma_latency;
+
 	u32 vbif_memtype_count;
 	u32 *vbif_memtype;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index fd031d7..fa5eb49 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -588,7 +588,7 @@ static int sde_rotator_secure_session_ctrl(bool enable)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	uint32_t sid_info;
-	struct scm_desc desc;
+	struct scm_desc desc = {0};
 	unsigned int resp = 0;
 	int ret = 0;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 43d17d9..a46194f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -57,8 +57,15 @@
 #define SDE_ROTATOR_DEGREE_180		180
 #define SDE_ROTATOR_DEGREE_90		90
 
+/* Inline rotator qos request */
+#define SDE_ROTATOR_ADD_REQUEST		1
+#define SDE_ROTATOR_REMOVE_REQUEST		0
+
+
 static void sde_rotator_submit_handler(struct kthread_work *work);
 static void sde_rotator_retire_handler(struct kthread_work *work);
+static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
+					 bool add_request);
 #ifdef CONFIG_COMPAT
 static long sde_rotator_compat_ioctl32(struct file *file,
 	unsigned int cmd, unsigned long arg);
@@ -1012,6 +1019,8 @@ struct sde_rotator_ctx *sde_rotator_ctx_open(
 		SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
 
 	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_pm_qos_request(rot_dev,
+				 SDE_ROTATOR_ADD_REQUEST);
 	ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
 			ctx->session_id, &ctx->work_queue);
 	if (ret < 0) {
@@ -1121,6 +1130,8 @@ static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
 	}
 	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
 	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_pm_qos_request(rot_dev,
+			SDE_ROTATOR_REMOVE_REQUEST);
 	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
@@ -1230,6 +1241,104 @@ static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
 	return retire_delta >= 0;
 }
 
+static void sde_rotator_pm_qos_remove(struct sde_rot_data_type *rot_mdata)
+{
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+
+	if (!rot_mdata) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
+
+	if (!cpu_mask)
+		return;
+
+	req = &rot_mdata->pm_qos_rot_cpu_req;
+	pm_qos_remove_request(req);
+}
+
+void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
+{
+	struct pm_qos_request *req;
+	u32 cpu_mask;
+	int cpu;
+
+	if (!rot_mdata) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
+
+	if (!cpu_mask)
+		return;
+
+	req = &rot_mdata->pm_qos_rot_cpu_req;
+	req->type = PM_QOS_REQ_AFFINE_CORES;
+	cpumask_clear(&req->cpus_affine);
+	for_each_possible_cpu(cpu) {
+		if ((1 << cpu) & cpu_mask)
+			cpumask_set_cpu(cpu, &req->cpus_affine);
+	}
+	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
+		PM_QOS_DEFAULT_VALUE);
+
+	SDEROT_DBG("rotator pmqos add mask %x latency %x\n",
+		rot_mdata->rot_pm_qos_cpu_mask,
+		rot_mdata->rot_pm_qos_cpu_dma_latency);
+}
+
+static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
+					 bool add_request)
+{
+	u32 cpu_mask;
+	u32 cpu_dma_latency;
+	bool changed = false;
+
+	if (!rot_dev) {
+		SDEROT_DBG("invalid rot device or context\n");
+		return;
+	}
+
+	cpu_mask = rot_dev->mdata->rot_pm_qos_cpu_mask;
+	cpu_dma_latency = rot_dev->mdata->rot_pm_qos_cpu_dma_latency;
+
+	if (!cpu_mask)
+		return;
+
+	if (add_request) {
+		if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
+			changed = true;
+		rot_dev->mdata->rot_pm_qos_cpu_count++;
+	} else {
+		if (rot_dev->mdata->rot_pm_qos_cpu_count != 0) {
+			rot_dev->mdata->rot_pm_qos_cpu_count--;
+			if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
+				changed = true;
+		} else {
+			SDEROT_DBG("%s: ref_count is not balanced\n",
+				__func__);
+		}
+	}
+
+	if (!changed)
+		return;
+
+	SDEROT_EVTLOG(add_request, cpu_mask, cpu_dma_latency);
+
+	if (!add_request) {
+		pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
+			PM_QOS_DEFAULT_VALUE);
+		return;
+	}
+
+	pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
+		cpu_dma_latency);
+}
+
 /*
  * sde_rotator_inline_open - open inline rotator session
  * @pdev: Pointer to rotator platform device
@@ -3545,6 +3654,7 @@ static int sde_rotator_remove(struct platform_device *pdev)
 		return 0;
 	}
 
+	sde_rotator_pm_qos_remove(rot_dev->mdata);
 	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);
 	video_unregister_device(rot_dev->vdev);
 	video_device_release(rot_dev->vdev);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index a464a39..ab27043 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -250,4 +250,7 @@ struct sde_rot_mgr *sde_rot_mgr_from_device(struct device *dev)
 {
 	return ((struct sde_rotator_device *) dev_get_drvdata(dev))->mgr;
 }
+
+void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata);
+
 #endif /* __SDE_ROTATOR_DEV_H__ */
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 5198bc3..6936354 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -28,6 +28,7 @@ struct smem_client {
 	void *clnt;
 	struct msm_vidc_platform_resources *res;
 	enum session_type session_type;
+	bool tme_encode_mode;
 };
 
 static int msm_ion_get_device_address(struct smem_client *smem_client,
@@ -771,6 +772,13 @@ void *msm_smem_new_client(enum smem_type mtype,
 	return client;
 }
 
+void msm_smem_set_tme_encode_mode(struct smem_client *client, bool enable)
+{
+	if (!client)
+		return;
+	client->tme_encode_mode = enable;
+}
+
 int msm_smem_alloc(struct smem_client *client, size_t size,
 		u32 align, u32 flags, enum hal_buffer buffer_type,
 		int map_kernel, struct msm_smem *smem)
@@ -863,7 +871,8 @@ struct context_bank_info *msm_smem_get_context_bank(void *clt,
 	if (is_secure && client->session_type == MSM_VIDC_ENCODER) {
 		if (buffer_type == HAL_BUFFER_INPUT)
 			buffer_type = HAL_BUFFER_OUTPUT;
-		else if (buffer_type == HAL_BUFFER_OUTPUT)
+		else if (buffer_type == HAL_BUFFER_OUTPUT &&
+			!client->tme_encode_mode)
 			buffer_type = HAL_BUFFER_INPUT;
 	}
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index dd749d6..a80990c 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2691,6 +2691,11 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
+		if (get_hal_codec(inst->fmts[CAPTURE_PORT].fourcc) ==
+			HAL_VIDEO_CODEC_TME) {
+			msm_smem_set_tme_encode_mode(inst->mem_client, true);
+		}
+
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index dc9302e..29c2e3d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -167,6 +167,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
 	struct hfi_device *hdev;
 	struct msm_vidc_inst *inst = NULL;
 	struct vidc_bus_vote_data *vote_data = NULL;
+	bool is_turbo = false;
 
 	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
@@ -208,6 +209,11 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
 					temp->vvb.vb2_buf.planes[0].bytesused);
 				device_addr = temp->smem[0].device_addr;
 			}
+			if (inst->session_type == MSM_VIDC_ENCODER &&
+				(temp->vvb.flags &
+				V4L2_QCOM_BUF_FLAG_PERF_MODE)) {
+				is_turbo = true;
+			}
 		}
 		mutex_unlock(&inst->registeredbufs.lock);
 
@@ -248,7 +254,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
 			vote_data[i].fps = inst->prop.fps;
 
 		vote_data[i].power_mode = 0;
-		if (!msm_vidc_clock_scaling ||
+		if (!msm_vidc_clock_scaling || is_turbo ||
 			inst->clk_data.buffer_counter < DCVS_FTB_WINDOW)
 			vote_data[i].power_mode = VIDC_POWER_TURBO;
 
@@ -409,7 +415,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
 }
 
 static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
-	unsigned long freq, u32 device_addr)
+	unsigned long freq, u32 device_addr, bool is_turbo)
 {
 	struct vidc_freq_data *temp, *next;
 	bool found = false;
@@ -433,6 +439,7 @@ static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
 		temp->device_addr = device_addr;
 		list_add_tail(&temp->list, &inst->freqs.list);
 	}
+	temp->turbo = !!is_turbo;
 exit:
 	mutex_unlock(&inst->freqs.lock);
 }
@@ -452,18 +459,36 @@ void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
 	inst->clk_data.buffer_counter++;
 }
 
+static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+	return freq;
+}
 
 static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst)
 {
 	struct vidc_freq_data *temp;
 	unsigned long freq = 0;
+	bool is_turbo = false;
 
 	mutex_lock(&inst->freqs.lock);
 	list_for_each_entry(temp, &inst->freqs.list, list) {
 		freq = max(freq, temp->freq);
+		if (temp->turbo) {
+			is_turbo = true;
+			break;
+		}
 	}
 	mutex_unlock(&inst->freqs.lock);
 
+	if (is_turbo) {
+		return msm_vidc_max_freq(inst->core);
+	}
 	/* If current requirement is within DCVS limits, try DCVS. */
 
 	if (freq < inst->clk_data.load_norm) {
@@ -531,17 +556,8 @@ void msm_comm_update_input_cr(struct msm_vidc_inst *inst,
 	mutex_unlock(&inst->input_crs.lock);
 }
 
-static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core)
-{
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	unsigned long freq = 0;
 
-	allowed_clks_tbl = core->resources.allowed_clks_tbl;
-	freq = allowed_clks_tbl[0].clock_rate;
-	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
 
-	return freq;
-}
 
 static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
 	u32 filled_len)
@@ -741,6 +757,7 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 	unsigned long freq = 0;
 	u32 filled_len = 0;
 	u32 device_addr = 0;
+	bool is_turbo = false;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
@@ -755,6 +772,11 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 					temp->flags & MSM_VIDC_FLAG_DEFERRED) {
 			filled_len = max(filled_len,
 				temp->vvb.vb2_buf.planes[0].bytesused);
+			if (inst->session_type == MSM_VIDC_ENCODER &&
+				(temp->vvb.flags &
+				 V4L2_QCOM_BUF_FLAG_PERF_MODE)) {
+				is_turbo = true;
+			}
 			device_addr = temp->smem[0].device_addr;
 		}
 	}
@@ -767,7 +789,7 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 
 	freq = msm_vidc_calc_freq(inst, filled_len);
 
-	msm_vidc_update_freq_entry(inst, freq, device_addr);
+	msm_vidc_update_freq_entry(inst, freq, device_addr, is_turbo);
 
 	freq = msm_vidc_adjust_freq(inst);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 98b5714..eda531e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -143,6 +143,7 @@ struct vidc_freq_data {
 	struct list_head list;
 	u32 device_addr;
 	unsigned long freq;
+	bool turbo;
 };
 
 struct vidc_input_cr_data {
@@ -457,6 +458,7 @@ struct msm_vidc_buffer {
 void msm_comm_handle_thermal_event(void);
 void *msm_smem_new_client(enum smem_type mtype,
 		void *platform_resources, enum session_type stype);
+void msm_smem_set_tme_encode_mode(struct smem_client *client, bool enable);
 int msm_smem_alloc(struct smem_client *client,
 		size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
 		int map_kernel, struct msm_smem *smem);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index b49f80c..d9a5710 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
 		if (!dev->max_timeout)
 			return -ENOSYS;
 
+		/* Check for multiply overflow */
+		if (val > U32_MAX / 1000)
+			return -EINVAL;
+
 		tmp = val * 1000;
 
-		if (tmp < dev->min_timeout ||
-		    tmp > dev->max_timeout)
-				return -EINVAL;
+		if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+			return -EINVAL;
 
 		if (dev->s_timeout)
 			ret = dev->s_timeout(dev, tmp);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 6ebe895..f4509ef 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -446,6 +446,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
 		return -ERESTARTSYS;
 
 	ir = irctls[iminor(inode)];
+	mutex_unlock(&lirc_dev_lock);
+
 	if (!ir) {
 		retval = -ENODEV;
 		goto error;
@@ -486,8 +488,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
 	}
 
 error:
-	mutex_unlock(&lirc_dev_lock);
-
 	nonseekable_open(inode, file);
 
 	return retval;
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 5a28ce3..38dbc12 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
 				 unsigned char *cmd,
 				 const struct firmware *firmware) {
 
-	struct as10x_fw_pkt_t fw_pkt;
+	struct as10x_fw_pkt_t *fw_pkt;
 	int total_read_bytes = 0, errno = 0;
 	unsigned char addr_has_changed = 0;
 
+	fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+	if (!fw_pkt)
+		return -ENOMEM;
+
+
 	for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
 		int read_bytes = 0, data_len = 0;
 
 		/* parse intel hex line */
 		read_bytes = parse_hex_line(
 				(u8 *) (firmware->data + total_read_bytes),
-				fw_pkt.raw.address,
-				fw_pkt.raw.data,
+				fw_pkt->raw.address,
+				fw_pkt->raw.data,
 				&data_len,
 				&addr_has_changed);
 
@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
 		/* detect the end of file */
 		total_read_bytes += read_bytes;
 		if (total_read_bytes == firmware->size) {
-			fw_pkt.u.request[0] = 0x00;
-			fw_pkt.u.request[1] = 0x03;
+			fw_pkt->u.request[0] = 0x00;
+			fw_pkt->u.request[1] = 0x03;
 
 			/* send EOF command */
 			errno = bus_adap->ops->upload_fw_pkt(bus_adap,
 							     (uint8_t *)
-							     &fw_pkt, 2, 0);
+							     fw_pkt, 2, 0);
 			if (errno < 0)
 				goto error;
 		} else {
 			if (!addr_has_changed) {
 				/* prepare command to send */
-				fw_pkt.u.request[0] = 0x00;
-				fw_pkt.u.request[1] = 0x01;
+				fw_pkt->u.request[0] = 0x00;
+				fw_pkt->u.request[1] = 0x01;
 
-				data_len += sizeof(fw_pkt.u.request);
-				data_len += sizeof(fw_pkt.raw.address);
+				data_len += sizeof(fw_pkt->u.request);
+				data_len += sizeof(fw_pkt->raw.address);
 
 				/* send cmd to device */
 				errno = bus_adap->ops->upload_fw_pkt(bus_adap,
 								     (uint8_t *)
-								     &fw_pkt,
+								     fw_pkt,
 								     data_len,
 								     0);
 				if (errno < 0)
@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
 		}
 	}
 error:
+	kfree(fw_pkt);
 	return (errno == 0) ? total_read_bytes : errno;
 }
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index be9e333..921cf1e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1622,7 +1622,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
 	nr = dev->devno;
 
 	assoc_desc = udev->actconfig->intf_assoc[0];
-	if (assoc_desc->bFirstInterface != ifnum) {
+	if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
 		dev_err(d, "Not found matching IAD interface\n");
 		retval = -ENODEV;
 		goto err_if;
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 8207e69..bcacb0f 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
 
 int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
 {
-	u8 wbuf[1] = { offs };
-	return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
+	u8 *buf;
+	int rc;
+
+	buf = kmalloc(2, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = offs;
+
+	rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
+	*val = buf[1];
+	kfree(buf);
+
+	return rc;
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6739fb0..98e5e3b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1222,6 +1222,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
 }
 EXPORT_SYMBOL(v4l2_ctrl_fill);
 
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+	u32 flags = ctrl->flags;
+
+	if (ctrl->is_ptr)
+		flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+	return flags;
+}
+
 static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
 {
 	memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1229,7 +1239,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
 	ev->id = ctrl->id;
 	ev->u.ctrl.changes = changes;
 	ev->u.ctrl.type = ctrl->type;
-	ev->u.ctrl.flags = ctrl->flags;
+	ev->u.ctrl.flags = user_flags(ctrl);
 	if (ctrl->is_ptr)
 		ev->u.ctrl.value64 = 0;
 	else
@@ -2553,10 +2563,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
 	else
 		qc->id = ctrl->id;
 	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
-	qc->flags = ctrl->flags;
+	qc->flags = user_flags(ctrl);
 	qc->type = ctrl->type;
-	if (ctrl->is_ptr)
-		qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
 	qc->elem_size = ctrl->elem_size;
 	qc->elems = ctrl->elems;
 	qc->nr_of_dims = ctrl->nr_of_dims;
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c36..bf0fe01 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
 	if (!of_property_read_u32(child, "dma-channel", &val))
 		gpmc_onenand_data->dma_channel = val;
 
-	gpmc_onenand_init(gpmc_onenand_data);
-
-	return 0;
+	return gpmc_onenand_init(gpmc_onenand_data);
 }
 #else
 static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index a518832..59dbdaa 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -664,6 +664,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
 			   sizeof(struct ec_response_get_protocol_info);
 	ec_dev->dout_size = sizeof(struct ec_host_request);
 
+	ec_spi->last_transfer_ns = ktime_get_ns();
 
 	err = cros_ec_register(ec_dev);
 	if (err) {
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 77b2675..92e1760 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -183,6 +183,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
 	return 0;
 }
 
+static int mx25_tsadc_remove(struct platform_device *pdev)
+{
+	struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	if (irq) {
+		irq_set_chained_handler_and_data(irq, NULL, NULL);
+		irq_domain_remove(tsadc->domain);
+	}
+
+	return 0;
+}
+
 static const struct of_device_id mx25_tsadc_ids[] = {
 	{ .compatible = "fsl,imx25-tsadc" },
 	{ /* Sentinel */ }
@@ -194,6 +207,7 @@ static struct platform_driver mx25_tsadc_driver = {
 		.of_match_table = of_match_ptr(mx25_tsadc_ids),
 	},
 	.probe = mx25_tsadc_probe,
+	.remove = mx25_tsadc_remove,
 };
 module_platform_driver(mx25_tsadc_driver);
 
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index 0a16064..cc832d3 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->codec)
 		return true;
 
-	if (of_find_node_by_name(node, "codec"))
+	node = of_get_child_by_name(parent, "codec");
+	if (node) {
+		of_node_put(node);
 		return true;
+	}
 
 	return false;
 }
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 1beb722..e1e69a4 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -701,6 +701,7 @@ static struct twl4030_ins omap3_wrst_seq[] = {
 	TWL_RESOURCE_RESET(RES_MAIN_REF),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2),
 	TWL_RESOURCE_RESET(RES_VUSB_3V1),
+	TWL_RESOURCE_RESET(RES_VMMC1),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1),
 	TWL_RESOURCE_GROUP_RESET(RES_GRP_RC, RES_TYPE_ALL, RES_TYPE2_R0),
 	TWL_RESOURCE_ON(RES_RESET),
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d..dd19f17 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-	if (of_find_node_by_name(node, "vibra"))
+	struct device_node *node;
+
+	node = of_get_child_by_name(parent, "vibra");
+	if (node) {
+		of_node_put(node);
 		return true;
-#endif
+	}
+
 	return false;
 }
 
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index eef202d..a5422f4 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1758,6 +1758,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
 	/* There should only be one entry, but go through the list
 	 * anyway
 	 */
+	if (afu->phb == NULL)
+		return result;
+
 	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 		if (!afu_dev->driver)
 			continue;
@@ -1801,6 +1804,11 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 			/* Only participate in EEH if we are on a virtual PHB */
 			if (afu->phb == NULL)
 				return PCI_ERS_RESULT_NONE;
+
+			/*
+			 * Tell the AFU drivers; but we don't care what they
+			 * say, we're going away.
+			 */
 			cxl_vphb_error_detected(afu, state);
 		}
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -1941,6 +1949,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 		if (cxl_afu_select_best_mode(afu))
 			goto err;
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			/* Reset the device context.
 			 * TODO: make this less disruptive
@@ -2003,6 +2014,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			if (afu_dev->driver && afu_dev->driver->err_handler &&
 			    afu_dev->driver->err_handler->resume)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 051b147..d8a485f 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
 	memset(msg, 0, sizeof(msg));
 	msg[0].addr = client->addr;
 	msg[0].buf = addrbuf;
-	addrbuf[0] = 0x90 + offset;
+	/* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+	addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
 	msg[0].len = 1;
 	msg[1].addr = client->addr;
 	msg[1].flags = I2C_M_RD;
@@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return count;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Read data from chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return -EINVAL;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Write data to chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -638,6 +645,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		dev_warn(&client->dev,
 			"page_size looks suspicious (no power of 2)!\n");
 
+	/*
+	 * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+	 * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+	 *
+	 * Eventually we'll get rid of the magic values altogether in favor of
+	 * real structs, but for now just manually set the right size.
+	 */
+	if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+		chip.byte_len = 6;
+
 	/* Use I2C operations unless we're stuck with SMBus extensions. */
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		if (chip.flags & AT24_FLAG_ADDR16)
@@ -766,7 +783,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	at24->nvmem_config.reg_read = at24_read;
 	at24->nvmem_config.reg_write = at24_write;
 	at24->nvmem_config.priv = at24;
-	at24->nvmem_config.stride = 4;
+	at24->nvmem_config.stride = 1;
 	at24->nvmem_config.word_size = 1;
 	at24->nvmem_config.size = chip.byte_len;
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e4af5c3..980c1c0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -4043,7 +4043,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
 		struct mmc_host *host = card->host;
 		struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
 
-		if ((req_op(req) == REQ_OP_FLUSH || req_op(req) ==  REQ_OP_DISCARD) &&
+		if (mmc_req_is_special(req) &&
 		    (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
 		    ctx->active_small_sector_read_reqs) {
 			ret = wait_event_interruptible(ctx->queue_empty_wq,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3fd621c..b34a143 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -102,8 +102,7 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
 	 */
 	wait_event(ctx->wait, kthread_should_stop()
 		|| (mmc_peek_request(mq) &&
-		!(((req_op(mq->cmdq_req_peeked) == REQ_OP_FLUSH) ||
-		   (req_op(mq->cmdq_req_peeked) == REQ_OP_DISCARD))
+		!(mmc_req_is_special(mq->cmdq_req_peeked)
 		  && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
 		&& !(!host->card->part_curr && !mmc_card_suspended(host->card)
 		     && mmc_host_halt(host))
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 3e0ba75..2958473 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -50,9 +50,28 @@ static void mmc_host_classdev_release(struct device *dev)
 	kfree(host);
 }
 
+static int mmc_host_prepare(struct device *dev)
+{
+	/*
+	 * Since mmc_host is a virtual device, we don't have to do anything.
+	 * If we return a positive value, the pm framework will consider that
+	 * the runtime suspend and system suspend of this device are the same
+	 * and will set the direct_complete flag to true. We don't want this
+	 * because mmc_host always has a positive disable_depth and setting
+	 * the flag will not speed up the suspend process.
+	 * So return 0.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops mmc_pm_ops = {
+	.prepare = mmc_host_prepare,
+};
+
 static struct class mmc_host_class = {
 	.name		= "mmc_host",
 	.dev_release	= mmc_host_classdev_release,
+	.pm		= &mmc_pm_ops,
 };
 
 int mmc_register_host_class(void)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dd58288..bdc4e2a 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -854,7 +854,7 @@ MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
 		card->ext_csd.enhanced_rpmb_supported);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 static ssize_t mmc_fwrev_show(struct device *dev,
 			      struct device_attribute *attr,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index e3bbc2c..965d1f0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -753,7 +753,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 
 static ssize_t mmc_dsr_show(struct device *dev,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 84e9afc..6f9535e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -579,7 +579,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
 		}
 	}
 	sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
-			(mode << 8) | (div % 0xff));
+		      (mode << 8) | div);
 	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
 	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
 		cpu_relax();
@@ -1562,7 +1562,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
 	host->src_clk_freq = clk_get_rate(host->src_clk);
 	/* Set host parameters to mmc */
 	mmc->ops = &mt_msdc_ops;
-	mmc->f_min = host->src_clk_freq / (4 * 255);
+	mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
 
 	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
 	/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 7880405..367c84f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -4845,6 +4845,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 				msm_host->pwr_irq);
 		goto vreg_deinit;
 	}
+
 	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
 					sdhci_msm_pwr_irq, IRQF_ONESHOT,
 					dev_name(&pdev->dev), host);
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index dbf2562..ada2d88 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -116,6 +116,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
 		op = ECC_DECODE;
 		dec = readw(ecc->regs + ECC_DECDONE);
 		if (dec & ecc->sectors) {
+			/*
+			 * Clear the decode IRQ status once again to make sure
+			 * no spurious extra IRQ is raised.
+			 */
+			readw(ecc->regs + ECC_DECIRQ_STA);
 			ecc->sectors = 0;
 			complete(&ecc->done);
 		} else {
@@ -131,8 +136,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
 		}
 	}
 
-	writel(0, ecc->regs + ECC_IRQ_REG(op));
-
 	return IRQ_HANDLED;
 }
 
@@ -342,6 +345,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
 
 	/* disable it */
 	mtk_ecc_wait_idle(ecc, op);
+	if (op == ECC_DECODE)
+		/*
+		 * Clear the decode IRQ status in case we timed out waiting
+		 * for the decode IRQ.
+		 */
+		readw(ecc->regs + ECC_DECIRQ_STA);
 	writew(0, ecc->regs + ECC_IRQ_REG(op));
 	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 31a6ee3..a77cfd7 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2935,15 +2935,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 			    size_t *retlen, const uint8_t *buf)
 {
 	struct nand_chip *chip = mtd_to_nand(mtd);
+	int chipnr = (int)(to >> chip->chip_shift);
 	struct mtd_oob_ops ops;
 	int ret;
 
-	/* Wait for the device to get ready */
-	panic_nand_wait(mtd, chip, 400);
-
 	/* Grab the device */
 	panic_nand_get_device(chip, mtd, FL_WRITING);
 
+	chip->select_chip(mtd, chipnr);
+
+	/* Wait for the device to get ready */
+	panic_nand_wait(mtd, chip, 400);
+
 	memset(&ops, 0, sizeof(ops));
 	ops.len = len;
 	ops.datbuf = (uint8_t *)buf;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c178cb0d..f3a516b 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
 				0x97, 0x79, 0xe5, 0x24, 0xb5};
 
 /**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
  * @mtd:	MTD device structure
  * @dat:	The pointer to data on which ecc is computed
  * @ecc_code:	The ecc_code buffer
+ * @i:		The sector number (for a multi sector page)
  *
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Supports calculating BCH4/8/16 ECC vectors for one sector
+ * within a page. The sector number is passed in @i.
  */
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
-					const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+				   const u_char *dat, u_char *ecc_calc, int i)
 {
 	struct omap_nand_info *info = mtd_to_omap(mtd);
 	int eccbytes	= info->nand.ecc.bytes;
 	struct gpmc_nand_regs	*gpmc_regs = &info->reg;
 	u8 *ecc_code;
-	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+	unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
 	u32 val;
-	int i, j;
+	int j;
+
+	ecc_code = ecc_calc;
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+	case OMAP_ECC_BCH8_CODE_HW:
+		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+		bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+		bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+		*ecc_code++ = (bch_val4 & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val3 & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val2 & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+		*ecc_code++ = (bch_val1 & 0xFF);
+		break;
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+	case OMAP_ECC_BCH4_CODE_HW:
+		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+		*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+		*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+		*ecc_code++ = ((bch_val2 & 0xF) << 4) |
+			((bch_val1 >> 28) & 0xF);
+		*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+		*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+		*ecc_code++ = ((bch_val1 & 0xF) << 4);
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		val = readl(gpmc_regs->gpmc_bch_result6[i]);
+		ecc_code[0]  = ((val >>  8) & 0xFF);
+		ecc_code[1]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result5[i]);
+		ecc_code[2]  = ((val >> 24) & 0xFF);
+		ecc_code[3]  = ((val >> 16) & 0xFF);
+		ecc_code[4]  = ((val >>  8) & 0xFF);
+		ecc_code[5]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result4[i]);
+		ecc_code[6]  = ((val >> 24) & 0xFF);
+		ecc_code[7]  = ((val >> 16) & 0xFF);
+		ecc_code[8]  = ((val >>  8) & 0xFF);
+		ecc_code[9]  = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result3[i]);
+		ecc_code[10] = ((val >> 24) & 0xFF);
+		ecc_code[11] = ((val >> 16) & 0xFF);
+		ecc_code[12] = ((val >>  8) & 0xFF);
+		ecc_code[13] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result2[i]);
+		ecc_code[14] = ((val >> 24) & 0xFF);
+		ecc_code[15] = ((val >> 16) & 0xFF);
+		ecc_code[16] = ((val >>  8) & 0xFF);
+		ecc_code[17] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result1[i]);
+		ecc_code[18] = ((val >> 24) & 0xFF);
+		ecc_code[19] = ((val >> 16) & 0xFF);
+		ecc_code[20] = ((val >>  8) & 0xFF);
+		ecc_code[21] = ((val >>  0) & 0xFF);
+		val = readl(gpmc_regs->gpmc_bch_result0[i]);
+		ecc_code[22] = ((val >> 24) & 0xFF);
+		ecc_code[23] = ((val >> 16) & 0xFF);
+		ecc_code[24] = ((val >>  8) & 0xFF);
+		ecc_code[25] = ((val >>  0) & 0xFF);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* ECC scheme specific syndrome customizations */
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+		/* Add constant polynomial to remainder, so that
+		 * ECC of blank pages results in 0x0 on reading back
+		 */
+		for (j = 0; j < eccbytes; j++)
+			ecc_calc[j] ^= bch4_polynomial[j];
+		break;
+	case OMAP_ECC_BCH4_CODE_HW:
+		/* Set  8th ECC byte as 0x0 for ROM compatibility */
+		ecc_calc[eccbytes - 1] = 0x0;
+		break;
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		/* Add constant polynomial to remainder, so that
+		 * ECC of blank pages results in 0x0 on reading back
+		 */
+		for (j = 0; j < eccbytes; j++)
+			ecc_calc[j] ^= bch8_polynomial[j];
+		break;
+	case OMAP_ECC_BCH8_CODE_HW:
+		/* Set 14th ECC byte as 0x0 for ROM compatibility */
+		ecc_calc[eccbytes - 1] = 0x0;
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd:	MTD device structure
+ * @dat:	The pointer to data on which ecc is computed
+ * @ecc_code:	The ecc_code buffer
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for one sector. This is used
+ * when SW-based correction is required, since the ECC must then be computed
+ * one sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+				     const u_char *dat, u_char *ecc_calc)
+{
+	return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd:	MTD device structure
+ * @dat:	The pointer to data on which ecc is computed
+ * @ecc_code:	The ecc_code buffer
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+					const u_char *dat, u_char *ecc_calc)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int eccbytes = info->nand.ecc.bytes;
+	unsigned long nsectors;
+	int i, ret;
 
 	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
 	for (i = 0; i < nsectors; i++) {
-		ecc_code = ecc_calc;
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH8_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
-			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
-			*ecc_code++ = (bch_val4 & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val3 & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val2 & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val1 & 0xFF);
-			break;
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH4_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val2 & 0xF) << 4) |
-				((bch_val1 >> 28) & 0xF);
-			*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val1 & 0xF) << 4);
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			val = readl(gpmc_regs->gpmc_bch_result6[i]);
-			ecc_code[0]  = ((val >>  8) & 0xFF);
-			ecc_code[1]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result5[i]);
-			ecc_code[2]  = ((val >> 24) & 0xFF);
-			ecc_code[3]  = ((val >> 16) & 0xFF);
-			ecc_code[4]  = ((val >>  8) & 0xFF);
-			ecc_code[5]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result4[i]);
-			ecc_code[6]  = ((val >> 24) & 0xFF);
-			ecc_code[7]  = ((val >> 16) & 0xFF);
-			ecc_code[8]  = ((val >>  8) & 0xFF);
-			ecc_code[9]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result3[i]);
-			ecc_code[10] = ((val >> 24) & 0xFF);
-			ecc_code[11] = ((val >> 16) & 0xFF);
-			ecc_code[12] = ((val >>  8) & 0xFF);
-			ecc_code[13] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result2[i]);
-			ecc_code[14] = ((val >> 24) & 0xFF);
-			ecc_code[15] = ((val >> 16) & 0xFF);
-			ecc_code[16] = ((val >>  8) & 0xFF);
-			ecc_code[17] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result1[i]);
-			ecc_code[18] = ((val >> 24) & 0xFF);
-			ecc_code[19] = ((val >> 16) & 0xFF);
-			ecc_code[20] = ((val >>  8) & 0xFF);
-			ecc_code[21] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result0[i]);
-			ecc_code[22] = ((val >> 24) & 0xFF);
-			ecc_code[23] = ((val >> 16) & 0xFF);
-			ecc_code[24] = ((val >>  8) & 0xFF);
-			ecc_code[25] = ((val >>  0) & 0xFF);
-			break;
-		default:
-			return -EINVAL;
-		}
+		ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+		if (ret)
+			return ret;
 
-		/* ECC scheme specific syndrome customizations */
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch4_polynomial[j];
-			break;
-		case OMAP_ECC_BCH4_CODE_HW:
-			/* Set  8th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch8_polynomial[j];
-			break;
-		case OMAP_ECC_BCH8_CODE_HW:
-			/* Set 14th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			break;
-		default:
-			return -EINVAL;
-		}
-
-	ecc_calc += eccbytes;
+		ecc_calc += eccbytes;
 	}
 
 	return 0;
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
 	chip->write_buf(mtd, buf, mtd->writesize);
 
 	/* Update ecc vector from GPMC result registers */
-	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+	omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
 
 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
 					 chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
 }
 
 /**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @offset:	column address of subpage within the page
+ * @data_len:	data length
+ * @buf:	data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+				  struct nand_chip *chip, u32 offset,
+				  u32 data_len, const u8 *buf,
+				  int oob_required, int page)
+{
+	u8 *ecc_calc = chip->buffers->ecccalc;
+	int ecc_size      = chip->ecc.size;
+	int ecc_bytes     = chip->ecc.bytes;
+	int ecc_steps     = chip->ecc.steps;
+	u32 start_step = offset / ecc_size;
+	u32 end_step   = (offset + data_len - 1) / ecc_size;
+	int step, ret = 0;
+
+	/*
+	 * Write the entire page in one go, which is optimal since
+	 * ECC is calculated by hardware.
+	 * ECC is calculated for all subpages, but we keep only the
+	 * bytes belonging to the subpages being written.
+	 */
+
+	/* Enable GPMC ECC engine */
+	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+	/* Write data */
+	chip->write_buf(mtd, buf, mtd->writesize);
+
+	for (step = 0; step < ecc_steps; step++) {
+		/* mask ECC of untouched subpages by padding with 0xFF */
+		if (step < start_step || step > end_step)
+			memset(ecc_calc, 0xff, ecc_bytes);
+		else
+			ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+		if (ret)
+			return ret;
+
+		buf += ecc_size;
+		ecc_calc += ecc_bytes;
+	}
+
+	/* copy calculated ECC for the whole page to chip->oob_poi */
+	/* this includes the masked value (0xFF) for unwritten subpages */
+	ecc_calc = chip->buffers->ecccalc;
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* write OOB buffer to NAND device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
  * omap_read_page_bch - BCH ecc based page read function for entire page
  * @mtd:		mtd info structure
  * @chip:		nand chip info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
 		       chip->ecc.total);
 
 	/* Calculate ecc bytes */
-	chip->ecc.calculate(mtd, buf, ecc_calc);
+	omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
 
 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
 					 chip->ecc.total);
@@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
 		nand_chip->ecc.strength		= 4;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
 		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
 		/* Reserve one byte for the OMAP marker */
 		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
@@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
 		nand_chip->ecc.strength		= 4;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
@@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
 		nand_chip->ecc.strength		= 8;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch_sw;
 		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
 		/* Reserve one byte for the OMAP marker */
 		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
@@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
 		nand_chip->ecc.strength		= 8;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
@@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
 		nand_chip->ecc.strength		= 16;
 		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
 		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
 		nand_chip->ecc.read_page	= omap_read_page_bch;
 		nand_chip->ecc.write_page	= omap_write_page_bch;
+		nand_chip->ecc.write_subpage	= omap_write_subpage_bch;
 		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
 		oobbytes_per_step		= nand_chip->ecc.bytes;
 
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index e90c6a7..2e46496 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static int ipddp_create(struct ipddp_route *new_rt)
 {
-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
 
         if (rt == NULL)
                 return -ENOMEM;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 6749b18..4d01d7b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -652,6 +652,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
 		mbx_mask = hecc_read(priv, HECC_CANMIM);
 		mbx_mask |= HECC_TX_MBOX_MASK;
 		hecc_write(priv, HECC_CANMIM, mbx_mask);
+	} else {
+		/* repoll is done only if whole budget is used */
+		num_pkts = quota;
 	}
 
 	return num_pkts;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b3d0275..b003582 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
 
 	case -ECONNRESET: /* unlink */
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 9fdb0f0..c6dcf93 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
 		break;
 
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4224e06..c9d61a6 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
 			}
 
 			if (pos + tmp->len > actual_len) {
-				dev_err(dev->udev->dev.parent,
-					"Format error\n");
+				dev_err_ratelimited(dev->udev->dev.parent,
+						    "Format error\n");
 				break;
 			}
 
@@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
 	if (err) {
 		netdev_err(netdev, "Error transmitting URB\n");
 		usb_unanchor_urb(urb);
+		kfree(buf);
 		usb_free_urb(urb);
 		return err;
 	}
@@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
 	case 0:
 		break;
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 	default:
@@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
 		goto resubmit_urb;
 	}
 
-	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+	while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
 		msg = urb->transfer_buffer + pos;
 
 		/* The Kvaser firmware can only read and write messages that
@@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
 		}
 
 		if (pos + msg->len > urb->actual_length) {
-			dev_err(dev->udev->dev.parent, "Format error\n");
+			dev_err_ratelimited(dev->udev->dev.parent,
+					    "Format error\n");
 			break;
 		}
 
@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
 
 		usb_unanchor_urb(urb);
+		kfree(buf);
 
 		stats->tx_dropped++;
 
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d000cb6..27861c4 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
 		break;
 
 	case -ENOENT:
+	case -EPIPE:
+	case -EPROTO:
 	case -ESHUTDOWN:
 		return;
 
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 8f8418d..a0012c3 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * 4) Get the hardware address.
 	 * 5) Put the card to sleep.
 	 */
-	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+	err = typhoon_reset(ioaddr, WaitSleep);
+	if (err < 0) {
 		err_msg = "could not reset 3XP";
-		err = -EIO;
 		goto error_out_dma;
 	}
 
@@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	typhoon_init_interface(tp);
 	typhoon_init_rings(tp);
 
-	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+	err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
+	if (err < 0) {
 		err_msg = "cannot boot 3XP sleep image";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
-	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+	err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
+	if (err < 0) {
 		err_msg = "cannot read MAC address";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
 	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
 	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
 
-	if(!is_valid_ether_addr(dev->dev_addr)) {
+	if (!is_valid_ether_addr(dev->dev_addr)) {
 		err_msg = "Could not obtain valid ethernet address, aborting";
+		err = -EIO;
 		goto error_out_reset;
 	}
 
@@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * later when we print out the version reported.
 	 */
 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
-	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+	err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
+	if (err < 0) {
 		err_msg = "Could not get Sleep Image version";
 		goto error_out_reset;
 	}
@@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if(xp_resp[0].numDesc != 0)
 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
 
-	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+	err = typhoon_sleep(tp, PCI_D3hot, 0);
+	if (err < 0) {
 		err_msg = "cannot put adapter to sleep";
-		err = -EIO;
 		goto error_out_reset;
 	}
 
@@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->features = dev->hw_features |
 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
 
-	if(register_netdev(dev) < 0) {
+	err = register_netdev(dev);
+	if (err < 0) {
 		err_msg = "unable to register netdev";
 		goto error_out_reset;
 	}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index be7ec5a..744ed6d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1023,6 +1023,18 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
 		skb = bcm_sysport_insert_tsb(skb, dev);
@@ -1032,20 +1044,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		}
 	}
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
-	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+	skb_len = skb->len;
 
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	if (dma_mapping_error(kdev, mapping)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 4febe60..5d958b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13293,17 +13293,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
-	/* VF with OLD Hypervisor or old PF do not support filtering */
 	if (IS_PF(bp)) {
 		if (chip_is_e1x)
 			bp->accept_any_vlan = true;
 		else
 			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
-	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
-		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
 	}
+	/* For VF we'll know whether to enable VLAN filtering after
+	 * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+	 */
 
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
@@ -13735,7 +13733,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 	if (!netif_running(bp->dev)) {
 		DP(BNX2X_MSG_PTP,
 		   "PTP adjfreq called while the interface is down\n");
-		return -EFAULT;
+		return -ENETDOWN;
 	}
 
 	if (ppb < 0) {
@@ -13794,6 +13792,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP adjtime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
 
 	timecounter_adjtime(&bp->timecounter, delta);
@@ -13806,6 +13810,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP gettime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timecounter_read(&bp->timecounter);
 
 	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13821,6 +13831,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
 	u64 ns;
 
+	if (!netif_running(bp->dev)) {
+		DP(BNX2X_MSG_PTP,
+		   "PTP settime called while the interface is down\n");
+		return -ENETDOWN;
+	}
+
 	ns = timespec64_to_ns(ts);
 
 	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13988,6 +14004,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 		if (rc)
 			goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+		/* VF with OLD Hypervisor or old PF do not support filtering */
+		if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+			dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		}
+#endif
 	}
 
 	/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d08..c6e0591 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 
 	/* Add/Remove the filter */
 	rc = bnx2x_config_vlan_mac(bp, &ramrod);
-	if (rc && rc != -EEXIST) {
+	if (rc == -EEXIST)
+		return 0;
+	if (rc) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
 			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 		return rc;
 	}
 
+	filter->applied = true;
+
 	return 0;
 }
 
@@ -471,6 +475,8 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
 			  i, filters->count + 1);
 		while (--i >= 0) {
+			if (!filters->filters[i].applied)
+				continue;
 			filters->filters[i].add = !filters->filters[i].add;
 			bnx2x_vf_mac_vlan_config(bp, vf, qid,
 						 &filters->filters[i],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406..888d0b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
 	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
 	bool add;
+	bool applied;
 	u8 *mac;
 	u16 vid;
 };
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300..c2d327d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 	struct bnx2x *bp = netdev_priv(dev);
 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
-	int rc, i = 0;
+	int rc = 0, i = 0;
 	struct netdev_hw_addr *ha;
 
 	if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 	/* Get Rx mode requested */
 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
 
+	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
+	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+		DP(NETIF_MSG_IFUP,
+		   "VF supports not more than %d multicast MAC addresses\n",
+		   PFVF_MAX_MULTICAST_PER_VF);
+		rc = -EINVAL;
+		goto out;
+	}
+
 	netdev_for_each_mc_addr(ha, dev) {
 		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 		   bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 		i++;
 	}
 
-	/* We support four PFVF_MAX_MULTICAST_PER_VF mcast
-	  * addresses tops
-	  */
-	if (i >= PFVF_MAX_MULTICAST_PER_VF) {
-		DP(NETIF_MSG_IFUP,
-		   "VF supports not more than %d multicast MAC addresses\n",
-		   PFVF_MAX_MULTICAST_PER_VF);
-		return -EINVAL;
-	}
-
 	req->n_multicast = i;
 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
 	req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to add a vlan for the vf */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 333df54..bbb3641 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2381,6 +2381,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
 	return 0;
 }
 
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+}
+
 static int bnxt_init_rx_rings(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -3800,6 +3812,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	return rc;
 }
 
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+	int rc;
+
+	if (BNXT_PF(bp)) {
+		struct hwrm_func_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+		req.fid = cpu_to_le16(0xffff);
+		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	} else {
+		struct hwrm_func_vf_cfg_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+		req.enables =
+			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+		req.async_event_cr = cpu_to_le16(idx);
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	}
+	return rc;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -3816,6 +3852,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			goto err_out;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+		if (!i) {
+			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+			if (rc)
+				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+		}
 	}
 
 	for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -4670,6 +4712,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
 
 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 {
+	bnxt_init_cp_rings(bp);
 	bnxt_init_rx_rings(bp);
 	bnxt_init_tx_rings(bp);
 	bnxt_init_ring_grps(bp, irq_re_init);
@@ -5102,8 +5145,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
 	}
-	link_info->support_auto_speeds =
-		le16_to_cpu(resp->supported_speeds_auto_mode);
+	if (resp->supported_speeds_auto_mode)
+		link_info->support_auto_speeds =
+			le16_to_cpu(resp->supported_speeds_auto_mode);
 
 hwrm_phy_qcaps_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0975af2..3480b30 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -778,8 +778,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
 	/* Misc UniMAC counters */
 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
-			UMAC_RBUF_OVFL_CNT),
-	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+			UMAC_RBUF_OVFL_CNT_V1),
+	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+			UMAC_RBUF_ERR_CNT_V1),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +822,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
 	}
 }
 
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+	u16 new_offset;
+	u32 val;
+
+	switch (offset) {
+	case UMAC_RBUF_OVFL_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_OVFL_CNT_V2;
+		else
+			new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv,	new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	case UMAC_RBUF_ERR_CNT_V1:
+		if (GENET_IS_V2(priv))
+			new_offset = RBUF_ERR_CNT_V2;
+		else
+			new_offset = RBUF_ERR_CNT_V3PLUS;
+
+		val = bcmgenet_rbuf_readl(priv,	new_offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_rbuf_writel(priv, 0, new_offset);
+		break;
+	default:
+		val = bcmgenet_umac_readl(priv, offset);
+		/* clear if overflowed */
+		if (val == ~0)
+			bcmgenet_umac_writel(priv, 0, offset);
+		break;
+	}
+
+	return val;
+}
+
 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 {
 	int i, j = 0;
@@ -836,19 +876,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		case BCMGENET_STAT_NETDEV:
 		case BCMGENET_STAT_SOFT:
 			continue;
-		case BCMGENET_STAT_MIB_RX:
-		case BCMGENET_STAT_MIB_TX:
 		case BCMGENET_STAT_RUNT:
-			if (s->type != BCMGENET_STAT_MIB_RX)
-				offset = BCMGENET_STAT_OFFSET;
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_TX:
+			offset += BCMGENET_STAT_OFFSET;
+			/* fall through */
+		case BCMGENET_STAT_MIB_RX:
 			val = bcmgenet_umac_readl(priv,
 						  UMAC_MIB_START + j + offset);
+			offset = 0;	/* Reset Offset */
 			break;
 		case BCMGENET_STAT_MISC:
-			val = bcmgenet_umac_readl(priv, s->reg_offset);
-			/* clear if overflowed */
-			if (val == ~0)
-				bcmgenet_umac_writel(priv, 0, s->reg_offset);
+			if (GENET_IS_V1(priv)) {
+				val = bcmgenet_umac_readl(priv, s->reg_offset);
+				/* clear if overflowed */
+				if (val == ~0)
+					bcmgenet_umac_writel(priv, 0,
+							     s->reg_offset);
+			} else {
+				val = bcmgenet_update_stat_misc(priv,
+								s->reg_offset);
+			}
 			break;
 		}
 
@@ -2464,24 +2513,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 /* Interrupt bottom half */
 static void bcmgenet_irq_task(struct work_struct *work)
 {
+	unsigned long flags;
+	unsigned int status;
 	struct bcmgenet_priv *priv = container_of(
 			work, struct bcmgenet_priv, bcmgenet_irq_work);
 
 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
-	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
-		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+	spin_lock_irqsave(&priv->lock, flags);
+	status = priv->irq0_stat;
+	priv->irq0_stat = 0;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (status & UMAC_IRQ_MPD_R) {
 		netif_dbg(priv, wol, priv->dev,
 			  "magic packet detected, waking up\n");
 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
 	}
 
 	/* Link UP/DOWN event */
-	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+	if (status & UMAC_IRQ_LINK_EVENT)
 		phy_mac_interrupt(priv->phydev,
-				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
-		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
-	}
+				  !!(status & UMAC_IRQ_LINK_UP));
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2490,22 +2543,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
-	unsigned int index;
+	unsigned int index, status;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq1_stat =
-		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+		  "%s: IRQ=0x%x\n", __func__, status);
 
 	/* Check Rx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
 			continue;
 
 		rx_ring = &priv->rx_rings[index];
@@ -2518,7 +2570,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 
 	/* Check Tx priority queue interrupts */
 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
-		if (!(priv->irq1_stat & BIT(index)))
+		if (!(status & BIT(index)))
 			continue;
 
 		tx_ring = &priv->tx_rings[index];
@@ -2538,19 +2590,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	struct bcmgenet_priv *priv = dev_id;
 	struct bcmgenet_rx_ring *rx_ring;
 	struct bcmgenet_tx_ring *tx_ring;
+	unsigned int status;
+	unsigned long flags;
 
-	/* Save irq status for bottom-half processing. */
-	priv->irq0_stat =
-		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+	/* Read irq status */
+	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
 
 	/* clear interrupts */
-	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
-		  "IRQ=0x%x\n", priv->irq0_stat);
+		  "IRQ=0x%x\n", status);
 
-	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+	if (status & UMAC_IRQ_RXDMA_DONE) {
 		rx_ring = &priv->rx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2559,7 +2612,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		}
 	}
 
-	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+	if (status & UMAC_IRQ_TXDMA_DONE) {
 		tx_ring = &priv->tx_rings[DESC_INDEX];
 
 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2568,20 +2621,21 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 		}
 	}
 
-	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
-				UMAC_IRQ_PHY_DET_F |
-				UMAC_IRQ_LINK_EVENT |
-				UMAC_IRQ_HFB_SM |
-				UMAC_IRQ_HFB_MM |
-				UMAC_IRQ_MPD_R)) {
-		/* all other interested interrupts handled in bottom half */
-		schedule_work(&priv->bcmgenet_irq_work);
+	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+		status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+		wake_up(&priv->wq);
 	}
 
-	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
-		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
-		wake_up(&priv->wq);
+	/* all other interested interrupts handled in bottom half */
+	status &= (UMAC_IRQ_LINK_EVENT |
+		   UMAC_IRQ_MPD_R);
+	if (status) {
+		/* Save irq status for bottom-half processing. */
+		spin_lock_irqsave(&priv->lock, flags);
+		priv->irq0_stat |= status;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		schedule_work(&priv->bcmgenet_irq_work);
 	}
 
 	return IRQ_HANDLED;
@@ -2808,6 +2862,8 @@ static int bcmgenet_open(struct net_device *dev)
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
@@ -3184,6 +3240,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	 */
 	gphy_rev = reg & 0xffff;
 
+	/* This is reserved so should require special treatment */
+	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+		return;
+	}
+
 	/* This is the good old scheme, just GPHY major, no minor nor patch */
 	if ((gphy_rev & 0xf0) != 0)
 		priv->gphy_rev = gphy_rev << 8;
@@ -3192,12 +3254,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 	else if ((gphy_rev & 0xff00) != 0)
 		priv->gphy_rev = gphy_rev;
 
-	/* This is reserved so should require special treatment */
-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
-		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
-		return;
-	}
-
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (!(params->flags & GENET_HAS_40BITS))
 		pr_warn("GENET does not support 40-bits PA\n");
@@ -3240,6 +3296,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	const void *macaddr;
 	struct resource *r;
 	int err = -EIO;
+	const char *phy_mode_str;
 
 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3283,6 +3340,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	spin_lock_init(&priv->lock);
+
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev_set_drvdata(&pdev->dev, dev);
 	ether_addr_copy(dev->dev_addr, macaddr);
@@ -3345,6 +3404,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
 		priv->clk_eee = NULL;
 	}
 
+	/* If this is an internal GPHY, power it on now, before UniMAC is
+	 * brought out of reset as absolutely no UniMAC activity is allowed
+	 */
+	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+	    !strcasecmp(phy_mode_str, "internal"))
+		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
 	err = reset_umac(priv);
 	if (err)
 		goto err_clk_disable;
@@ -3511,6 +3577,8 @@ static int bcmgenet_resume(struct device *d)
 	return 0;
 
 out_clk_disable:
+	if (priv->internal_phy)
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	clk_disable_unprepare(priv->clk);
 	return ret;
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34..db7f289 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
 #define  MDIO_REG_SHIFT			16
 #define  MDIO_REG_MASK			0x1F
 
-#define UMAC_RBUF_OVFL_CNT		0x61C
+#define UMAC_RBUF_OVFL_CNT_V1		0x61C
+#define RBUF_OVFL_CNT_V2		0x80
+#define RBUF_OVFL_CNT_V3PLUS		0x94
 
 #define UMAC_MPD_CTRL			0x620
 #define  MPD_EN				(1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
 
 #define UMAC_MPD_PW_MS			0x624
 #define UMAC_MPD_PW_LS			0x628
-#define UMAC_RBUF_ERR_CNT		0x634
+#define UMAC_RBUF_ERR_CNT_V1		0x634
+#define RBUF_ERR_CNT_V2			0x84
+#define RBUF_ERR_CNT_V3PLUS		0x98
 #define UMAC_MDF_ERR_CNT		0x638
 #define UMAC_MDF_CTRL			0x650
 #define UMAC_MDF_ADDR			0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
 	struct work_struct bcmgenet_irq_work;
 	int irq0;
 	int irq1;
-	unsigned int irq0_stat;
-	unsigned int irq1_stat;
 	int wol_irq;
 	bool wol_irq_disabled;
 
+	/* shared status */
+	spinlock_t lock;
+	unsigned int irq0_stat;
+
 	/* HW descriptors/checksum variables */
 	bool desc_64b_en;
 	bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663..0f68118 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
 bfa_ioc_send_enable(struct bfa_ioc *ioc)
 {
 	struct bfi_ioc_ctrl_req enable_req;
-	struct timeval tv;
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
 	enable_req.clscode = htons(ioc->clscode);
-	do_gettimeofday(&tv);
-	enable_req.tv_sec = ntohl(tv.tv_sec);
+	enable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
 
 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
 		    bfa_ioc_portid(ioc));
+	disable_req.clscode = htons(ioc->clscode);
+	disable_req.rsvd = htons(0);
+	/* overflow in 2106 */
+	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 05c1c1d..cebfe3b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
 		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-	if (rc < 2) {
+	if (rc < 2 || len > UINT_MAX >> 2) {
 		netdev_warn(bnad->netdev, "failed to read user buffer\n");
 		kfree(kern_buf);
 		return -EINVAL;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befed..578c7f8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
 	int speed = 2;
 
 	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
 		return;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b..d04a6c1 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
 		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 			fl6.flowi6_oif = sin6_scope_id;
 		dst = ip6_route_output(&init_net, NULL, &fl6);
-		if (!dst)
-			goto out;
-		if (!cxgb_our_interface(lldi, get_real_dev,
-					ip6_dst_idev(dst)->dev) &&
-		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+		if (dst->error ||
+		    (!cxgb_our_interface(lldi, get_real_dev,
+					 ip6_dst_idev(dst)->dev) &&
+		     !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
 			dst_release(dst);
-			dst = NULL;
+			return NULL;
 		}
 	}
 
-out:
 	return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5626908..1644896 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
 
 	/* Check if mac has already been added as part of uc-list */
 	for (i = 0; i < adapter->uc_macs; i++) {
-		if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
-				     mac)) {
+		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
 			/* mac already added, skip addition */
 			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
 			return 0;
@@ -363,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 		status = -EPERM;
 		goto err;
 	}
-done:
+
+	/* Remember currently programmed MAC */
 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
 	return 0;
@@ -1679,14 +1680,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
 
 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
 {
-	if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
-			     adapter->dev_mac)) {
+	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
 		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
 		return 0;
 	}
 
-	return be_cmd_pmac_add(adapter,
-			       (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
 			       adapter->if_handle,
 			       &adapter->pmac_id[uc_idx + 1], 0);
 }
@@ -1722,9 +1721,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
 	}
 
 	if (adapter->update_uc_list) {
-		i = 1; /* First slot is claimed by the Primary MAC */
-
 		/* cache the uc-list in adapter array */
+		i = 0;
 		netdev_for_each_uc_addr(ha, netdev) {
 			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
 			i++;
@@ -3639,8 +3637,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
 	be_clear_uc_list(adapter);
 	be_clear_mc_list(adapter);
@@ -3794,12 +3794,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 	if (status)
 		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition is true, as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
 		if (status)
 			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
 	}
 
@@ -4573,6 +4588,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 12aef1b..849b871 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2923,6 +2923,7 @@ static void set_multicast_list(struct net_device *ndev)
 	struct netdev_hw_addr *ha;
 	unsigned int i, bit, data, crc, tmp;
 	unsigned char hash;
+	unsigned int hash_high = 0, hash_low = 0;
 
 	if (ndev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2945,11 +2946,7 @@ static void set_multicast_list(struct net_device *ndev)
 		return;
 	}
 
-	/* Clear filter and add the addresses in hash register
-	 */
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-
+	/* Add the addresses in hash register */
 	netdev_for_each_mc_addr(ha, ndev) {
 		/* calculate crc32 value of mac address */
 		crc = 0xffffffff;
@@ -2967,16 +2964,14 @@ static void set_multicast_list(struct net_device *ndev)
 		 */
 		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
 
-		if (hash > 31) {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-			tmp |= 1 << (hash - 32);
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-		} else {
-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-			tmp |= 1 << hash;
-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-		}
+		if (hash > 31)
+			hash_high |= 1 << (hash - 32);
+		else
+			hash_low |= 1 << hash;
 	}
+
+	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 }
 
 /* Set a MAC change in hardware. */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b8778e7..7c6c146 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
 	send_map_query(adapter);
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		init_rx_pool(adapter, &adapter->rx_pool[i],
-			     IBMVNIC_BUFFS_PER_POOL, i,
+			     adapter->req_rx_add_entries_per_subcrq, i,
 			     be64_to_cpu(size_array[i]), 1);
 		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
 			dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 		tx_pool->tx_buff =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
 		if (!tx_pool->tx_buff)
 			goto tx_pool_alloc_failed;
 
 		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-					 adapter->max_tx_entries_per_subcrq *
+					 adapter->req_tx_entries_per_subcrq *
 					 adapter->req_mtu))
 			goto tx_ltb_alloc_failed;
 
 		tx_pool->free_map =
-		    kcalloc(adapter->max_tx_entries_per_subcrq,
+		    kcalloc(adapter->req_tx_entries_per_subcrq,
 			    sizeof(int), GFP_KERNEL);
 		if (!tx_pool->free_map)
 			goto tx_fm_alloc_failed;
 
-		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
 			tx_pool->free_map[j] = j;
 
 		tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_tx_buff *tx_buff = NULL;
+	struct ibmvnic_sub_crq_queue *tx_scrq;
 	struct ibmvnic_tx_pool *tx_pool;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int ret = 0;
 
 	tx_pool = &adapter->tx_pool[queue_num];
+	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 				   be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	tx_pool->consumer_index =
 	    (tx_pool->consumer_index + 1) %
-		adapter->max_tx_entries_per_subcrq;
+		adapter->req_tx_entries_per_subcrq;
 
 	tx_buff = &tx_pool->tx_buff[index];
 	tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		if (tx_pool->consumer_index == 0)
 			tx_pool->consumer_index =
-				adapter->max_tx_entries_per_subcrq - 1;
+				adapter->req_tx_entries_per_subcrq - 1;
 		else
 			tx_pool->consumer_index--;
 
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
+
+	atomic_inc(&tx_scrq->used);
+
+	if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+		netdev_info(netdev, "Stopping queue %d\n", queue_num);
+		netif_stop_subqueue(netdev, queue_num);
+	}
+
 	tx_packets++;
 	tx_bytes += skb->len;
 	txq->trans_start = jiffies;
@@ -1220,6 +1230,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 	scrq->adapter = adapter;
 	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 	scrq->cur = 0;
+	atomic_set(&scrq->used, 0);
 	scrq->rx_skb_top = NULL;
 	spin_lock_init(&scrq->lock);
 
@@ -1368,14 +1379,28 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 						 DMA_TO_DEVICE);
 			}
 
-			if (txbuff->last_frag)
+			if (txbuff->last_frag) {
+				atomic_dec(&scrq->used);
+
+				if (atomic_read(&scrq->used) <=
+				    (adapter->req_tx_entries_per_subcrq / 2) &&
+				    netif_subqueue_stopped(adapter->netdev,
+							   txbuff->skb)) {
+					netif_wake_subqueue(adapter->netdev,
+							    scrq->pool_index);
+					netdev_dbg(adapter->netdev,
+						   "Started queue %d\n",
+						   scrq->pool_index);
+				}
+
 				dev_kfree_skb_any(txbuff->skb);
+			}
 
 			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
 						     producer_index] = index;
 			adapter->tx_pool[pool].producer_index =
 			    (adapter->tx_pool[pool].producer_index + 1) %
-			    adapter->max_tx_entries_per_subcrq;
+			    adapter->req_tx_entries_per_subcrq;
 		}
 		/* remove tx_comp scrq*/
 		next->tx_comp.first = 0;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index dd775d9..892eda3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
 	spinlock_t lock;
 	struct sk_buff *rx_skb_top;
 	struct ibmvnic_adapter *adapter;
+	atomic_t used;
 };
 
 struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0641c00..afb7ebe 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -398,6 +398,7 @@
 #define E1000_ICR_LSC           0x00000004 /* Link Status Change */
 #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
 #define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b322011..f457c57 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
 *  Checks to see if the link status of the hardware has changed.  If a
  *  change in link status has been detected, then we read the PHY registers
  *  to get the current speed/duplex if link exists.
+ *
+ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ *  up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 0;
+		return 1;
 
 	/* First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val)
+	if (ret_val) {
 		e_dbg("Error configuring flow control\n");
+		return ret_val;
+	}
 
-	return ret_val;
+	return 1;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7017281..0feddf3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1905,14 +1905,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 icr;
+	bool enable = true;
 
-	hw->mac.get_link_status = true;
-
-	/* guard against interrupt when we're going down */
-	if (!test_bit(__E1000_DOWN, &adapter->state)) {
-		mod_timer(&adapter->watchdog_timer, jiffies + 1);
-		ew32(IMS, E1000_IMS_OTHER);
+	icr = er32(ICR);
+	if (icr & E1000_ICR_RXO) {
+		ew32(ICR, E1000_ICR_RXO);
+		enable = false;
+		/* napi poll will re-enable Other, make sure it runs */
+		if (napi_schedule_prep(&adapter->napi)) {
+			adapter->total_rx_bytes = 0;
+			adapter->total_rx_packets = 0;
+			__napi_schedule(&adapter->napi);
+		}
 	}
+	if (icr & E1000_ICR_LSC) {
+		ew32(ICR, E1000_ICR_LSC);
+		hw->mac.get_link_status = true;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+	if (enable && !test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_OTHER);
 
 	return IRQ_HANDLED;
 }
@@ -2683,7 +2699,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
 		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
-				ew32(IMS, adapter->rx_ring->ims_val);
+				ew32(IMS, adapter->rx_ring->ims_val |
+				     E1000_IMS_OTHER);
 			else
 				e1000_irq_enable(adapter);
 		}
@@ -4178,7 +4195,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (adapter->msix_entries)
-		ew32(ICS, E1000_ICS_OTHER);
+		ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
 	else
 		ew32(ICS, E1000_ICS_LSC);
 }
@@ -5056,7 +5073,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
 	case e1000_media_type_copper:
 		if (hw->mac.get_link_status) {
 			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = !hw->mac.get_link_status;
+			link_active = ret_val > 0;
 		} else {
 			link_active = true;
 		}
@@ -5074,7 +5091,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
 		break;
 	}
 
-	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
 		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
 		e_info("Gigabit has been disabled, downgrading speed\n");
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index d78d47b..86ff096 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 	s32 ret_val = 0;
 	u16 i, phy_status;
 
+	*success = false;
 	for (i = 0; i < iterations; i++) {
 		/* Some PHYs require the MII_BMSR register to be read
 		 * twice due to the link bit being sticky.  No harm doing
@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 		ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
 		if (ret_val)
 			break;
-		if (phy_status & BMSR_LSTATUS)
+		if (phy_status & BMSR_LSTATUS) {
+			*success = true;
 			break;
+		}
 		if (usec_interval >= 1000)
 			msleep(usec_interval / 1000);
 		else
 			udelay(usec_interval);
 	}
 
-	*success = (i < iterations);
-
 	return ret_val;
 }
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 4d19e46..3693ae1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -508,8 +508,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
 int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
 			  int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
-int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
-			int unused);
+int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
+			int __always_unused min_rate, int max_rate);
 int fm10k_ndo_get_vf_config(struct net_device *netdev,
 			    int vf_idx, struct ifla_vf_info *ivi);
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5f4dac0..e72fd52 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -126,6 +126,9 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
 		struct fm10k_mbx_info *mbx = &vf_info->mbx;
 		u16 glort = vf_info->glort;
 
+		/* process the SM mailbox first to drain outgoing messages */
+		hw->mbx.ops.process(hw, &hw->mbx);
+
 		/* verify port mapping is valid, if not reset port */
 		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
 			hw->iov.ops.reset_lport(hw, vf_info);
@@ -482,7 +485,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
 }
 
 int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
-			int __always_unused unused, int rate)
+			int __always_unused min_rate, int max_rate)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
 	struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -493,14 +496,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
 		return -EINVAL;
 
 	/* rate limit cannot be less than 10Mbps or greater than link speed */
-	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
+	if (max_rate &&
+	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
 		return -EINVAL;
 
 	/* store values */
-	iov_data->vf_info[vf_idx].rate = rate;
+	iov_data->vf_info[vf_idx].rate = max_rate;
 
 	/* update hardware configuration */
-	hw->iov.ops.configure_tc(hw, vf_idx, rate);
+	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5de9378..2aae6f8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1225,7 +1225,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 31c97e3..becffd1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3604,7 +3604,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if the descriptor isn't done, no work yet to do */
 		if (!(eop_desc->cmd_type_offset_bsz &
@@ -4217,8 +4217,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_enable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_enable(&q_vector->napi);
+	}
 }
 
 /**
@@ -4232,8 +4236,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
 	if (!vsi->netdev)
 		return;
 
-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-		napi_disable(&vsi->q_vectors[q_idx]->napi);
+	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+		if (q_vector->rx.ring || q_vector->tx.ring)
+			napi_disable(&q_vector->napi);
+	}
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6287bf6..c543039 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -679,7 +679,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* we have caught up to head, no work left to do */
 		if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 75f2a2c..c03800d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -184,7 +184,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* we have caught up to head, no work left to do */
 		if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c6c2562..ca54f76 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3102,6 +3102,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	/* Setup and initialize a copy of the hw vlan table array */
 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
 				       GFP_ATOMIC);
+	if (!adapter->shadow_vfta)
+		return -ENOMEM;
 
 	/* This call may decrease the number of queues */
 	if (igb_init_interrupt_scheme(adapter, true)) {
@@ -6660,7 +6662,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7dff7f6..5428e39 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77d3039..ad33622 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3696,10 +3696,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 	fw_cmd.ver_build = build;
 	fw_cmd.ver_sub = sub;
 	fw_cmd.hdr.checksum = 0;
-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 	fw_cmd.pad = 0;
 	fw_cmd.pad2 = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 
 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
 		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 334eb96..a5428b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1171,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 60f0bf7..77a60aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -617,6 +617,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
 		/* convert offset from words to bytes */
 		buffer.address = cpu_to_be32((offset + current_word) * 2);
 		buffer.length = cpu_to_be16(words_to_read * 2);
+		buffer.pad2 = 0;
+		buffer.pad3 = 0;
 
 		status = ixgbe_host_interface_command(hw, &buffer,
 						      sizeof(buffer),
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index cbf70fe..1499ce2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -325,7 +325,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 6ea10a9..fa46326 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1182,6 +1182,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }
 
@@ -1905,9 +1909,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+			mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
 			dev->stats.rx_errors++;
-			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -2922,7 +2926,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
 	int queue;
 
-	for (queue = 0; queue < txq_number; queue++)
+	for (queue = 0; queue < rxq_number; queue++)
 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e36bebc..dae9dcf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2304,6 +2304,17 @@ static int sync_toggles(struct mlx4_dev *dev)
 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
 			/* PCI might be offline */
+
+			/* If device removal has been requested,
+			 * do not continue retrying.
+			 */
+			if (dev->persist->interface_state &
+			    MLX4_INTERFACE_STATE_NOWAIT) {
+				mlx4_warn(dev,
+					  "communication channel is offline\n");
+				return -EIO;
+			}
+
 			msleep(100);
 			wr_toggle = swab32(readl(&priv->mfunc.comm->
 					   slave_write));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index d4d97ca..f9897d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -251,13 +251,9 @@ static u32 freq_to_shift(u16 freq)
 {
 	u32 freq_khz = freq * 1000;
 	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
-	u64 tmp_rounded =
-		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
-		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
-	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : tmp_rounded;
+	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
 	/* calculate max possible multiplier in order to fit in 64bit */
-	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
 
 	/* This comes from the reverse of clocksource_khz2mult */
 	return ilog2(div_u64(max_mul * freq_khz, 1000000));
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 727122d..5411ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
 		if (!offline_bit)
 			return 0;
+
+		/* If device removal has been requested,
+		 * do not continue retrying.
+		 */
+		if (dev->persist->interface_state &
+		    MLX4_INTERFACE_STATE_NOWAIT)
+			break;
+
 		/* There are cases as part of AER/Reset flow that PF needs
 		 * around 100 msec to load. We therefore sleep for 100 msec
 		 * to allow other tasks to make use of that CPU during this
@@ -3954,6 +3962,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 	struct devlink *devlink = priv_to_devlink(priv);
 	int active_vfs = 0;
 
+	if (mlx4_is_slave(dev))
+		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
 	mutex_lock(&persist->interface_state_mutex);
 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
 	mutex_unlock(&persist->interface_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 4de3c28..331a6ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1015,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
 						u32 *match_criteria)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct list_head *prev = ft->node.children.prev;
+	struct list_head *prev = &ft->node.children;
 	unsigned int candidate_index = 0;
 	struct mlx5_flow_group *fg;
 	void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b3309f2..981cd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1283,6 +1283,7 @@ static int init_one(struct pci_dev *pdev,
 	if (err)
 		goto clean_load;
 
+	pci_save_state(pdev);
 	return 0;
 
 clean_load:
@@ -1331,9 +1332,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain the health wq */
+	/* In case of kernel call drain the health wq */
 	if (state) {
-		pci_save_state(pdev);
 		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
@@ -1385,6 +1385,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
+	pci_save_state(pdev);
 
 	if (wait_vital(pdev)) {
 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6460c72..a01e6c0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -788,7 +788,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
 #define MLXSW_REG_SPVM_ID 0x200F
 #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +	\
 		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
 
@@ -1757,7 +1757,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
 #define MLXSW_REG_SPVMLR_ID 0x2020
 #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
 #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
 #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
 			      MLXSW_REG_SPVMLR_REC_LEN * \
 			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 4367dd6..0622fd0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -278,6 +279,13 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 	return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
 static void moxart_tx_finished(struct net_device *ndev)
 {
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
 		tx_tail = TX_NEXT(tx_tail);
 	}
 	priv->tx_tail = tx_tail;
+	if (netif_queue_stopped(ndev) &&
+	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+		netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
 	void *desc;
 	unsigned int len;
-	unsigned int tx_head = priv->tx_head;
+	unsigned int tx_head;
 	u32 txdes1;
 	int ret = NETDEV_TX_BUSY;
 
+	spin_lock_irq(&priv->txlock);
+
+	tx_head = priv->tx_head;
 	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-	spin_lock_irq(&priv->txlock);
+	if (moxart_tx_queue_space(ndev) == 1)
+		netif_stop_queue(ndev);
+
 	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
 		net_dbg_ratelimited("no TX space for packet\n");
 		priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563..afc32ec 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)		(((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE		1600
 #define TX_BUF_SIZE_MAX		(TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD	16
 
 #define RX_DESC_NUM		64
 #define RX_DESC_NUM_MASK	(RX_DESC_NUM-1)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0c42c24..ed014bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -373,8 +373,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+		u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-		p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+		p_conn->cid_count = roundup(p_conn->cid_count, align);
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 62ae55b..a3360cb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -187,6 +187,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
 	/* If we need to reuse or there's no replacement buffer, repost this */
 	if (rc)
 		goto out_post;
+	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
@@ -441,7 +443,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
-				      unsigned long lock_flags,
+				      unsigned long *p_lock_flags,
 				      bool b_last_cqe)
 {
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -462,10 +464,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 			  "Mismatch between active_descq and the LL2 Rx chain\n");
 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-	spin_lock_irqsave(&p_rx->lock, lock_flags);
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
 	return 0;
 }
@@ -505,7 +507,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-							cqe, flags, b_last_cqe);
+							cqe, &flags,
+							b_last_cqe);
 			break;
 		default:
 			rc = -EIO;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 11623aa..10d3a9f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -941,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	/* Receive error message handling */
 	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
 	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
-	if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
 		ndev->stats.rx_over_errors = priv->rx_over_errors;
-		netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
-	}
-	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
 		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
-		netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
-	}
 out:
 	return budget - quota;
 }
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1d85109..3d5d5d54 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4967,7 +4967,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 		 * MCFW do not support VFs.
 		 */
 		rc = efx_ef10_vport_set_mac_address(efx);
-	} else {
+	} else if (rc) {
 		efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
 				       sizeof(inbuf), NULL, 0, rc);
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ef6bff8..adf61a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1795,6 +1795,7 @@ static int stmmac_open(struct net_device *dev)
 
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+	priv->mss = 0;
 
 	ret = alloc_dma_desc_resources(priv);
 	if (ret < 0) {
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index e46b1eb..7ea8ead 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1277,7 +1277,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
 	fjes_set_ethtool_ops(netdev);
 	netdev->mtu = fjes_support_mtu[3];
 	netdev->flags |= IFF_BROADCAST;
-	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
 static void fjes_irq_watch_task(struct work_struct *work)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index cebde07..cb206e5 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
 	struct socket		*sock0;
 	struct socket		*sock1u;
 
-	struct net		*net;
 	struct net_device	*dev;
 
 	unsigned int		hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-	xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
 	switch (udp_sk(sk)->encap_type) {
 	case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
 				    pktinfo.iph->tos,
 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
-				    htons(IP_DF),
+				    0,
 				    pktinfo.gtph_port, pktinfo.gtph_port,
 				    true, false);
 		break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net);
+			    int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
 	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-	err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+	err = gtp_encap_enable(dev, gtp, fd0, fd1);
 	if (err < 0)
 		goto out_err;
 
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net)
+			    int fd_gtp0, int fd_gtp1)
 {
 	struct udp_tunnel_sock_cfg tuncfg = {NULL};
 	struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
 
 	gtp->sock0 = sock0;
 	gtp->sock1u = sock1u;
-	gtp->net = src_net;
 
 	tuncfg.sk_user_data = gtp;
 	tuncfg.encap_rcv = gtp_encap_recv;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b4e9907..980e385 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -404,7 +404,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 	struct dst_entry *dst;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi6 fl6 = {
-		.flowi6_iif = dev->ifindex,
+		.flowi6_oif = dev->ifindex,
 		.daddr = ip6h->daddr,
 		.saddr = ip6h->saddr,
 		.flowi6_flags = FLOWI_FLAG_ANYSRC,
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a0849f4..c0192f9 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 		memset(rd, 0, sizeof(*rd));
 		rd->hw = hwmap + i;
 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-		if (rd->buf == NULL ||
-		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+		if (rd->buf)
+			busaddr = pci_map_single(pdev, rd->buf, len, dir);
+		if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
 			if (rd->buf) {
 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
 						    __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
 				rd = r->rd + j;
 				busaddr = rd_get_addr(rd);
 				rd_set_addr_status(rd, 0, 0);
-				if (busaddr)
-					pci_unmap_single(pdev, busaddr, len, dir);
+				pci_unmap_single(pdev, busaddr, len, dir);
 				kfree(rd->buf);
 				rd->buf = NULL;
 			}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index dc8ccac..6d55049 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -452,7 +452,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 					      struct macvlan_dev, list);
 	else
 		vlan = macvlan_hash_lookup(port, eth->h_dest);
-	if (vlan == NULL)
+	if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
 		return RX_HANDLER_PASS;
 
 	dev = vlan->dev;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index a52b560..3603eec 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -166,7 +166,7 @@ static int at803x_set_wol(struct phy_device *phydev,
 		mac = (const u8 *) ndev->dev_addr;
 
 		if (!is_valid_ether_addr(mac))
-			return -EFAULT;
+			return -EINVAL;
 
 		for (i = 0; i < 3; i++) {
 			phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2229188..fbf5945 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1020,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id		= PHY_ID_KSZ8795,
 	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8795",
-	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedf..1e2d4f1 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
 	if (err)
 		return err;
 
-	ks->regs_attr.size = ks->chip->regs_size;
 	memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+	ks->regs_attr.size = ks->chip->regs_size;
 
 	err = ks8995_reset(ks);
 	if (err)
 		return err;
 
+	sysfs_attr_init(&ks->regs_attr.attr);
 	err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
 	if (err) {
 		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 440d5f4..b883af9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -958,6 +958,7 @@ static __net_exit void ppp_exit_net(struct net *net)
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 
+	mutex_destroy(&pn->all_ppp_mutex);
 	idr_destroy(&pn->units_idr);
 }
 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9cf11c8..105fbfb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -74,9 +74,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
 		net->hard_header_len = 0;
 		net->addr_len        = 0;
 		net->flags           = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+		set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 		netdev_dbg(net, "mode: raw IP\n");
 	} else if (!net->header_ops) { /* don't bother if already set */
 		ether_setup(net);
+		clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
 		netdev_dbg(net, "mode: Ethernet\n");
 	}
 
@@ -580,6 +582,10 @@ static const struct usb_device_id products[] = {
 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Motorola Mapphone devices with MDM6600 */
+		USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 
 	/* 2. Combined interface devices matching on class+protocol */
 	{	/* Huawei E367 and possibly others in "Windows mode" */
@@ -936,6 +942,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
 	{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)},	/* Quectel EC21 Mini PCIe */
+	{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},	/* Quectel BG96 */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index afb953a..b2d7c7e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"8"
+#define NET_VERSION		"9"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
 #define RTL8153_RMS		RTL8153_MAX_PACKET
 #define RTL8152_TX_TIMEOUT	(5 * HZ)
 #define RTL8152_NAPI_WEIGHT	64
+#define rx_reserved_size(x)	((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+				 sizeof(struct rx_desc) + RX_ALIGN)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -1292,6 +1294,7 @@ static void intr_callback(struct urb *urb)
 		}
 	} else {
 		if (netif_carrier_ok(tp->netdev)) {
+			netif_stop_queue(tp->netdev);
 			set_bit(RTL8152_LINK_CHG, &tp->flags);
 			schedule_delayed_work(&tp->schedule, 0);
 		}
@@ -1362,6 +1365,7 @@ static int alloc_all_mem(struct r8152 *tp)
 	spin_lock_init(&tp->rx_lock);
 	spin_lock_init(&tp->tx_lock);
 	INIT_LIST_HEAD(&tp->tx_free);
+	INIT_LIST_HEAD(&tp->rx_done);
 	skb_queue_head_init(&tp->tx_queue);
 	skb_queue_head_init(&tp->rx_queue);
 
@@ -2252,8 +2256,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
-	u32 mtu = tp->netdev->mtu;
-	u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+	u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
 
 	ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -3165,6 +3168,9 @@ static void set_carrier(struct r8152 *tp)
 			napi_enable(&tp->napi);
 			netif_wake_queue(netdev);
 			netif_info(tp, link, netdev, "carrier on\n");
+		} else if (netif_queue_stopped(netdev) &&
+			   skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+			netif_wake_queue(netdev);
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3698,8 +3704,18 @@ static int rtl8152_resume(struct usb_interface *intf)
 			tp->rtl_ops.autosuspend_en(tp, false);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
-			if (netif_carrier_ok(tp->netdev))
-				rtl_start_rx(tp);
+
+			if (netif_carrier_ok(tp->netdev)) {
+				if (rtl8152_get_speed(tp) & LINK_STATUS) {
+					rtl_start_rx(tp);
+				} else {
+					netif_carrier_off(tp->netdev);
+					tp->rtl_ops.disable(tp);
+					netif_info(tp, link, tp->netdev,
+						   "linking down\n");
+				}
+			}
+
 			napi_enable(&tp->napi);
 			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			smp_mb__after_atomic();
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d5071e3..4ab82b9 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -485,7 +485,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		return -ENOLINK;
 	}
 
-	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
+	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
+		skb = __netdev_alloc_skb(dev->net, size, flags);
+	else
+		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
 	if (!skb) {
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910..f8eb66e 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
 	struct i2400mu *i2400mu;
 	struct usb_device *usb_dev = interface_to_usbdev(iface);
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+		return -ENODEV;
+
 	if (usb_dev->speed != USB_SPEED_HIGH)
 		dev_err(dev, "device not connected as high speed\n");
 
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 70ecd82..098c814 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
 						       skb_tail_pointer(newskb),
 						       RX_PKT_SIZE,
 						       PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(priv->pdev,
+					   priv->rx_buffers[entry].mapping)) {
+					priv->rx_buffers[entry].skb = NULL;
+					dev_kfree_skb(newskb);
+					skb = NULL;
+					/* TODO: update rx dropped stats */
+				}
 			} else {
 				skb = NULL;
 				/* TODO: update rx dropped stats */
@@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
 						  skb_tail_pointer(rx_info->skb),
 						  RX_PKT_SIZE,
 						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+			dev_kfree_skb(rx_info->skb);
+			rx_info->skb = NULL;
+			break;
+		}
+
 		desc->buffer1 = cpu_to_le32(rx_info->mapping);
 		desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
 	}
@@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
 }
 
 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
+static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 			   u16 plcp_signal,
 			   size_t hdrlen)
 {
@@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 
 	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
 				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(priv->pdev, mapping))
+		return -ENOMEM;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 
 	/* Trigger transmit poll */
 	ADM8211_CSR_WRITE(TDR, 0);
+
+	return 0;
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
@@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev,
 
 	txhdr->retry_limit = info->control.rates[0].count;
 
-	adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
+	if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
+		/* Drop packet */
+		ieee80211_free_txskb(dev, skb);
+	}
 }
 
 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
@@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev,
 	priv->rx_ring_size = rx_ring_size;
 	priv->tx_ring_size = tx_ring_size;
 
-	if (adm8211_alloc_rings(dev)) {
+	err = adm8211_alloc_rings(dev);
+	if (err) {
 		printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
 		       pci_name(pdev));
 		goto err_iounmap;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 366d3dc..7b3017f 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -691,8 +691,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
 		   "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
 		   result, board_id, chip_id);
 
-	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+	    (board_id == 0)) {
+		ath10k_warn(ar, "board id does not exist in otp, ignoring it\n");
 		return -EOPNOTSUPP;
+	}
 
 	ar->id.bmi_ids_valid = true;
 	ar->id.bmi_board_id = board_id;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 30e98af..17ab8ef 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1224,6 +1224,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
 		return ath10k_monitor_stop(ar);
 }
 
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (!arvif->is_started) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	vdev_param = ar->wmi.vdev_param->protection_mode;
+
+	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+		   arvif->vdev_id, arvif->use_cts_prot);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					 arvif->use_cts_prot ? 1 : 0);
+}
+
 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
 {
 	struct ath10k *ar = arvif->ar;
@@ -4668,7 +4698,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(arvif, &ar->arvifs, list) {
-		WARN_ON(arvif->txpower < 0);
+		if (arvif->txpower <= 0)
+			continue;
 
 		if (txpower == -1)
 			txpower = arvif->txpower;
@@ -4676,8 +4707,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
 			txpower = min(txpower, arvif->txpower);
 	}
 
-	if (WARN_ON(txpower == -1))
-		return -EINVAL;
+	if (txpower == -1)
+		return 0;
 
 	ret = ath10k_mac_txpower_setup(ar, txpower);
 	if (ret) {
@@ -5321,20 +5352,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
 		arvif->use_cts_prot = info->use_cts_prot;
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
-			   arvif->vdev_id, info->use_cts_prot);
 
 		ret = ath10k_recalc_rtscts_prot(arvif);
 		if (ret)
 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
 				    arvif->vdev_id, ret);
 
-		vdev_param = ar->wmi.vdev_param->protection_mode;
-		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-						info->use_cts_prot ? 1 : 0);
-		if (ret)
-			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
-				    info->use_cts_prot, arvif->vdev_id, ret);
+		if (ath10k_mac_can_set_cts_prot(arvif)) {
+			ret = ath10k_mac_set_cts_prot(arvif);
+			if (ret)
+				ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+					    arvif->vdev_id, ret);
+		}
 	}
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -7355,6 +7384,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 		arvif->is_up = true;
 	}
 
+	if (ath10k_mac_can_set_cts_prot(arvif)) {
+		ret = ath10k_mac_set_cts_prot(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index e64f593..0e4d49a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 		struct ath10k_fw_stats_pdev *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
@@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 		struct ath10k_fw_stats_vdev *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
@@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 		struct ath10k_fw_stats_peer *dst;
 
 		src = data;
-		if (data_len < sizeof(*src))
+		if (data_len < sizeof(*src)) {
+			kfree(tb);
 			return -EPROTO;
+		}
 
 		data += sizeof(*src);
 		data_len -= sizeof(*src);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 1fa7f84..8e9480c 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
 	ssize_t len;
 	int r;
 
+	if (count < 1)
+		return -EINVAL;
+
 	if (sc->cur_chan->nvifs > 1)
 		return -EOPNOTSUPP;
 
@@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
 	if (copy_from_user(buf, user_buf, len))
 		return -EFAULT;
 
+	buf[len] = '\0';
+
 	if (strtobool(buf, &start))
 		return -EINVAL;
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8e3c6f4..edffe5a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4080,8 +4080,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	sdio_release_host(sdiodev->func[1]);
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
-	device_release_driver(dev);
 	device_release_driver(&sdiodev->func[2]->dev);
+	device_release_driver(dev);
 }
 
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9789f3c..f1231c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2320,7 +2320,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	/* Called when we need to transmit (a) frame(s) from agg queue */
+	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
 
 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
 					  tids, more_data, true);
@@ -2340,7 +2340,8 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
 
-		if (tid_data->state != IWL_AGG_ON &&
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    tid_data->state != IWL_AGG_ON &&
 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 			continue;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index e64aeb4..bdd1dee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3032,7 +3032,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg)
+				       bool single_sta_queue)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {
@@ -3052,14 +3052,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
 
-	/* If we're releasing frames from aggregation queues then check if the
-	 * all queues combined that we're releasing frames from have
+	/* If we're releasing frames from aggregation or dqa queues then check
+	 * if all the queues that we're releasing frames from, combined, have:
 	 *  - more frames than the service period, in which case more_data
 	 *    needs to be set
 	 *  - fewer than 'cnt' frames, in which case we need to adjust the
 	 *    firmware command (but do that unconditionally)
 	 */
-	if (agg) {
+	if (single_sta_queue) {
 		int remaining = cnt;
 		int sleep_tx_count;
 
@@ -3069,7 +3069,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 			u16 n_queued;
 
 			tid_data = &mvmsta->tid_data[tid];
-			if (WARN(tid_data->state != IWL_AGG_ON &&
+			if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+				 tid_data->state != IWL_AGG_ON &&
 				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 				 "TID %d state is %d\n",
 				 tid, tid_data->state)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e068d53..f65950e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -545,7 +545,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 				       struct ieee80211_sta *sta,
 				       enum ieee80211_frame_release_type reason,
 				       u16 cnt, u16 tids, bool more_data,
-				       bool agg);
+				       bool single_sta_queue);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		      bool drain);
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 092ae00..7465d4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -621,8 +622,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	 * values.
 	 * Note that we don't need to make sure it isn't agg'd, since we're
 	 * TXing non-sta
+	 * For DQA mode - we shouldn't increase it though
 	 */
-	atomic_inc(&mvm->pending_frames[sta_id]);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		atomic_inc(&mvm->pending_frames[sta_id]);
 
 	return 0;
 }
@@ -1009,11 +1012,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 	spin_unlock(&mvmsta->lock);
 
-	/* Increase pending frames count if this isn't AMPDU */
-	if ((iwl_mvm_is_dqa_supported(mvm) &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
-	     mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
-	    (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+	/* Increase pending frames count if this isn't AMPDU or DQA queue */
+	if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
 		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -1083,12 +1083,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	lockdep_assert_held(&mvmsta->lock);
 
 	if ((tid_data->state == IWL_AGG_ON ||
-	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+	     iwl_mvm_is_dqa_supported(mvm)) &&
 	    iwl_mvm_tid_queued(tid_data) == 0) {
 		/*
-		 * Now that this aggregation queue is empty tell mac80211 so it
-		 * knows we no longer have frames buffered for the station on
-		 * this TID (for the TIM bitmap calculation.)
+		 * Now that this aggregation or DQA queue is empty tell
+		 * mac80211 so it knows we no longer have frames buffered for
+		 * the station on this TID (for the TIM bitmap calculation.)
 		 */
 		ieee80211_sta_set_buffered(sta, tid, false);
 	}
@@ -1261,7 +1262,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	u8 skb_freed = 0;
 	u16 next_reclaimed, seq_ctl;
 	bool is_ndp = false;
-	bool txq_agg = false; /* Is this TXQ aggregated */
 
 	__skb_queue_head_init(&skbs);
 
@@ -1287,6 +1287,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			info->flags |= IEEE80211_TX_STAT_ACK;
 			break;
 		case TX_STATUS_FAIL_DEST_PS:
+			/* In DQA, the FW should have stopped the queue and not
+			 * return this status
+			 */
+			WARN_ON(iwl_mvm_is_dqa_supported(mvm));
 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
 			break;
 		default:
@@ -1391,15 +1395,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			bool send_eosp_ndp = false;
 
 			spin_lock_bh(&mvmsta->lock);
-			if (iwl_mvm_is_dqa_supported(mvm)) {
-				enum iwl_mvm_agg_state state;
-
-				state = mvmsta->tid_data[tid].state;
-				txq_agg = (state == IWL_AGG_ON ||
-					state == IWL_EMPTYING_HW_QUEUE_DELBA);
-			} else {
-				txq_agg = txq_id >= mvm->first_agg_queue;
-			}
 
 			if (!is_ndp) {
 				tid_data->next_reclaimed = next_reclaimed;
@@ -1456,11 +1451,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
 	 */
-	if (txq_agg)
+	if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
 		goto out;
 
 	/* We can't free more than one frame at once on a shared queue */
-	WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+	WARN_ON(skb_freed > 1);
 
 	/* If we have still frames for this STA nothing to do here */
 	if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index d5a3bf9..ab6d39e 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
 {
 	struct p54_common *priv = dev->priv;
 
-#ifdef CONFIG_P54_LEDS
-	p54_unregister_leds(priv);
-#endif /* CONFIG_P54_LEDS */
-
 	if (priv->registered) {
 		priv->registered = false;
+#ifdef CONFIG_P54_LEDS
+		p54_unregister_leds(priv);
+#endif /* CONFIG_P54_LEDS */
 		ieee80211_unregister_hw(dev);
 	}
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d2a28a9..4b462dc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3047,6 +3047,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 {
 	struct hwsim_new_radio_params param = { 0 };
 	const char *hwname = NULL;
+	int ret;
 
 	param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
 	param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3086,7 +3087,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
 		param.regd = hwsim_world_regdom_custom[idx];
 	}
 
-	return mac80211_hwsim_new_radio(info, &param);
+	ret = mac80211_hwsim_new_radio(info, &param);
+	kfree(hwname);
+	return ret;
 }
 
 static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 8718950..8d601dc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -2296,6 +2296,12 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
 	mmc_hw_reset(func->card->host);
 	sdio_release_host(func);
 
+	/* Previous save_adapter won't be valid after this. We will cancel
+	 * pending work requests.
+	 */
+	clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
+	clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
+
 	mwifiex_sdio_probe(func, device_id);
 }
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index bf3f0a3..9fc6f16 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -4707,8 +4707,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
 		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
 	else
 		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
-	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
+	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
 	rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
 
 	rt2800_register_read(rt2x00dev, LED_CFG, &reg);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 631df69..f57bb2c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
 		if (status >= 0)
 			return 0;
 
-		if (status == -ENODEV) {
+		if (status == -ENODEV || status == -ENOENT) {
 			/* Device has disappeared. */
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 			break;
@@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
 
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
-		if (status == -ENODEV)
+		if (status == -ENODEV || status == -ENOENT)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
 		rt2x00lib_dmadone(entry);
@@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
 
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
-		if (status == -ENODEV)
+		if (status == -ENODEV || status == -ENOENT)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
 		rt2x00lib_dmadone(entry);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index b3f6a9e..27a0e50 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct sk_buff *skb = NULL;
-
+	bool rtstatus;
 	u32 totalpacketlen;
 	u8 u1rsvdpageloc[5] = { 0 };
 	bool b_dlok = false;
@@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
 	memcpy((u8 *)skb_put(skb, totalpacketlen),
 	       &reserved_page_packet, totalpacketlen);
 
-	b_dlok = true;
+	rtstatus = rtl_cmd_send_packet(hw, skb);
+	if (rtstatus)
+		b_dlok = true;
 
 	if (b_dlok) {
 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 1281ebe..82d5389 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1378,6 +1378,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
 
 	ppsc->wakeup_reason = 0;
 
+	do_gettimeofday(&ts);
 	rtlhal->last_suspend_sec = ts.tv_sec;
 
 	switch (fw_reason) {
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index cb7365b..5b1d2e8 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -113,10 +113,10 @@ struct xenvif_stats {
 	 * A subset of struct net_device_stats that contains only the
 	 * fields that are updated in netback.c for each queue.
 	 */
-	unsigned int rx_bytes;
-	unsigned int rx_packets;
-	unsigned int tx_bytes;
-	unsigned int tx_packets;
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
 
 	/* Additional stats used by xenvif */
 	unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5bfaf55..618013e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -225,10 +225,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned long rx_bytes = 0;
-	unsigned long rx_packets = 0;
-	unsigned long tx_bytes = 0;
-	unsigned long tx_packets = 0;
+	u64 rx_bytes = 0;
+	u64 rx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 tx_packets = 0;
 	unsigned int index;
 
 	spin_lock(&vif->lock);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cd442e4..8d498a9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1854,27 +1854,19 @@ static int talk_to_netback(struct xenbus_device *dev,
 		xennet_destroy_queues(info);
 
 	err = xennet_create_queues(info, &num_queues);
-	if (err < 0)
-		goto destroy_ring;
+	if (err < 0) {
+		xenbus_dev_fatal(dev, err, "creating queues");
+		kfree(info->queues);
+		info->queues = NULL;
+		goto out;
+	}
 
 	/* Create shared ring, alloc event channel -- for each queue */
 	for (i = 0; i < num_queues; ++i) {
 		queue = &info->queues[i];
 		err = setup_netfront(dev, queue, feature_split_evtchn);
-		if (err) {
-			/* setup_netfront() will tidy up the current
-			 * queue on error, but we need to clean up
-			 * those already allocated.
-			 */
-			if (i > 0) {
-				rtnl_lock();
-				netif_set_real_num_tx_queues(info->netdev, i);
-				rtnl_unlock();
-				goto destroy_ring;
-			} else {
-				goto out;
-			}
-		}
+		if (err)
+			goto destroy_ring;
 	}
 
 again:
@@ -1964,9 +1956,9 @@ static int talk_to_netback(struct xenbus_device *dev,
 	xenbus_transaction_end(xbt, 1);
  destroy_ring:
 	xennet_disconnect_backend(info);
-	kfree(info->queues);
-	info->queues = NULL;
+	xennet_destroy_queues(info);
  out:
+	device_unregister(&dev->dev);
 	return err;
 }
 
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index fac7cab..d8d189d 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -861,7 +861,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
 	nsindex = to_namespace_index(ndd, 0);
 	memset(nsindex, 0, ndd->nsarea.config_size);
 	for (i = 0; i < 2; i++) {
-		int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
+		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
 
 		if (rc)
 			return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index a38ae34..b8fb1ef 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1451,7 +1451,7 @@ static umode_t namespace_visible(struct kobject *kobj,
 	if (a == &dev_attr_resource.attr) {
 		if (is_namespace_blk(dev))
 			return 0;
-		return a->mode;
+		return 0400;
 	}
 
 	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 78cb3e2..42abdd2 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -270,8 +270,16 @@ static struct attribute *nd_pfn_attributes[] = {
 	NULL,
 };
 
+static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	if (a == &dev_attr_resource.attr)
+		return 0400;
+	return a->mode;
+}
+
 struct attribute_group nd_pfn_attribute_group = {
 	.attrs = nd_pfn_attributes,
+	.is_visible = pfn_visible,
 };
 
 static const struct attribute_group *nd_pfn_attribute_groups[] = {
@@ -344,9 +352,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
-	unsigned long align;
 	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
+	unsigned long align, start_pad;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -390,6 +398,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 
 	align = le32_to_cpu(pfn_sb->align);
 	offset = le64_to_cpu(pfn_sb->dataoff);
+	start_pad = le32_to_cpu(pfn_sb->start_pad);
 	if (align == 0)
 		align = 1UL << ilog2(offset);
 	mode = le32_to_cpu(pfn_sb->mode);
@@ -448,7 +457,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -EBUSY;
 	}
 
-	if ((align && !IS_ALIGNED(offset, align))
+	if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
 		dev_err(&nd_pfn->dev,
 				"bad offset: %#llx dax disabled align: %#lx\n",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fbeca06..719ee5f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1619,7 +1619,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		if (ns->ns_id == nsid) {
-			kref_get(&ns->kref);
+			if (!kref_get_unless_zero(&ns->kref))
+				continue;
 			ret = ns;
 			break;
 		}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8edafd8..5c52a61 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -84,7 +84,7 @@ enum nvme_quirks {
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 54ea90f..e48ecb9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2109,6 +2109,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fe4c48..f791d46 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -381,7 +381,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-	u64 val;
 	u32 val32;
 	u16 status = 0;
 
@@ -391,8 +390,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
 		break;
 	case NVME_FEAT_KATO:
-		val = le64_to_cpu(req->cmd->prop_set.value);
-		val32 = val & 0xffff;
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 55ce769..c89d68a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -422,6 +422,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -430,7 +437,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -458,6 +466,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 		return ret;
 	}
 	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
 
 	return 0;
 }
@@ -816,6 +825,9 @@ static void nvmet_ctrl_free(struct kref *ref)
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d..e56ca3f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	iod->req.cmd = &iod->cmd;
 	iod->req.rsp = &iod->rsp;
 	iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 	kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
@@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	int i;
-
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
-
-		for (i = 1; i < ctrl->queue_count; i++)
-			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+		nvme_loop_destroy_io_queues(ctrl);
 	}
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto out_disable;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_free_queues;
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
 
-		ctrl->queue_count++;
-	}
-
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
-			goto out_free_queues;
+			goto out_destroy_io;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
 	return;
 
-out_free_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret, i;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret || !opts->nr_io_queues)
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
 		return ret;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-		opts->nr_io_queues);
-
-	for (i = 1; i <= opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_destroy_queues;
-
-		ctrl->queue_count++;
-	}
-
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 		goto out_free_tagset;
 	}
 
-	for (i = 1; i <= opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
 }
 
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7655a35..26b87dc 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
 	u16			qid;
 	u16			size;
 	struct completion	free_done;
+	struct completion	confirm_done;
 };
 
 /**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ca8ddc3..53bd325 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
 	u16 status;
 
-	cmd->queue = queue;
-	cmd->n_rdma = 0;
-	cmd->req.port = queue->port;
-
-
 	ib_dma_sync_single_for_cpu(queue->dev->device,
 		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
 		DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
 	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index bc286cb..1cced1d 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1656,3 +1656,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 	iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1292)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+	quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1291)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+	quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 9e0989c..eb05f5ef 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -5444,7 +5444,8 @@ static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
 static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
 				struct pci_dev *pdev, bool enable)
 {
-	bool l1_1_cap_support, l1_2_cap_support;
+	bool l1_1_pcipm_support, l1_2_pcipm_support;
+	bool l1_1_aspm_support, l1_2_aspm_support;
 	u32 val, val2;
 	u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
 	u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;
@@ -5461,11 +5462,14 @@ static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
 	l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
 
 	pci_read_config_dword(pdev, l1ss_cap_offset, &val);
-	l1_1_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
-	l1_2_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
-	if (!l1_1_cap_support && !l1_2_cap_support) {
+	l1_1_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
+	l1_2_pcipm_support = !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
+	l1_1_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
+	l1_2_aspm_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
+	if (!l1_1_pcipm_support && !l1_2_pcipm_support &&
+		!l1_1_aspm_support && !l1_2_aspm_support) {
 		PCIE_DBG(dev,
-			"PCIe: RC%d: PCI device does not support L1.1 and L1.2\n",
+			"PCIe: RC%d: PCI device does not support any L1ss\n",
 			dev->rc_idx);
 		return;
 	}
@@ -5484,14 +5488,18 @@ static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
 		msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
 			PCI_EXP_DEVCTL2_LTR_EN);
 		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
-			(l1_1_cap_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
-			(l1_2_cap_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
+			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
+			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
+			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
+			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0));
 	} else {
 		msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
 			PCI_EXP_DEVCTL2_LTR_EN, 0);
 		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
-			(l1_1_cap_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
-			(l1_2_cap_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
+			(l1_1_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
+			(l1_2_pcipm_support ? PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
+			(l1_1_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
+			(l1_2_aspm_support ? PCI_L1SS_CTL1_ASPM_L1_2 : 0), 0);
 	}
 
 	pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
@@ -5560,7 +5568,7 @@ static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
 			u32 val;
 
 			pci_read_config_dword(child_pdev,
-				pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
+				child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
 			child_l0s_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L0S);
 			if (child_l0s_enable)
 				break;
@@ -5588,7 +5596,9 @@ static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
 			pci_read_config_dword(child_pdev,
 				l1ss_cap_id_offset + PCI_L1SS_CTL1, &val);
 			child_l1ss_enable = !!(val &
-				(PCI_L1SS_CTL1_ASPM_L1_1 |
+				(PCI_L1SS_CTL1_PCIPM_L1_1 |
+				PCI_L1SS_CTL1_PCIPM_L1_2 |
+				PCI_L1SS_CTL1_ASPM_L1_1 |
 				PCI_L1SS_CTL1_ASPM_L1_2));
 			if (child_l1ss_enable)
 				break;
@@ -5608,7 +5618,7 @@ static void msm_pcie_config_link_pm_rc(struct msm_pcie_dev_t *dev,
 			u32 val;
 
 			pci_read_config_dword(child_pdev,
-				pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
+				child_pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
 			child_l1_enable = !!(val & PCI_EXP_LNKCTL_ASPM_L1);
 			if (child_l1_enable)
 				break;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4722782..1d32fe2 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -164,7 +164,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
 	pci_device_add(virtfn, virtfn->bus);
 	mutex_unlock(&iov->dev->sriov->lock);
 
-	pci_bus_add_device(virtfn);
 	sprintf(buf, "virtfn%u", id);
 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
 	if (rc)
@@ -175,6 +174,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
 
 	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
 
+	pci_bus_add_device(virtfn);
+
 	return 0;
 
 failed2:
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a68e2b..802997e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -953,7 +953,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_update_current_state(pci_dev, PCI_D0);
+	/*
+	 * pci_restore_state() requires the device to be in D0 (because of MSI
+	 * restoration among other things), so force it into D0 in case the
+	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 */
+	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e7d4048..a87c8e1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4214,6 +4214,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 
+
+	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+		return false;
+
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index b1303b3..057465ad 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
 		 * If the error is reported by an end point, we think this
 		 * error is related to the upstream link of the end point.
 		 */
-		pci_walk_bus(dev->bus, cb, &result_data);
+		if (state == pci_channel_io_normal)
+			/*
+			 * the error is non fatal so the bus is ok, just invoke
+			 * the callback for the function that logged the error.
+			 */
+			cb(dev, &result_data);
+		else
+			pci_walk_bus(dev->bus, cb, &result_data);
 	}
 
 	return result_data.result;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 4b70349..00f61225 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -232,6 +232,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
 			break;
 
 		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+		if (rtsta == (u32) ~0)
+			break;
+
 		if (rtsta & PCI_EXP_RTSTA_PME) {
 			/*
 			 * Clear PME status of the port.  If there are other
@@ -279,7 +282,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
 	spin_lock_irqsave(&data->lock, flags);
 	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
 
-	if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
 		spin_unlock_irqrestore(&data->lock, flags);
 		return IRQ_NONE;
 	}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d266d80..a98be6d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -932,7 +932,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
 			child = pci_add_new_bus(bus, dev, max+1);
 			if (!child)
 				goto out;
-			pci_bus_insert_busn_res(child, max+1, 0xff);
+			pci_bus_insert_busn_res(child, max+1,
+						bus->busn_res.end);
 		}
 		max++;
 		buses = (buses & 0xff000000)
@@ -1438,8 +1439,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
 
 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
 {
-	if (hpp)
-		dev_warn(&dev->dev, "PCI-X settings not supported\n");
+	int pos;
+
+	if (!hpp)
+		return;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (!pos)
+		return;
+
+	dev_warn(&dev->dev, "PCI-X settings not supported\n");
 }
 
 static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1465,6 +1474,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 	if (!hpp)
 		return;
 
+	if (!pci_is_pcie(dev))
+		return;
+
 	if (hpp->revision > 1) {
 		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
 			 hpp->revision);
@@ -2125,6 +2137,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
 	if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
 		if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
 			max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+
+		/* Do not allocate more buses than we have room left for */
+		if (max > bus->busn_res.end)
+			max = bus->busn_res.end;
 	}
 
 	/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5d8151b..98eba91 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4088,12 +4088,14 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
 static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
 {
 	/*
-	 * Cavium devices matching this quirk do not perform peer-to-peer
-	 * with other functions, allowing masking out these bits as if they
-	 * were unimplemented in the ACS capability.
+	 * Cavium root ports don't advertise an ACS capability.  However,
+	 * the RTL internally implements similar protection as if ACS had
+	 * Request Redirection, Completion Redirection, Source Validation,
+	 * and Upstream Forwarding features enabled.  Assert that the
+	 * hardware implements and enables equivalent ACS functionality for
+	 * these flags.
 	 */
-	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
-		       PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+	acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
 
 	return acs_flags ? 0 : 1;
 }
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e0..b6b9b5b 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev)
 	pci_pme_active(dev, false);
 
 	if (dev->is_added) {
+		device_release_driver(&dev->dev);
 		pci_proc_detach_device(dev);
 		pci_remove_sysfs_dev_files(dev);
-		device_release_driver(&dev->dev);
 		dev->is_added = 0;
 	}
 
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 671610c..b0c0fa0 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -26,7 +26,8 @@
 
 config PINCTRL_ADI2
 	bool "ADI pin controller driver"
-	depends on BLACKFIN
+	depends on (BF54x || BF60x)
+	depends on !GPIO_ADI
 	select PINMUX
 	select IRQ_DOMAIN
 	help
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 0d34d8a4..e8c08eb 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1594,6 +1594,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 			clear_bit(i, chip->irq_valid_mask);
 	}
 
+	/*
+	 * The same set of machines in chv_no_valid_mask[] have incorrectly
+	 * configured GPIOs that generate spurious interrupts so we use
+	 * this same list to apply another quirk for them.
+	 *
+	 * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+	 */
+	if (!need_valid_mask) {
+		/*
+		 * Mask all interrupts the community is able to generate
+		 * but leave the ones that can only generate GPEs unmasked.
+		 */
+		chv_writel(GENMASK(31, pctrl->community->nirqs),
+			   pctrl->regs + CHV_INTMASK);
+	}
+
 	/* Clear all interrupts */
 	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
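
The quirk above writes GENMASK(31, pctrl->community->nirqs) to CHV_INTMASK, i.e. the contiguous range of set bits from nirqs up to bit 31, so that (per the comment) only the GPE-only lines remain unmasked. A small standalone illustration of the value produced, using a 32-bit sketch of the helper and a hypothetical nirqs of 8:

	#include <stdint.h>
	#include <stdio.h>

	/* 32-bit sketch of the kernel's GENMASK(h, l): bits l..h inclusive. */
	static uint32_t genmask32(unsigned int h, unsigned int l)
	{
		return (~0u << l) & (~0u >> (31 - h));
	}

	int main(void)
	{
		/* With a hypothetical nirqs of 8, bits 8..31 are covered by the
		 * mask and lines 0..7 are left out of it. */
		printf("0x%08x\n", genmask32(31, 8)); /* prints 0xffffff00 */
		return 0;
	}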
 
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index b7bb371..50c45bd 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
 	writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
 }
 
+static int st_gpio_irq_request_resources(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	st_gpio_direction_input(gc, d->hwirq);
+
+	return gpiochip_lock_as_irq(gc, d->hwirq);
+}
+
+static void st_gpio_irq_release_resources(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	gpiochip_unlock_as_irq(gc, d->hwirq);
+}
+
 static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
 };
 
 static struct irq_chip st_gpio_irqchip = {
-	.name		= "GPIO",
-	.irq_disable	= st_gpio_irq_mask,
-	.irq_mask	= st_gpio_irq_mask,
-	.irq_unmask	= st_gpio_irq_unmask,
-	.irq_set_type	= st_gpio_irq_set_type,
-	.flags		= IRQCHIP_SKIP_SET_WAKE,
+	.name			= "GPIO",
+	.irq_request_resources	= st_gpio_irq_request_resources,
+	.irq_release_resources	= st_gpio_irq_release_resources,
+	.irq_disable		= st_gpio_irq_mask,
+	.irq_mask		= st_gpio_irq_mask,
+	.irq_unmask		= st_gpio_irq_unmask,
+	.irq_set_type		= st_gpio_irq_set_type,
+	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
 static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 7f30416..f714f67 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5420,14 +5420,15 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
 	sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
 	if (!sys2pci_np)
 		return -EINVAL;
+
 	ret = of_address_to_resource(sys2pci_np, 0, &res);
+	of_node_put(sys2pci_np);
 	if (ret)
 		return ret;
+
 	pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
-	if (IS_ERR(pmx->sys2pci_base)) {
-		of_node_put(sys2pci_np);
+	if (IS_ERR(pmx->sys2pci_base))
 		return -ENOMEM;
-	}
 
 	pmx->dev = &pdev->dev;
 
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 6727da6..dc445a0 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
 
 #define GSI_RESET_WA_MIN_SLEEP 1000
 #define GSI_RESET_WA_MAX_SLEEP 2000
+#define GSI_CHNL_STATE_MAX_RETRYCNT 10
 static const struct of_device_id msm_gsi_match[] = {
 	{ .compatible = "qcom,msm_gsi", },
 	{ },
@@ -2076,6 +2077,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
 	uint32_t val;
 	struct gsi_chan_ctx *ctx;
 	bool reset_done = false;
+	uint32_t retry_cnt = 0;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2112,9 +2114,19 @@ int gsi_reset_channel(unsigned long chan_hdl)
 		return -GSI_STATUS_TIMED_OUT;
 	}
 
+revrfy_chnlstate:
 	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
 		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
 				ctx->state);
+		/* The GSI HW register state may not yet be in sync with the
+		 * GSI channel context state; wait 1 ms and re-verify it.
+		 */
+		retry_cnt++;
+		if (retry_cnt <= GSI_CHNL_STATE_MAX_RETRYCNT) {
+			usleep_range(GSI_RESET_WA_MIN_SLEEP,
+				GSI_RESET_WA_MAX_SLEEP);
+			goto revrfy_chnlstate;
+		}
 		BUG();
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index a297f24..37614cc 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -580,6 +580,15 @@ static int ipa_attrib_dump(struct ipa_rule_attrib *attrib,
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
 		pr_err("ether_type:%x ", attrib->ether_type);
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
+		pr_err("l2tp inner ip type: %d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		addr[0] = htonl(attrib->u.v4.dst_addr);
+		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+	}
+
 	pr_err("\n");
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3cb86d0..9f71d7b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -2903,10 +2903,12 @@ void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 	struct ipa_ep_context *ep;
 	unsigned int src_pipe;
 	u32 metadata;
+	u8 ucp;
 
 	status = (struct ipa_hw_pkt_status *)rx_skb->data;
 	src_pipe = status->endp_src_idx;
 	metadata = status->metadata;
+	ucp = status->ucp;
 	ep = &ipa_ctx->ep[src_pipe];
 	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
 		!ep->valid ||
@@ -2930,8 +2932,10 @@ void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 	 *  ------------------------------------------
 	 */
 	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	*(u8 *)(rx_skb->cb + 4) = ucp;
 	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
 			metadata, *(u32 *)rx_skb->cb);
+	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
 	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index d53b1b5..4ffbd55 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -706,7 +706,9 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
 				req->filter_index_list[i].filter_handle,
 				req->filter_index_list[i].filter_index);
 		return -EINVAL;
-	} else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+	}
+
+	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
 			req->source_pipe_index, req->install_status);
 		return -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 72b2e96..8a3fbd4 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -1407,6 +1407,37 @@ int ipa_generate_hw_rule(enum ipa_ip_type ip,
 			ihl_ofst_meq32++;
 		}
 
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 22  => offset of IP type after v6 header */
+			*buf = ipa_write_8(22, *buf);
+			*buf = ipa_write_32(0xF0000000, *buf);
+			if (attrib->type == 0x40)
+				*buf = ipa_write_32(0x40000000, *buf);
+			else
+				*buf = ipa_write_32(0x60000000, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 38  => offset of inner IPv4 addr */
+			*buf = ipa_write_8(38, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
 				IPAERR("ran out of ihl_rng16 eq\n");
@@ -2006,6 +2037,36 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
 			ihl_ofst_meq32++;
 		}
 
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 22  => offset of inner IP type after v6 header */
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xF0000000;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				(u32)attrib->type << 24;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 38  => offset of inner IPv4 addr */
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				attrib->u.v4.dst_addr_mask;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->u.v4.dst_addr;
+			ihl_ofst_meq32++;
+		}
+
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
 				IPAERR_RL("ran out of ihl_rng16 eq\n");
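
Both generators above program a 32-bit masked compare at IHL offset 22 so that only the top nibble of the byte that starts the inner IP header is examined: with mask 0xF0000000 and a value of attrib->type shifted into the top byte, a type of 0x40 matches an inner IPv4 header and 0x60 an inner IPv6 header. A hedged standalone sketch of that compare, as seen from the host side (names hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * 'word' is the (big-endian) 32-bit value read at IHL offset 22; its
	 * most significant byte is the first byte of the inner IP header, so
	 * the top nibble carries the IP version (4 or 6).
	 */
	static bool inner_ip_type_matches(uint32_t word, uint8_t type)
	{
		const uint32_t mask  = 0xF0000000u;
		const uint32_t value = (uint32_t)type << 24; /* 0x40... or 0x60... */

		return (word & mask) == value;
	}

	/* e.g. inner_ip_type_matches(0x45000054, 0x40) -> true (inner IPv4) */
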
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index d91d7eb..9f0cec9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2868,7 +2868,7 @@ int rmnet_ipa_query_tethering_stats_modem(
 	if (reset) {
 		req->reset_stats_valid = true;
 		req->reset_stats = true;
-		IPAWANERR("reset the pipe stats\n");
+		IPAWANDBG("reset the pipe stats\n");
 	} else {
 		/* print tethered-client enum */
 		IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 8e8aaef..ae24675 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3486,7 +3486,7 @@ static unsigned int ipa3_get_bus_vote(void)
 	} else {
 		WARN_ON(1);
 	}
-	IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+	IPADBG_LOW("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
 
 	return idx;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 35b6dff..8872c24 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -386,6 +386,8 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
 	int result = -EFAULT;
 	enum gsi_status gsi_res;
 	int aggr_active_bitmap = 0;
+	bool undo_aggr_value = false;
+	struct ipahal_reg_clkon_cfg fields;
 
 	IPADBG("entry\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -398,6 +400,25 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
 
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	/*
+	 * IPAv4.0 HW has a limitation where WSEQ in the MBIM NTH header is
+	 * not reset to 0 when the MBIM pipe is reset. The workaround is to
+	 * disable HW clock gating for the AGGR block via the IPA_CLKON_CFG
+	 * reg; the undo flag disables the bit again once the reset finishes.
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (ep->cfg.aggr.aggr == IPA_MBIM_16 &&
+			ep->cfg.aggr.aggr_en != IPA_BYPASS_AGGR) {
+			ipahal_read_reg_fields(IPA_CLKON_CFG, &fields);
+			if (fields.open_aggr_wrapper == true) {
+				undo_aggr_value = true;
+				fields.open_aggr_wrapper = false;
+				ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+			}
+		}
+	}
+
 	/*
 	 * Check for open aggregation frame on Consumer EP -
 	 * reset with open aggregation frame WA
@@ -429,10 +450,22 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 
+	/* undo the aggr value if flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = false;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
 	IPADBG("exit\n");
 	return 0;
 
 reset_chan_fail:
+	/* undo the aggr value if flag was set above */
+	if (undo_aggr_value) {
+		fields.open_aggr_wrapper = false;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &fields);
+	}
+
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 5da83e5..e88ab27 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -527,6 +527,15 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 	if (attrib->attrib_mask & IPA_FLT_TCP_SYN_L2TP)
 		pr_err("tcp syn l2tp ");
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE)
+		pr_err("l2tp inner ip type: %d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		addr[0] = htonl(attrib->u.v4.dst_addr);
+		mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+		pr_err("dst_addr:%pI4 dst_addr_mask:%pI4 ", addr, mask);
+	}
+
 	pr_err("\n");
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index ee312c7..3aaae8d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2327,6 +2327,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
 				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
 		}
+		tx_pkt = NULL;
 	};
 
 	return rc;
@@ -2569,10 +2570,12 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 	struct ipa3_ep_context *ep;
 	unsigned int src_pipe;
 	u32 metadata;
+	u8 ucp;
 
 	ipahal_pkt_status_parse(rx_skb->data, &status);
 	src_pipe = status.endp_src_idx;
 	metadata = status.metadata;
+	ucp = status.ucp;
 	ep = &ipa3_ctx->ep[src_pipe];
 	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
 		!ep->valid ||
@@ -2595,8 +2598,10 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
 	 *  ------------------------------------------
 	 */
 	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	*(u8 *)(rx_skb->cb + 4) = ucp;
 	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
 			metadata, *(u32 *)rx_skb->cb);
+	IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
 
 	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
 }
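
Both the v2 and v3 LAN rx callbacks now record the ucp flag at byte 4 of skb->cb, next to the metadata already stored in bytes 0-1. A hedged sketch of a consumer reading the same layout back (the reader and its names are hypothetical; only the offsets mirror the writes above):

	#include <stdint.h>
	#include <string.h>

	/* cb points at the skb control buffer filled in above:
	 *   bytes 0..1 : upper 16 bits of the status metadata
	 *   byte  4    : ucp flag
	 */
	static uint16_t rx_cb_metadata(const uint8_t *cb)
	{
		uint16_t meta;

		memcpy(&meta, cb, sizeof(meta)); /* avoids unaligned access */
		return meta;
	}

	static uint8_t rx_cb_ucp(const uint8_t *cb)
	{
		return cb[4];
	}
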
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 1c8715a..c158c94 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -610,7 +610,7 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
 		IPAWANDBG("IPACM pass zero rules to Q6\n");
 	} else {
 		IPAWANDBG("IPACM pass %u rules to Q6\n",
-		req->filter_spec_ex_list_len);
+		req->filter_spec_list_len);
 	}
 
 	if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) {
@@ -919,7 +919,9 @@ int ipa3_qmi_filter_notify_send(
 		req->source_pipe_index,
 		req->rule_id_len);
 		return -EINVAL;
-	} else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+	}
+
+	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
 			req->source_pipe_index, req->install_status);
 		return -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 7421eb8..11f90a5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,7 +57,6 @@
 #define IPA_BCR_REG_VAL_v3_0 (0x00000001)
 #define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
 #define IPA_BCR_REG_VAL_v4_0 (0x00000039)
-#define IPA_CLKON_CFG_v4_0 (0x30000000)
 #define IPA_AGGR_GRAN_MIN (1)
 #define IPA_AGGR_GRAN_MAX (32)
 #define IPA_EOT_COAL_GRAN_MIN (1)
@@ -2051,13 +2050,20 @@ int ipa3_init_hw(void)
 	ipahal_write_reg(IPA_BCR, val);
 
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
-		struct ipahal_reg_tx_cfg cfg;
+		struct ipahal_reg_clkon_cfg clkon_cfg;
+		struct ipahal_reg_tx_cfg tx_cfg;
 
-		ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0);
-		ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+		memset(&clkon_cfg, 0, sizeof(clkon_cfg));
+
+		/* enable open global clocks */
+		clkon_cfg.open_global_2x_clk = true;
+		clkon_cfg.open_global = true;
+		ipahal_write_reg_fields(IPA_CLKON_CFG, &clkon_cfg);
+
+		ipahal_read_reg_fields(IPA_TX_CFG, &tx_cfg);
 		/* disable PA_MASK_EN to allow holb drop */
-		cfg.pa_mask_en = 0;
-		ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+		tx_cfg.pa_mask_en = 0;
+		ipahal_write_reg_fields(IPA_TX_CFG, &tx_cfg);
 	}
 
 	ipa3_cfg_qsb();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index a677046..1254fe3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1233,6 +1233,39 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
 		ihl_ofst_meq32 += 2;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of IP type after v6 header */
+		extra = ipa_write_8(22, extra);
+		rest = ipa_write_32(0xF0000000, rest);
+		if (attrib->type == 0x40)
+			rest = ipa_write_32(0x40000000, rest);
+		else
+			rest = ipa_write_32(0x60000000, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		extra = ipa_write_8(38, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ihl_ofst_meq32++;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
 		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
 		rest = ipa_write_32(attrib->meta_data_mask, rest);
@@ -2269,6 +2302,40 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
 		ihl_ofst_meq32 += 2;
 	}
 
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 22  => offset of inner IP type after v6 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xF0000000;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			(u32)attrib->type << 24;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 38  => offset of inner IPv4 addr */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ihl_ofst_meq32++;
+	}
+
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
 		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
 			IPAHAL_ERR_RL("ran out of meq32 eq\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1d8eb13..4920942 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -399,6 +399,261 @@ static void ipareg_construct_endp_status_n_v4_0(
 			IPA_ENDP_STATUS_n_STATUS_PKT_SUPPRESS_BMSK);
 }
 
+static void ipareg_construct_clkon_cfg(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_global_2x_clk,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_global,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_gsi_if,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_weight_arb,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_qmb,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ram_slaveway,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_aggr_wrapper,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_qsb2axi_cmdq_l,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_fnr,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_1,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_0,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ntf_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dcmp,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_h_dcph,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_d_dcph,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ack_mngr,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ctx_handler,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rsrc_mngr,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dps_tx_cmdqs,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_hps_dps_cmdqs,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rx_hps_cmdqs,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_dps,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_hps,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ftch_dps,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ftch_hps,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_ram_arb,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_misc,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_tx_wrapper,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_proc,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, clkon_cfg->open_rx,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
+static void ipareg_parse_clkon_cfg(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_clkon_cfg *clkon_cfg =
+		(struct ipahal_reg_clkon_cfg *)fields;
+
+	memset(clkon_cfg, 0, sizeof(struct ipahal_reg_clkon_cfg));
+	clkon_cfg->open_global_2x_clk = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK);
+
+	clkon_cfg->open_global = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GLOBAL_SHFT,
+			IPA_CLKON_CFG_OPEN_GLOBAL_BMSK);
+
+	clkon_cfg->open_gsi_if = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_GSI_IF_SHFT,
+			IPA_CLKON_CFG_OPEN_GSI_IF_BMSK);
+
+	clkon_cfg->open_weight_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK);
+
+	clkon_cfg->open_qmb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QMB_SHFT,
+			IPA_CLKON_CFG_OPEN_QMB_BMSK);
+
+	clkon_cfg->open_ram_slaveway = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK);
+
+	clkon_cfg->open_aggr_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK);
+
+	clkon_cfg->open_qsb2axi_cmdq_l = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT,
+			IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK);
+
+	clkon_cfg->open_fnr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FNR_SHFT,
+			IPA_CLKON_CFG_OPEN_FNR_BMSK);
+
+	clkon_cfg->open_tx_1 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_1_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_1_BMSK);
+
+	clkon_cfg->open_tx_0 = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_0_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_0_BMSK);
+
+	clkon_cfg->open_ntf_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_dcmp = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DCMP_SHFT,
+			IPA_CLKON_CFG_OPEN_DCMP_BMSK);
+
+	clkon_cfg->open_h_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_H_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_H_DCPH_BMSK);
+
+	clkon_cfg->open_d_dcph = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_D_DCPH_SHFT,
+			IPA_CLKON_CFG_OPEN_D_DCPH_BMSK);
+
+	clkon_cfg->open_ack_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK);
+
+	clkon_cfg->open_ctx_handler = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT,
+			IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK);
+
+	clkon_cfg->open_rsrc_mngr = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT,
+			IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK);
+
+	clkon_cfg->open_dps_tx_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK);
+
+	clkon_cfg->open_hps_dps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK);
+
+	clkon_cfg->open_rx_hps_cmdqs = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK);
+
+	clkon_cfg->open_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_DPS_BMSK);
+
+	clkon_cfg->open_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_HPS_BMSK);
+
+	clkon_cfg->open_ftch_dps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK);
+
+	clkon_cfg->open_ftch_hps = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT,
+			IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK);
+
+	clkon_cfg->open_ram_arb = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT,
+			IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK);
+
+	clkon_cfg->open_misc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_MISC_SHFT,
+			IPA_CLKON_CFG_OPEN_MISC_BMSK);
+
+	clkon_cfg->open_tx_wrapper = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT,
+			IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK);
+
+	clkon_cfg->open_proc = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_PROC_SHFT,
+			IPA_CLKON_CFG_OPEN_PROC_BMSK);
+
+	clkon_cfg->open_rx = IPA_GETFIELD_FROM_REG(val,
+			IPA_CLKON_CFG_OPEN_RX_SHFT,
+			IPA_CLKON_CFG_OPEN_RX_BMSK);
+}
+
 static void ipareg_construct_qcncm(
 	enum ipahal_reg_name reg, const void *fields, u32 *val)
 {
@@ -1522,7 +1777,7 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 		ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy,
 		0x00000840, 0x70},
 	[IPA_HW_v4_0][IPA_CLKON_CFG] = {
-		ipareg_construct_dummy, ipareg_parse_dummy,
+		ipareg_construct_clkon_cfg, ipareg_parse_clkon_cfg,
 		0x00000044, 0},
 	[IPA_HW_v4_0][IPA_ENDP_INIT_CONN_TRACK_n] = {
 		ipareg_construct_endp_init_conn_track_n,
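
The construct/parse pair registered above packs and unpacks each CLKON_CFG field with the IPA_SETFIELD_IN_REG / IPA_GETFIELD_FROM_REG helpers against the per-field shift and mask constants. A hedged sketch of the shift-and-mask scheme such helpers typically implement (the bodies below are illustrative, not the driver's actual macros):

	#include <stdint.h>

	/* Pack 'val' into the field described by (shift, mask) inside *reg. */
	static inline void set_field(uint32_t *reg, uint32_t val,
				     unsigned int shift, uint32_t mask)
	{
		*reg = (*reg & ~mask) | ((val << shift) & mask);
	}

	/* Extract the field described by (shift, mask) from reg. */
	static inline uint32_t get_field(uint32_t reg,
					 unsigned int shift, uint32_t mask)
	{
		return (reg & mask) >> shift;
	}

	/* e.g. the OPEN_GLOBAL field above uses shift 28 and mask 0x10000000 */
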
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index df3c976..98481a1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -199,6 +199,47 @@ struct ipahal_reg_ep_cfg_status {
 };
 
 /*
+ * struct ipahal_reg_clkon_cfg - Enables SW bypass clock-gating for the IPA core
+ *
+ * @all: Enables the SW bypass clock-gating control for the given sub-module;
+ *	0: CGC is enabled by internal logic, 1: no CGC (the clock is always on).
+ *	The affected sub-module is named by the field, e.g. open_rx refers to
+ *	the IPA_RX sub-module and open_global to the global IPA 1x clock.
+ */
+struct ipahal_reg_clkon_cfg {
+	bool open_global_2x_clk;
+	bool open_global;
+	bool open_gsi_if;
+	bool open_weight_arb;
+	bool open_qmb;
+	bool open_ram_slaveway;
+	bool open_aggr_wrapper;
+	bool open_qsb2axi_cmdq_l;
+	bool open_fnr;
+	bool open_tx_1;
+	bool open_tx_0;
+	bool open_ntf_tx_cmdqs;
+	bool open_dcmp;
+	bool open_h_dcph;
+	bool open_d_dcph;
+	bool open_ack_mngr;
+	bool open_ctx_handler;
+	bool open_rsrc_mngr;
+	bool open_dps_tx_cmdqs;
+	bool open_hps_dps_cmdqs;
+	bool open_rx_hps_cmdqs;
+	bool open_dps;
+	bool open_hps;
+	bool open_ftch_dps;
+	bool open_ftch_hps;
+	bool open_ram_arb;
+	bool open_misc;
+	bool open_tx_wrapper;
+	bool open_proc;
+	bool open_rx;
+};
+
+/*
  * struct ipa_hash_tuple - Hash tuple members for flt and rt
  *  the fields tells if to be masked or not
  * @src_id: pipe number for flt, table index for rt
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 664d254..35a36e1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -246,6 +246,68 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
 #define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
 #define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
 
+/* IPA_CLKON_CFG register */
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_BMSK  0x20000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_2X_CLK_SHFT 29
+#define IPA_CLKON_CFG_OPEN_GLOBAL_BMSK 0x10000000
+#define IPA_CLKON_CFG_OPEN_GLOBAL_SHFT 28
+#define IPA_CLKON_CFG_OPEN_GSI_IF_BMSK 0x8000000
+#define IPA_CLKON_CFG_OPEN_GSI_IF_SHFT 27
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_SHFT 26
+#define IPA_CLKON_CFG_OPEN_WEIGHT_ARB_BMSK 0x4000000
+#define IPA_CLKON_CFG_OPEN_QMB_SHFT 25
+#define IPA_CLKON_CFG_OPEN_QMB_BMSK 0x2000000
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_SHFT 24
+#define IPA_CLKON_CFG_OPEN_RAM_SLAVEWAY_BMSK 0x1000000
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_SHFT 23
+#define IPA_CLKON_CFG_OPEN_AGGR_WRAPPER_BMSK 0x800000
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_SHFT 22
+#define IPA_CLKON_CFG_OPEN_QSB2AXI_CMDQ_L_BMSK 0x400000
+#define IPA_CLKON_CFG_OPEN_FNR_SHFT 21
+#define IPA_CLKON_CFG_OPEN_FNR_BMSK 0x200000
+#define IPA_CLKON_CFG_OPEN_TX_1_SHFT 20
+#define IPA_CLKON_CFG_OPEN_TX_1_BMSK 0x100000
+#define IPA_CLKON_CFG_OPEN_TX_0_SHFT 19
+#define IPA_CLKON_CFG_OPEN_TX_0_BMSK 0x80000
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_SHFT 18
+#define IPA_CLKON_CFG_OPEN_NTF_TX_CMDQS_BMSK 0x40000
+#define IPA_CLKON_CFG_OPEN_DCMP_SHFT 17
+#define IPA_CLKON_CFG_OPEN_DCMP_BMSK 0x20000
+#define IPA_CLKON_CFG_OPEN_H_DCPH_SHFT 16
+#define IPA_CLKON_CFG_OPEN_H_DCPH_BMSK 0x10000
+#define IPA_CLKON_CFG_OPEN_D_DCPH_SHFT 15
+#define IPA_CLKON_CFG_OPEN_D_DCPH_BMSK 0x8000
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_SHFT 14
+#define IPA_CLKON_CFG_OPEN_ACK_MNGR_BMSK 0x4000
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_SHFT 13
+#define IPA_CLKON_CFG_OPEN_CTX_HANDLER_BMSK 0x2000
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_SHFT 12
+#define IPA_CLKON_CFG_OPEN_RSRC_MNGR_BMSK 0x1000
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_SHFT 11
+#define IPA_CLKON_CFG_OPEN_DPS_TX_CMDQS_BMSK 0x800
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_SHFT 10
+#define IPA_CLKON_CFG_OPEN_HPS_DPS_CMDQS_BMSK 0x400
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_SHFT 9
+#define IPA_CLKON_CFG_OPEN_RX_HPS_CMDQS_BMSK 0x200
+#define IPA_CLKON_CFG_OPEN_DPS_SHFT 8
+#define IPA_CLKON_CFG_OPEN_DPS_BMSK 0x100
+#define IPA_CLKON_CFG_OPEN_HPS_SHFT 7
+#define IPA_CLKON_CFG_OPEN_HPS_BMSK 0x80
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_SHFT 6
+#define IPA_CLKON_CFG_OPEN_FTCH_DPS_BMSK 0x40
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_SHFT 5
+#define IPA_CLKON_CFG_OPEN_FTCH_HPS_BMSK 0x20
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_SHFT 4
+#define IPA_CLKON_CFG_OPEN_RAM_ARB_BMSK 0x10
+#define IPA_CLKON_CFG_OPEN_MISC_SHFT 3
+#define IPA_CLKON_CFG_OPEN_MISC_BMSK 0x8
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_SHFT 2
+#define IPA_CLKON_CFG_OPEN_TX_WRAPPER_BMSK 0x4
+#define IPA_CLKON_CFG_OPEN_PROC_SHFT 1
+#define IPA_CLKON_CFG_OPEN_PROC_BMSK 0x2
+#define IPA_CLKON_CFG_OPEN_RX_BMSK 0x1
+#define IPA_CLKON_CFG_OPEN_RX_SHFT 0
+
 /* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
 #define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
 #define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 98a8594..5b0834a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3170,7 +3170,7 @@ static int rmnet_ipa3_query_tethering_stats_modem(
 	if (reset) {
 		req->reset_stats_valid = true;
 		req->reset_stats = true;
-		IPAWANERR("reset the pipe stats\n");
+		IPAWANDBG("reset the pipe stats\n");
 	} else {
 		/* print tethered-client enum */
 		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
index 195799e..212557c 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1304,6 +1304,7 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
 	u32 next_wp_ofst;
 	int i;
 	u32 num_of_ed_to_queue;
+	u32 avail_ev;
 
 	IPA_UT_LOG("Entry\n");
 
@@ -1341,6 +1342,8 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
 
 	wp_ofst = (u32)(p_events[event_ring_index].wp -
 		p_events[event_ring_index].rbase);
+	rp_ofst = (u32)(p_events[event_ring_index].rp -
+		p_events[event_ring_index].rbase);
 
 	if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) {
 		IPA_UT_LOG("invalid ev rlen %llu\n",
@@ -1348,23 +1351,48 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
 		return -EFAULT;
 	}
 
-	next_wp_ofst = (wp_ofst + num_of_ed_to_queue *
-		sizeof(struct ipa_mhi_event_ring_element)) %
-		(u32)p_events[event_ring_index].rlen;
+	if (wp_ofst > rp_ofst) {
+		avail_ev = (wp_ofst - rp_ofst) /
+			sizeof(struct ipa_mhi_event_ring_element);
+	} else {
+		avail_ev = (u32)p_events[event_ring_index].rlen -
+			(rp_ofst - wp_ofst);
+		avail_ev /= sizeof(struct ipa_mhi_event_ring_element);
+	}
 
-	/* set next WP */
-	p_events[event_ring_index].wp =
-		(u32)p_events[event_ring_index].rbase + next_wp_ofst;
+	IPA_UT_LOG("wp_ofst=0x%x rp_ofst=0x%x rlen=%llu avail_ev=%u\n",
+		wp_ofst, rp_ofst, p_events[event_ring_index].rlen, avail_ev);
 
-	/* write value to event ring doorbell */
-	IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
-		p_events[event_ring_index].wp,
-		&(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+	if (num_of_ed_to_queue > ((u32)p_events[event_ring_index].rlen /
+		sizeof(struct ipa_mhi_event_ring_element))) {
+		IPA_UT_LOG("event ring too small for %u credits\n",
+			num_of_ed_to_queue);
+		return -EFAULT;
+	}
+
+	if (num_of_ed_to_queue > avail_ev) {
+		IPA_UT_LOG("Need to add event credits (needed=%u)\n",
+			num_of_ed_to_queue - avail_ev);
+
+		next_wp_ofst = (wp_ofst + (num_of_ed_to_queue - avail_ev) *
+			sizeof(struct ipa_mhi_event_ring_element)) %
+			(u32)p_events[event_ring_index].rlen;
+
+		/* set next WP */
+		p_events[event_ring_index].wp =
+			(u32)p_events[event_ring_index].rbase + next_wp_ofst;
+
+		/* write value to event ring doorbell */
+		IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
+			p_events[event_ring_index].wp,
+			&(gsi_ctx->per.phys_addr),
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
-	iowrite32(p_events[event_ring_index].wp,
-		test_mhi_ctx->gsi_mmio +
-		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+		iowrite32(p_events[event_ring_index].wp,
+			test_mhi_ctx->gsi_mmio +
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
 			event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0));
+	}
 
 	for (i = 0; i < buf_array_size; i++) {
 		/* calculate virtual pointer for current WP and RP */
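
The reworked test above first derives how many event-ring credits are already queued from the write- and read-pointer offsets, and only rings the doorbell for the shortfall. A standalone sketch of that circular-ring arithmetic (struct and names hypothetical; offsets and lengths are placeholders):

	#include <stdint.h>

	struct ring {
		uint32_t wp_ofst;	/* write pointer offset into the ring */
		uint32_t rp_ofst;	/* read pointer offset into the ring */
		uint32_t rlen;		/* ring length in bytes */
		uint32_t elem_size;	/* size of one ring element in bytes */
	};

	static uint32_t queued_elements(const struct ring *r)
	{
		if (r->wp_ofst > r->rp_ofst)
			return (r->wp_ofst - r->rp_ofst) / r->elem_size;

		/* wp has wrapped past the end of the ring (wp == rp is
		 * counted as a fully occupied ring in this convention) */
		return (r->rlen - (r->rp_ofst - r->wp_ofst)) / r->elem_size;
	}
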
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 9f31bc1..1871602 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -97,6 +97,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
 		return;
 	}
 	input_report_key(data->idev, KEY_RFKILL, 1);
+	input_sync(data->idev);
 	input_report_key(data->idev, KEY_RFKILL, 0);
 	input_sync(data->idev);
 }
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 0935668..abd9d83 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
 	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
 	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+	AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
 	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
 	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index a47a41f..b5b8901 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
 	 * - GTDRIVER_IPC BASE_IFACE
 	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-	if (res) {
+	if (res && resource_size(res) > 1) {
 		addr = devm_ioremap_resource(&pdev->dev, res);
 		if (!IS_ERR(addr))
 			punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index a12b0ad..015da41 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2548,13 +2548,13 @@ static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
 
 	/* calculate soc_cutoff_new */
 	val = (1000000LL + temp_rs_to_rslow) * battery_esr;
-	do_div(val, 1000000);
+	val = div64_s64(val, 1000000);
 	ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
 		+ chip->cutoff_voltage;
 
 	/* calculate soc_cutoff_aged */
 	val = (1000000LL + temp_rs_to_rslow) * esr_actual;
-	do_div(val, 1000000);
+	val = div64_s64(val, 1000000);
 	ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
 		+ chip->cutoff_voltage;
 
@@ -3068,11 +3068,11 @@ static void fg_cap_learning_post_process(struct fg_chip *chip)
 
 	max_inc_val = chip->learning_data.learned_cc_uah
 			* (1000 + chip->learning_data.max_increment);
-	do_div(max_inc_val, 1000);
+	max_inc_val = div_s64(max_inc_val, 1000);
 
 	min_dec_val = chip->learning_data.learned_cc_uah
 			* (1000 - chip->learning_data.max_decrement);
-	do_div(min_dec_val, 1000);
+	min_dec_val = div_s64(min_dec_val, 1000);
 
 	old_cap = chip->learning_data.learned_cc_uah;
 	if (chip->learning_data.cc_uah > max_inc_val)
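
The fuel-gauge change above replaces do_div() with div64_s64()/div_s64() where the 64-bit dividend is signed and may go negative; do_div() performs an unsigned 64-by-32 division, so a negative value would first be reinterpreted as a huge unsigned number. A userspace-flavoured sketch of the difference (the helpers below only stand in for the kernel ones):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t unsigned_div64(uint64_t n, uint32_t base)	/* ~do_div */
	{
		return n / base;
	}

	static int64_t signed_div64(int64_t n, int64_t base)		/* ~div64_s64 */
	{
		return n / base;
	}

	int main(void)
	{
		int64_t val = -1500000;		/* e.g. a negative ESR product */

		printf("%lld\n", (long long)signed_div64(val, 1000000));	/* -1, as intended */
		printf("%llu\n", (unsigned long long)
		       unsigned_div64((uint64_t)val, 1000000));			/* huge bogus value */
		return 0;
	}
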
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index d6ff6fc..74e80cd 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1573,20 +1573,12 @@ static int smb2_init_hw(struct smb2 *chip)
 		BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
 	vote(chg->dc_icl_votable,
 		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
-	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
-			true, 0);
-	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
-			true, 0);
 	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
 		chip->dt.hvdcp_disable, 0);
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
 			true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
 			true, 0);
-	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
-		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
-	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
-		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
 
 	/*
 	 * AICL configuration:
@@ -1636,6 +1628,16 @@ static int smb2_init_hw(struct smb2 *chip)
 		return rc;
 	}
 
+	/* Connector-type based votes */
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC), 0);
+	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+		(chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB), 0);
+
 	/* configure VCONN for software control */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 4656e35..496a276 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2079,9 +2079,6 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
 		return -EINVAL;
 
 	chg->system_temp_level = val->intval;
-	/* disable parallel charge in case of system temp level */
-	vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
-			chg->system_temp_level ? true : false, 0);
 
 	if (chg->system_temp_level == chg->thermal_levels)
 		return vote(chg->chg_disable_votable,
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 833a8da..ebaaf5c 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
 #define ANA2_BASE	0x1100
 #define BATIF_BASE	0x1200
 #define USBIN_BASE	0x1300
+#define ANA1_BASE	0x1400
 #define MISC_BASE	0x1600
 
 #define BATTERY_STATUS_2_REG			(CHGR_BASE + 0x0B)
@@ -82,6 +83,9 @@
 #define EXT_BIAS_PIN_BIT			BIT(2)
 #define DIE_TEMP_COMP_HYST_BIT			BIT(1)
 
+#define ANA1_ENG_SREFGEN_CFG2_REG		(ANA1_BASE + 0xC1)
+#define VALLEY_COMPARATOR_EN_BIT		BIT(0)
+
 #define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
 #define SKIN_TEMP_RST_HOT_BIT			BIT(6)
 #define SKIN_TEMP_UB_HOT_BIT			BIT(5)
@@ -94,6 +98,9 @@
 #define MISC_RT_STS_REG				(MISC_BASE + 0x10)
 #define HARD_ILIMIT_RT_STS_BIT			BIT(5)
 
+#define BANDGAP_ENABLE_REG			(MISC_BASE + 0x42)
+#define BANDGAP_ENABLE_CMD_BIT			BIT(0)
+
 #define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
 #define BARK_BITE_WDOG_PET_BIT			BIT(0)
 
@@ -108,6 +115,9 @@
 #define MISC_CUST_SDCDC_CLK_CFG_REG		(MISC_BASE + 0xA0)
 #define SWITCHER_CLK_FREQ_MASK			GENMASK(3, 0)
 
+#define MISC_CUST_SDCDC_ILIMIT_CFG_REG		(MISC_BASE + 0xA1)
+#define LS_VALLEY_THRESH_PCT_BIT		BIT(3)
+
 #define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
 #define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
 #define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
@@ -150,6 +160,8 @@
 	((mode == POWER_SUPPLY_PL_USBIN_USBIN) \
 	 || (mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
 
+#define PARALLEL_ENABLE_VOTER			"PARALLEL_ENABLE_VOTER"
+
 struct smb_chg_param {
 	const char	*name;
 	u16		reg;
@@ -224,6 +236,8 @@ struct smb1355 {
 	bool			exit_die_temp;
 	struct delayed_work	die_temp_work;
 	bool			disabled;
+
+	struct votable		*irq_disable_votable;
 };
 
 static bool is_secure(struct smb1355 *chip, int addr)
@@ -449,7 +463,7 @@ static int smb1355_parse_dt(struct smb1355 *chip)
 	if (of_property_read_bool(node, "qcom,stacked-batfet"))
 		chip->dt.pl_batfet_mode = POWER_SUPPLY_PL_STACKED_BATFET;
 
-	return rc;
+	return 0;
 }
 
 /*****************************
@@ -662,6 +676,18 @@ static int smb1355_set_parallel_charging(struct smb1355 *chip, bool disable)
 		schedule_delayed_work(&chip->die_temp_work, 0);
 	}
 
+	if (chip->irq_disable_votable)
+		vote(chip->irq_disable_votable, PARALLEL_ENABLE_VOTER,
+				disable, 0);
+
+	rc = smb1355_masked_write(chip, BANDGAP_ENABLE_REG,
+				BANDGAP_ENABLE_CMD_BIT,
+				disable ? 0 : BANDGAP_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure bandgap enable rc=%d\n", rc);
+		return rc;
+	}
+
 	chip->disabled = disable;
 
 	return 0;
@@ -947,6 +973,22 @@ static int smb1355_init_hw(struct smb1355 *chip)
 		return rc;
 	}
 
+	/* Enable valley current comparator all the time */
+	rc = smb1355_masked_write(chip, ANA1_ENG_SREFGEN_CFG2_REG,
+		VALLEY_COMPARATOR_EN_BIT, VALLEY_COMPARATOR_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable valley current comparator rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Set LS_VALLEY threshold to 85% */
+	rc = smb1355_masked_write(chip, MISC_CUST_SDCDC_ILIMIT_CFG_REG,
+		LS_VALLEY_THRESH_PCT_BIT, LS_VALLEY_THRESH_PCT_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't set LS valley threshold to 85pc rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smb1355_tskin_sensor_config(chip);
 	if (rc < 0) {
 		pr_err("Couldn't configure tskin regs rc=%d\n", rc);
@@ -1084,6 +1126,7 @@ static int smb1355_request_interrupt(struct smb1355 *chip,
 		return rc;
 	}
 
+	smb1355_irqs[irq_index].irq = irq;
 	if (smb1355_irqs[irq_index].wake)
 		enable_irq_wake(irq);
 
@@ -1112,6 +1155,23 @@ static int smb1355_request_interrupts(struct smb1355 *chip)
 
 	return rc;
 }
+static int smb1355_irq_disable_callback(struct votable *votable, void *data,
+			int disable, const char *client)
+
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb1355_irqs); i++) {
+		if (smb1355_irqs[i].irq) {
+			if (disable)
+				disable_irq(smb1355_irqs[i].irq);
+			else
+				enable_irq(smb1355_irqs[i].irq);
+		}
+	}
+
+	return 0;
+}
 
 /*********
  * PROBE *
@@ -1187,6 +1247,15 @@ static int smb1355_probe(struct platform_device *pdev)
 		goto cleanup;
 	}
 
+	chip->irq_disable_votable = create_votable("SMB1355_IRQ_DISABLE",
+			VOTE_SET_ANY, smb1355_irq_disable_callback, chip);
+	if (IS_ERR(chip->irq_disable_votable)) {
+		rc = PTR_ERR(chip->irq_disable_votable);
+		goto cleanup;
+	}
+	/* keep IRQs disabled until parallel charging is enabled */
+	vote(chip->irq_disable_votable, PARALLEL_ENABLE_VOTER, true, 0);
+
 	pr_info("%s probed successfully pl_mode=%s batfet_mode=%s\n",
 		chip->name,
 		IS_USBIN(chip->dt.pl_mode) ? "USBIN-USBIN" : "USBMID-USBMID",
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9013a58..f32fc70 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -964,7 +964,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 			   req->sgt.sgl, req->sgt.nents, dir);
 	if (nents == -EFAULT) {
 		rmcd_error("Failed to map SG list");
-		return -EFAULT;
+		ret = -EFAULT;
+		goto err_pg;
 	}
 
 	ret = do_dma_request(req, xfer, sync, nents);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index ee1b322..1bd67e6 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -888,6 +888,16 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_CPR
+	bool "RBCPR regulator driver for APC"
+	depends on OF
+	help
+	  Compile in the RBCPR (RapidBridge Core Power Reduction) driver to
+	  support corner voting on the APC power rail. The driver takes the PTE
+	  process-voltage suggestions fused into the efuse as initial settings
+	  and converts each corner vote to a voltage before writing it to a
+	  voltage regulator API, such as the one provided by spm-regulator.
+
 config REGULATOR_CPR3
 	bool "CPR3 regulator core support"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b2bfba8..c75e399 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -116,6 +116,7 @@
 obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
 obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
 
+obj-$(CONFIG_REGULATOR_CPR) += cpr-regulator.o
 obj-$(CONFIG_REGULATOR_CPR3) += cpr3-regulator.o cpr3-util.o
 obj-$(CONFIG_REGULATOR_CPR3_HMSS) += cpr3-hmss-regulator.o
 obj-$(CONFIG_REGULATOR_CPR3_MMSS) += cpr3-mmss-regulator.o
diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c
new file mode 100644
index 0000000..9c47e82
--- /dev/null
+++ b/drivers/regulator/cpr-regulator.c
@@ -0,0 +1,6408 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/cpr-regulator.h>
+#include <linux/msm_thermal.h>
+#include <linux/msm_tsens.h>
+#include <soc/qcom/scm.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION		0
+#define RBCPR_VER_2			0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_GCNT_BITS	10
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
+#define RBCPR_GCNT_TARGET_GCNT_MASK	((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)
+
+/* RBCPR Sensor Mask and Bypass Registers */
+#define REG_RBCPR_SENSOR_MASK0		0x20
+#define RBCPR_SENSOR_MASK0_SENSOR(n)	(~BIT(n))
+#define REG_RBCPR_SENSOR_BYPASS0	0x30
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL	0x44
+#define REG_RBIF_TIMER_ADJUST		0x4C
+
+#define RBIF_TIMER_ADJ_CONS_UP_BITS	4
+#define RBIF_TIMER_ADJ_CONS_UP_MASK	((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_BITS	4
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
+#define RBIF_TIMER_ADJ_CLAMP_INT_BITS	8
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	((1<<RBIF_TIMER_ADJ_CLAMP_INT_BITS)-1)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT			0x48
+#define REG_RBCPR_STEP_QUOT		0x80
+#define REG_RBIF_SW_VLEVEL		0x94
+
+#define RBIF_LIMIT_CEILING_BITS		6
+#define RBIF_LIMIT_CEILING_MASK		((1<<RBIF_LIMIT_CEILING_BITS)-1)
+#define RBIF_LIMIT_CEILING_SHIFT	6
+#define RBIF_LIMIT_FLOOR_BITS		6
+#define RBIF_LIMIT_FLOOR_MASK		((1<<RBIF_LIMIT_FLOOR_BITS)-1)
+
+#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT	0
+#define RBIF_SW_VLEVEL_DEFAULT		0x20
+
+#define RBCPR_STEP_QUOT_STEPQUOT_BITS	8
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK	((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_BITS	4
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL			0x90
+
+#define RBCPR_CTL_LOOP_EN			BIT(0)
+#define RBCPR_CTL_TIMER_EN			BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
+#define RBCPR_CTL_COUNT_MODE			BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_BITS	4
+#define RBCPR_CTL_UP_THRESHOLD_MASK	((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
+#define RBCPR_CTL_DN_THRESHOLD_BITS	4
+#define RBCPR_CTL_DN_THRESHOLD_MASK	((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD		0x98
+#define REG_RBIF_CONT_NACK_CMD		0x9C
+
+/* RBCPR Result status Registers */
+#define REG_RBCPR_RESULT_0		0xA0
+#define REG_RBCPR_RESULT_1		0xA4
+
+#define RBCPR_RESULT_1_SEL_FAST_BITS	3
+#define RBCPR_RESULT_1_SEL_FAST(val)	(val & \
+					((1<<RBCPR_RESULT_1_SEL_FAST_BITS) - 1))
+
+#define RBCPR_RESULT0_BUSY_SHIFT	19
+#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
+#define RBCPR_RESULT0_ERROR_SHIFT	6
+#define RBCPR_RESULT0_ERROR_BITS	12
+#define RBCPR_RESULT0_ERROR_MASK	((1<<RBCPR_RESULT0_ERROR_BITS)-1)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
+#define RBCPR_RESULT0_ERROR_STEPS_BITS	4
+#define RBCPR_RESULT0_ERROR_STEPS_MASK	((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
+#define RBCPR_RESULT0_STEP_UP_SHIFT	1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR		0x110
+#define REG_RBIF_IRQ_STATUS		0x114
+
+#define CPR_INT_DONE		BIT(0)
+#define CPR_INT_MIN		BIT(1)
+#define CPR_INT_DOWN		BIT(2)
+#define CPR_INT_MID		BIT(3)
+#define CPR_INT_UP		BIT(4)
+#define CPR_INT_MAX		BIT(5)
+#define CPR_INT_CLAMP		BIT(6)
+#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC	8
+
+/* RBCPR Debug Register */
+#define REG_RBCPR_DEBUG1		0x120
+#define RBCPR_DEBUG1_QUOT_FAST_BITS	12
+#define RBCPR_DEBUG1_QUOT_SLOW_BITS	12
+#define RBCPR_DEBUG1_QUOT_SLOW_SHIFT	12
+
+#define RBCPR_DEBUG1_QUOT_FAST(val)	(val & \
+					((1<<RBCPR_DEBUG1_QUOT_FAST_BITS)-1))
+
+#define RBCPR_DEBUG1_QUOT_SLOW(val)	((val>>RBCPR_DEBUG1_QUOT_SLOW_SHIFT) & \
+					((1<<RBCPR_DEBUG1_QUOT_SLOW_BITS)-1))
+
+/* RBCPR Aging Register */
+#define REG_RBCPR_HTOL_AGE		0x160
+#define RBCPR_HTOL_AGE_PAGE		BIT(1)
+#define RBCPR_AGE_DATA_STATUS		BIT(2)
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK	BIT(0)
+#define RBCPR_CLK_SEL_19P2_MHZ	0
+#define RBCPR_CLK_SEL_AHB_CLK	BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS	12
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK	((1<<CPR_FUSE_TARGET_QUOT_BITS)-1)
+#define CPR_FUSE_RO_SEL_BITS		3
+#define CPR_FUSE_RO_SEL_BITS_MASK	((1<<CPR_FUSE_RO_SEL_BITS)-1)
+
+#define CPR_FUSE_MIN_QUOT_DIFF		50
+
+#define BYTES_PER_FUSE_ROW		8
+
+#define SPEED_BIN_NONE			UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN		(-1)
+#define FUSE_MAP_NO_MATCH		(-1)
+#define FUSE_PARAM_MATCH_ANY		0xFFFFFFFF
+
+#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)
+#define FLAGS_SET_MIN_VOLTAGE		BIT(1)
+#define FLAGS_UPLIFT_QUOT_VOLT		BIT(2)
+
+/*
+ * The number of individual aging measurements to perform, which are then
+ * averaged together in order to determine the final aging adjustment value.
+ */
+#define CPR_AGING_MEASUREMENT_ITERATIONS	16
+
+/*
+ * Aging measurements for the aged and unaged ring oscillators take place a few
+ * microseconds apart.  If the vdd-supply voltage fluctuates between the two
+ * measurements, then the difference between them will be incorrect.  The
+ * difference could end up too high or too low.  This constant defines the
+ * number of lowest and highest measurements to ignore when averaging.
+ */
+#define CPR_AGING_MEASUREMENT_FILTER	3
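+/*
+ * With the values above, at most 16 - 2 * 3 = 10 middle measurements
+ * contribute to the averaged aging quotient delta; the lowest and highest
+ * CPR_AGING_MEASUREMENT_FILTER samples are discarded before averaging.
+ */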
+
+#define CPR_REGULATOR_DRIVER_NAME	"qcom,cpr-regulator"
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * %VDD_MX_VMIN_APC:			Equal to APC voltage
+ * %VDD_MX_VMIN_APC_CORNER_CEILING:	Equal to PVS corner ceiling voltage
+ * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ *					Equal to slow speed corner ceiling
+ * %VDD_MX_VMIN_MX_VMAX:		Equal to specified vdd-mx-vmax voltage
+ * %VDD_MX_VMIN_APC_FUSE_CORNER_MAP:	Equal to the APC fuse corner mapped MX
+ *					voltage
+ * %VDD_MX_VMIN_APC_CORNER_MAP:		Equal to the APC corner mapped MX
+ *					voltage
+ */
+enum vdd_mx_vmin_method {
+	VDD_MX_VMIN_APC,
+	VDD_MX_VMIN_APC_CORNER_CEILING,
+	VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+	VDD_MX_VMIN_MX_VMAX,
+	VDD_MX_VMIN_APC_FUSE_CORNER_MAP,
+	VDD_MX_VMIN_APC_CORNER_MAP,
+};
+
+#define CPR_CORNER_MIN		1
+#define CPR_FUSE_CORNER_MIN	1
+/*
+ * This is an arbitrary upper limit which is used in a sanity check in order to
+ * avoid excessive memory allocation due to bad device tree data.
+ */
+#define CPR_FUSE_CORNER_LIMIT	100
+
+struct quot_adjust_info {
+	int speed_bin;
+	int virtual_corner;
+	int quot_adjust;
+};
+
+struct cpr_quot_scale {
+	u32 offset;
+	u32 multiplier;
+};
+
+struct cpr_aging_sensor_info {
+	u32 sensor_id;
+	int initial_quot_diff;
+	int current_quot_diff;
+};
+
+struct cpr_aging_info {
+	struct cpr_aging_sensor_info *sensor_info;
+	int	num_aging_sensors;
+	int	aging_corner;
+	u32	aging_ro_kv;
+	u32	*aging_derate;
+	u32	aging_sensor_bypass;
+	u32	max_aging_margin;
+	u32	aging_ref_voltage;
+	u32	cpr_ro_kv[CPR_NUM_RING_OSC];
+	int	*voltage_adjust;
+
+	bool	cpr_aging_error;
+	bool	cpr_aging_done;
+};
+
+static const char * const vdd_apc_name[] =	{"vdd-apc-optional-prim",
+						"vdd-apc-optional-sec",
+						"vdd-apc"};
+
+enum voltage_change_dir {
+	NO_CHANGE,
+	DOWN,
+	UP,
+};
+
+struct cpr_regulator {
+	struct list_head		list;
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	bool				vreg_enabled;
+	int				corner;
+	int				ceiling_max;
+	struct dentry			*debugfs;
+	struct device			*dev;
+
+	/* eFuse parameters */
+	phys_addr_t	efuse_addr;
+	void __iomem	*efuse_base;
+	u64		*remapped_row;
+	u32		remapped_row_base;
+	int		num_remapped_rows;
+
+	/* Process voltage parameters */
+	u32		*pvs_corner_v;
+	/* Process voltage variables */
+	u32		pvs_bin;
+	u32		speed_bin;
+	u32		pvs_version;
+
+	/* APC voltage regulator */
+	struct regulator	*vdd_apc;
+
+	/* Dependency parameters */
+	struct regulator	*vdd_mx;
+	int			vdd_mx_vmax;
+	int			vdd_mx_vmin_method;
+	int			vdd_mx_vmin;
+	int			*vdd_mx_corner_map;
+
+	struct regulator	*rpm_apc_vreg;
+	int			*rpm_apc_corner_map;
+
+	/* mem-acc regulator */
+	struct regulator	*mem_acc_vreg;
+
+	/* thermal monitor */
+	int			tsens_id;
+	int			cpr_disable_temp_threshold;
+	int			cpr_enable_temp_threshold;
+	bool			cpr_disable_on_temperature;
+	bool			cpr_thermal_disable;
+	struct threshold_info	tsens_threshold_config;
+
+	/* CPR parameters */
+	u32		num_fuse_corners;
+	u64		cpr_fuse_bits;
+	bool		cpr_fuse_disable;
+	bool		cpr_fuse_local;
+	bool		cpr_fuse_redundant;
+	int		cpr_fuse_revision;
+	int		cpr_fuse_map_count;
+	int		cpr_fuse_map_match;
+	int		*cpr_fuse_target_quot;
+	int		*cpr_fuse_ro_sel;
+	int		*fuse_quot_offset;
+	int		gcnt;
+
+	unsigned int	cpr_irq;
+	void __iomem	*rbcpr_base;
+	phys_addr_t	rbcpr_clk_addr;
+	struct mutex	cpr_mutex;
+
+	int		*cpr_max_ceiling;
+	int		*ceiling_volt;
+	int		*floor_volt;
+	int		*fuse_ceiling_volt;
+	int		*fuse_floor_volt;
+	int		*last_volt;
+	int		*open_loop_volt;
+	int		step_volt;
+
+	int		*save_ctl;
+	int		*save_irq;
+
+	int		*vsens_corner_map;
+	/* vsens status */
+	bool		vsens_enabled;
+	/* vsens regulators */
+	struct regulator	*vdd_vsens_corner;
+	struct regulator	*vdd_vsens_voltage;
+
+	/* Config parameters */
+	bool		enable;
+	u32		ref_clk_khz;
+	u32		timer_delay_us;
+	u32		timer_cons_up;
+	u32		timer_cons_down;
+	u32		irq_line;
+	u32		*step_quotient;
+	u32		up_threshold;
+	u32		down_threshold;
+	u32		idle_clocks;
+	u32		gcnt_time_us;
+	u32		clamp_timer_interval;
+	u32		vdd_apc_step_up_limit;
+	u32		vdd_apc_step_down_limit;
+	u32		flags;
+	int		*corner_map;
+	u32		num_corners;
+	int		*quot_adjust;
+	int		*mem_acc_corner_map;
+
+	int			num_adj_cpus;
+	int			online_cpus;
+	int			*adj_cpus;
+	int			**adj_cpus_save_ctl;
+	int			**adj_cpus_save_irq;
+	int			**adj_cpus_last_volt;
+	int			**adj_cpus_quot_adjust;
+	int			**adj_cpus_open_loop_volt;
+	bool			adj_cpus_open_loop_volt_as_ceiling;
+	struct notifier_block	cpu_notifier;
+	cpumask_t		cpu_mask;
+	bool			cpr_disabled_in_pc;
+	struct notifier_block	pm_notifier;
+
+	bool		is_cpr_suspended;
+	bool		skip_voltage_change_during_suspend;
+
+	struct cpr_aging_info	*aging_info;
+
+	struct notifier_block	panic_notifier;
+};
+
+#define CPR_DEBUG_MASK_IRQ	BIT(0)
+#define CPR_DEBUG_MASK_API	BIT(1)
+
+static int cpr_debug_enable;
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *cpr_debugfs_base;
+#endif
+
+static DEFINE_MUTEX(cpr_regulator_list_mutex);
+static LIST_HEAD(cpr_regulator_list);
+
+module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
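+/*
+ * cpr_debug_enable is a bitmask: for example, writing 3 to the debug_enable
+ * module parameter at runtime enables both CPR_DEBUG_MASK_IRQ and
+ * CPR_DEBUG_MASK_API logging.
+ */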
+#define cpr_debug(cpr_vreg, message, ...) \
+	do { \
+		if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
+			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+	} while (0)
+#define cpr_debug_irq(cpr_vreg, message, ...) \
+	do { \
+		if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+			pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+		else \
+			pr_debug("%s: " message, (cpr_vreg)->rdesc.name, \
+				##__VA_ARGS__); \
+	} while (0)
+#define cpr_info(cpr_vreg, message, ...) \
+	pr_info("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+#define cpr_err(cpr_vreg, message, ...) \
+	pr_err("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+
+static u64 cpr_read_remapped_efuse_row(struct cpr_regulator *cpr_vreg,
+					u32 row_num)
+{
+	if (row_num - cpr_vreg->remapped_row_base
+			>= cpr_vreg->num_remapped_rows) {
+		cpr_err(cpr_vreg, "invalid row=%u, max remapped row=%u\n",
+			row_num, cpr_vreg->remapped_row_base
+					+ cpr_vreg->num_remapped_rows - 1);
+		return 0;
+	}
+
+	return cpr_vreg->remapped_row[row_num - cpr_vreg->remapped_row_base];
+}
+
+static u64 cpr_read_efuse_row(struct cpr_regulator *cpr_vreg, u32 row_num,
+				bool use_tz_api)
+{
+	int rc;
+	u64 efuse_bits;
+	struct scm_desc desc = {0};
+	struct cpr_read_req {
+		u32 row_address;
+		int addr_type;
+	} req;
+
+	struct cpr_read_rsp {
+		u32 row_data[2];
+		u32 status;
+	} rsp;
+
+	if (cpr_vreg->remapped_row && row_num >= cpr_vreg->remapped_row_base)
+		return cpr_read_remapped_efuse_row(cpr_vreg, row_num);
+
+	if (!use_tz_api) {
+		efuse_bits = readq_relaxed(cpr_vreg->efuse_base
+			+ row_num * BYTES_PER_FUSE_ROW);
+		return efuse_bits;
+	}
+
+	desc.args[0] = req.row_address = cpr_vreg->efuse_addr +
+					row_num * BYTES_PER_FUSE_ROW;
+	desc.args[1] = req.addr_type = 0;
+	desc.arginfo = SCM_ARGS(2);
+	efuse_bits = 0;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+			&req, sizeof(req), &rsp, sizeof(rsp));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
+				&desc);
+		rsp.row_data[0] = desc.ret[0];
+		rsp.row_data[1] = desc.ret[1];
+		rsp.status = desc.ret[2];
+	}
+
+	if (rc) {
+		cpr_err(cpr_vreg, "read row %d failed, err code = %d",
+			row_num, rc);
+	} else {
+		efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+				(u64)rsp.row_data[0];
+	}
+
+	return efuse_bits;
+}
+
+/**
+ * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
+ * @cpr_vreg:	Pointer to cpr_regulator struct for this regulator.
+ * @row_start:	Fuse row number to start reading from.
+ * @bit_start:	The LSB of the parameter to read from the fuse.
+ * @bit_len:	The length of the parameter in bits.
+ * @use_tz_api:	Flag to indicate if an SCM call should be used to read the fuse.
+ *
+ * This function reads a parameter of specified offset and bit size out of one
+ * or two consecutive eFuse rows.  This allows for the reading of parameters
+ * that happen to be split between two eFuse rows.
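+ *
+ * For example, with @bit_start = 60 and @bit_len = 8, the low four bits of
+ * the returned value come from bits 60..63 of @row_start and the high four
+ * bits come from bits 0..3 of the following row.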
+ *
+ * Returns the fuse parameter on success or 0 on failure.
+ */
+static u64 cpr_read_efuse_param(struct cpr_regulator *cpr_vreg, int row_start,
+		int bit_start, int bit_len, bool use_tz_api)
+{
+	u64 fuse[2];
+	u64 param = 0;
+	int bits_first, bits_second;
+
+	if (bit_start < 0) {
+		cpr_err(cpr_vreg, "Invalid LSB = %d specified\n", bit_start);
+		return 0;
+	}
+
+	if (bit_len < 0 || bit_len > 64) {
+		cpr_err(cpr_vreg, "Invalid bit length = %d specified\n",
+			bit_len);
+		return 0;
+	}
+
+	/* Allow bit indexing to start beyond the end of the start row. */
+	if (bit_start >= 64) {
+		row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
+		bit_start &= 0x3F;
+	}
+
+	fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start, use_tz_api);
+
+	if (bit_start == 0 && bit_len == 64) {
+		param = fuse[0];
+	} else if (bit_start + bit_len <= 64) {
+		param = (fuse[0] >> bit_start) & ((1ULL << bit_len) - 1);
+	} else {
+		fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1,
+						use_tz_api);
+		bits_first = 64 - bit_start;
+		bits_second = bit_len - bits_first;
+		param = (fuse[0] >> bit_start) & ((1ULL << bits_first) - 1);
+		param |= (fuse[1] & ((1ULL << bits_second) - 1)) << bits_first;
+	}
+
+	return param;
+}
+
+static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable ||
+				cpr_vreg->cpr_thermal_disable)
+		return false;
+	else
+		return true;
+}
+
+static void cpr_write(struct cpr_regulator *cpr_vreg, u32 offset, u32 value)
+{
+	writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
+}
+
+static u32 cpr_read(struct cpr_regulator *cpr_vreg, u32 offset)
+{
+	return readl_relaxed(cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_masked_write(struct cpr_regulator *cpr_vreg, u32 offset,
+			     u32 mask, u32 value)
+{
+	u32 reg_val;
+
+	reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
+	reg_val &= ~mask;
+	reg_val |= value & mask;
+	writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_regulator *cpr_vreg)
+{
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_regulator *cpr_vreg)
+{
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_regulator *cpr_vreg)
+{
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_regulator *cpr_vreg, u32 int_bits)
+{
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_regulator *cpr_vreg, u32 mask, u32 value)
+{
+	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
+{
+	u32 val;
+
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	/* Program Consecutive Up & Down */
+	val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+			<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+		(cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+			RBIF_TIMER_ADJ_CONS_UP_MASK |
+			RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
+	cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
+			RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+			RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+			cpr_vreg->save_ctl[corner]);
+	cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);
+
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled &&
+	    (cpr_vreg->ceiling_volt[corner] >
+		cpr_vreg->floor_volt[corner]))
+		val = RBCPR_CTL_LOOP_EN;
+	else
+		val = 0;
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	cpr_irq_set(cpr_vreg, 0);
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+			RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+	cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+			RBIF_TIMER_ADJ_CONS_UP_MASK |
+			RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+	cpr_irq_clr(cpr_vreg);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+	cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_regulator *cpr_vreg)
+{
+	u32 reg_val;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_regulator *cpr_vreg)
+{
+	u32 reg_val;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+	return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_regulator *cpr_vreg, int corner)
+{
+	cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	cpr_vreg->save_irq[corner] =
+		cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+}
+
+static void cpr_corner_restore(struct cpr_regulator *cpr_vreg, int corner)
+{
+	u32 gcnt, ctl, irq, ro_sel, step_quot;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int i;
+
+	ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+	gcnt = cpr_vreg->gcnt | (cpr_vreg->cpr_fuse_target_quot[fuse_corner] -
+					cpr_vreg->quot_adjust[corner]);
+
+	/* Program the step quotient and idle clocks */
+	step_quot = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
+			<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
+		(cpr_vreg->step_quotient[fuse_corner]
+			& RBCPR_STEP_QUOT_STEPQUOT_MASK);
+	cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, step_quot);
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+	ctl = cpr_vreg->save_ctl[corner];
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
+	irq = cpr_vreg->save_irq[corner];
+	cpr_irq_set(cpr_vreg, irq);
+	cpr_debug(cpr_vreg, "gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n",
+		  gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_regulator *cpr_vreg, int corner)
+{
+	if (cpr_vreg->corner == corner)
+		return;
+
+	cpr_corner_restore(cpr_vreg, corner);
+}
+
+static int cpr_apc_set(struct cpr_regulator *cpr_vreg, u32 new_volt)
+{
+	int max_volt, rc;
+
+	max_volt = cpr_vreg->ceiling_max;
+	rc = regulator_set_voltage(cpr_vreg->vdd_apc, new_volt, max_volt);
+	if (rc)
+		cpr_err(cpr_vreg, "set: vdd_apc = %d uV: rc=%d\n",
+			new_volt, rc);
+	return rc;
+}
+
+static int cpr_mx_get(struct cpr_regulator *cpr_vreg, int corner, int apc_volt)
+{
+	int vdd_mx;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+
+	switch (cpr_vreg->vdd_mx_vmin_method) {
+	case VDD_MX_VMIN_APC:
+		vdd_mx = apc_volt;
+		break;
+	case VDD_MX_VMIN_APC_CORNER_CEILING:
+		vdd_mx = cpr_vreg->fuse_ceiling_volt[fuse_corner];
+		break;
+	case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+		vdd_mx = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
+		break;
+	case VDD_MX_VMIN_MX_VMAX:
+		vdd_mx = cpr_vreg->vdd_mx_vmax;
+		break;
+	case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+		vdd_mx = cpr_vreg->vdd_mx_corner_map[fuse_corner];
+		break;
+	case VDD_MX_VMIN_APC_CORNER_MAP:
+		vdd_mx = cpr_vreg->vdd_mx_corner_map[corner];
+		break;
+	default:
+		vdd_mx = 0;
+		break;
+	}
+
+	return vdd_mx;
+}
+
+static int cpr_mx_set(struct cpr_regulator *cpr_vreg, int corner,
+		      int vdd_mx_vmin)
+{
+	int rc;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+
+	rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+				   cpr_vreg->vdd_mx_vmax);
+	cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] %d uV\n", corner,
+			fuse_corner, vdd_mx_vmin);
+
+	if (!rc) {
+		cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+	} else {
+		cpr_err(cpr_vreg, "set: vdd_mx [corner:%d, fuse_corner:%d] = %d uV failed: rc=%d\n",
+			corner, fuse_corner, vdd_mx_vmin, rc);
+	}
+	return rc;
+}
+
+static int cpr_scale_voltage(struct cpr_regulator *cpr_vreg, int corner,
+			     int new_apc_volt, enum voltage_change_dir dir)
+{
+	int rc = 0, vdd_mx_vmin = 0;
+	int mem_acc_corner = cpr_vreg->mem_acc_corner_map[corner];
+	int fuse_corner = cpr_vreg->corner_map[corner];
+	int apc_corner, vsens_corner;
+
+	/* Determine the vdd_mx voltage */
+	if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
+		vdd_mx_vmin = cpr_mx_get(cpr_vreg, corner, new_apc_volt);
+
+	if (cpr_vreg->vdd_vsens_voltage && cpr_vreg->vsens_enabled) {
+		rc = regulator_disable(cpr_vreg->vdd_vsens_voltage);
+		if (!rc)
+			cpr_vreg->vsens_enabled = false;
+	}
+
+	if (dir == DOWN) {
+		if (!rc && cpr_vreg->mem_acc_vreg)
+			rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+					mem_acc_corner, mem_acc_corner);
+		if (!rc && cpr_vreg->rpm_apc_vreg) {
+			apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+			rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+						apc_corner, apc_corner);
+			if (rc)
+				cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+						rc);
+		}
+	}
+
+	if (!rc && vdd_mx_vmin && dir == UP) {
+		if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+			rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+	}
+
+	if (!rc)
+		rc = cpr_apc_set(cpr_vreg, new_apc_volt);
+
+	if (dir == UP) {
+		if (!rc && cpr_vreg->mem_acc_vreg)
+			rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+					mem_acc_corner, mem_acc_corner);
+		if (!rc && cpr_vreg->rpm_apc_vreg) {
+			apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+			rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+						apc_corner, apc_corner);
+			if (rc)
+				cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+						rc);
+		}
+	}
+
+	if (!rc && vdd_mx_vmin && dir == DOWN) {
+		if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+			rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+	}
+
+	if (!rc && cpr_vreg->vdd_vsens_corner) {
+		vsens_corner = cpr_vreg->vsens_corner_map[fuse_corner];
+		rc = regulator_set_voltage(cpr_vreg->vdd_vsens_corner,
+					vsens_corner, vsens_corner);
+	}
+	if (!rc && cpr_vreg->vdd_vsens_voltage) {
+		rc = regulator_set_voltage(cpr_vreg->vdd_vsens_voltage,
+					cpr_vreg->floor_volt[corner],
+					cpr_vreg->ceiling_volt[corner]);
+		if (!rc && !cpr_vreg->vsens_enabled) {
+			rc = regulator_enable(cpr_vreg->vdd_vsens_voltage);
+			if (!rc)
+				cpr_vreg->vsens_enabled = true;
+		}
+	}
+
+	return rc;
+}
+
+static void cpr_scale(struct cpr_regulator *cpr_vreg,
+		      enum voltage_change_dir dir)
+{
+	u32 reg_val, error_steps, reg_mask;
+	int last_volt, new_volt, corner, fuse_corner;
+	u32 gcnt, quot;
+
+	corner = cpr_vreg->corner;
+	fuse_corner = cpr_vreg->corner_map[corner];
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+
+	error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+				& RBCPR_RESULT0_ERROR_STEPS_MASK;
+	last_volt = cpr_vreg->last_volt[corner];
+
+	cpr_debug_irq(cpr_vreg,
+			"last_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, last_volt);
+
+	gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET
+			(cpr_vreg->cpr_fuse_ro_sel[fuse_corner]));
+	quot = gcnt & ((1 << RBCPR_GCNT_TARGET_GCNT_SHIFT) - 1);
+
+	if (dir == UP) {
+		if (cpr_vreg->clamp_timer_interval
+				&& error_steps < cpr_vreg->up_threshold) {
+			/*
+			 * Handle the case where another measurement started
+			 * after the interrupt was triggered due to a core
+			 * exiting from power collapse.
+			 */
+			error_steps = max(cpr_vreg->up_threshold,
+					cpr_vreg->vdd_apc_step_up_limit);
+		}
+		cpr_debug_irq(cpr_vreg,
+				"Up: cpr status = 0x%08x (error_steps=%d)\n",
+				reg_val, error_steps);
+
+		if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+			"[corn:%d, fuse_corn:%d] @ ceiling: %d >= %d: NACK\n",
+				corner, fuse_corner, last_volt,
+				cpr_vreg->ceiling_volt[corner]);
+			cpr_irq_clr_nack(cpr_vreg);
+
+			cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+					gcnt, quot);
+
+			/* Maximize the UP threshold */
+			reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+					RBCPR_CTL_UP_THRESHOLD_SHIFT;
+			reg_val = reg_mask;
+			cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+			/* Disable UP interrupt */
+			cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+			return;
+		}
+
+		if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
+			cpr_debug_irq(cpr_vreg,
+				      "%d is over up-limit(%d): Clamp\n",
+				      error_steps,
+				      cpr_vreg->vdd_apc_step_up_limit);
+			error_steps = cpr_vreg->vdd_apc_step_up_limit;
+		}
+
+		/* Calculate new voltage */
+		new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
+		if (new_volt > cpr_vreg->ceiling_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+				      "new_volt(%d) >= ceiling(%d): Clamp\n",
+				      new_volt,
+				      cpr_vreg->ceiling_volt[corner]);
+
+			new_volt = cpr_vreg->ceiling_volt[corner];
+		}
+
+		if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+			cpr_irq_clr_nack(cpr_vreg);
+			return;
+		}
+		cpr_vreg->last_volt[corner] = new_volt;
+
+		/* Disable auto nack down */
+		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+		reg_val = 0;
+
+		cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+		/* Re-enable default interrupts */
+		cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+		/* Ack */
+		cpr_irq_clr_ack(cpr_vreg);
+
+		cpr_debug_irq(cpr_vreg,
+			"UP: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, new_volt);
+	} else if (dir == DOWN) {
+		if (cpr_vreg->clamp_timer_interval
+				&& error_steps < cpr_vreg->down_threshold) {
+			/*
+			 * Handle the case where another measurement started
+			 * after the interrupt was triggered due to a core
+			 * exiting from power collapse.
+			 */
+			error_steps = max(cpr_vreg->down_threshold,
+					cpr_vreg->vdd_apc_step_down_limit);
+		}
+		cpr_debug_irq(cpr_vreg,
+			      "Down: cpr status = 0x%08x (error_steps=%d)\n",
+			      reg_val, error_steps);
+
+		if (last_volt <= cpr_vreg->floor_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+			"[corn:%d, fuse_corner:%d] @ floor: %d <= %d: NACK\n",
+				corner, fuse_corner, last_volt,
+				cpr_vreg->floor_volt[corner]);
+			cpr_irq_clr_nack(cpr_vreg);
+
+			cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+					gcnt, quot);
+
+			/* Enable auto nack down */
+			reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+			reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+			cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+			/* Disable DOWN interrupt */
+			cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+			return;
+		}
+
+		if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
+			cpr_debug_irq(cpr_vreg,
+				      "%d is over down-limit(%d): Clamp\n",
+				      error_steps,
+				      cpr_vreg->vdd_apc_step_down_limit);
+			error_steps = cpr_vreg->vdd_apc_step_down_limit;
+		}
+
+		/* Calculate new voltage */
+		new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
+		if (new_volt < cpr_vreg->floor_volt[corner]) {
+			cpr_debug_irq(cpr_vreg,
+				      "new_volt(%d) < floor(%d): Clamp\n",
+				      new_volt,
+				      cpr_vreg->floor_volt[corner]);
+			new_volt = cpr_vreg->floor_volt[corner];
+		}
+
+		if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+			cpr_irq_clr_nack(cpr_vreg);
+			return;
+		}
+		cpr_vreg->last_volt[corner] = new_volt;
+
+		/* Restore default threshold for UP */
+		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+				RBCPR_CTL_UP_THRESHOLD_SHIFT;
+		reg_val = cpr_vreg->up_threshold <<
+				RBCPR_CTL_UP_THRESHOLD_SHIFT;
+		cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+		/* Re-enable default interrupts */
+		cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+		/* Ack */
+		cpr_irq_clr_ack(cpr_vreg);
+
+		cpr_debug_irq(cpr_vreg,
+		"DOWN: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+			corner, fuse_corner, new_volt);
+	}
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+	struct cpr_regulator *cpr_vreg = dev;
+	u32 reg_val;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+	if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+		reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+
+	cpr_debug_irq(cpr_vreg, "IRQ_STATUS = 0x%02X\n", reg_val);
+
+	if (!cpr_ctl_is_enabled(cpr_vreg)) {
+		cpr_debug_irq(cpr_vreg, "CPR is disabled\n");
+		goto _exit;
+	} else if (cpr_ctl_is_busy(cpr_vreg)
+			&& !cpr_vreg->clamp_timer_interval) {
+		cpr_debug_irq(cpr_vreg, "CPR measurement is not ready\n");
+		goto _exit;
+	} else if (!cpr_is_allowed(cpr_vreg)) {
+		reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+		cpr_err(cpr_vreg, "Interrupt broken? RBCPR_CTL = 0x%02X\n",
+			reg_val);
+		goto _exit;
+	}
+
+	/* Following sequence of handling is as per each IRQ's priority */
+	if (reg_val & CPR_INT_UP) {
+		cpr_scale(cpr_vreg, UP);
+	} else if (reg_val & CPR_INT_DOWN) {
+		cpr_scale(cpr_vreg, DOWN);
+	} else if (reg_val & CPR_INT_MIN) {
+		cpr_irq_clr_nack(cpr_vreg);
+	} else if (reg_val & CPR_INT_MAX) {
+		cpr_irq_clr_nack(cpr_vreg);
+	} else if (reg_val & CPR_INT_MID) {
+		/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+		cpr_debug_irq(cpr_vreg, "IRQ occurred for Mid Flag\n");
+	} else {
+		cpr_debug_irq(cpr_vreg,
+			"IRQ occurred for unknown flag (0x%08x)\n", reg_val);
+	}
+
+	/* Save register values for the corner */
+	cpr_corner_save(cpr_vreg, cpr_vreg->corner);
+
+_exit:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return IRQ_HANDLED;
+}
+
+/**
+ * cmp_int() - int comparison function to be passed into the sort() function
+ *		so that values are sorted in ascending order
+ * @a:			First int value
+ * @b:			Second int value
+ *
+ * Return: >0 if a > b, 0 if a == b, <0 if a < b
+ */
+static int cmp_int(const void *a, const void *b)
+{
+	return *(int *)a - *(int *)b;
+}
+
+static int cpr_get_aging_quot_delta(struct cpr_regulator *cpr_vreg,
+			struct cpr_aging_sensor_info *aging_sensor_info)
+{
+	int quot_min, quot_max, is_aging_measurement, aging_measurement_count;
+	int quot_min_scaled, quot_max_scaled, quot_delta_scaled_sum;
+	int retries, rc = 0, sel_fast = 0, i, quot_delta_scaled;
+	u32 val, gcnt_ref, gcnt;
+	int *quot_delta_results, filtered_count;
+
+	quot_delta_results = kcalloc(CPR_AGING_MEASUREMENT_ITERATIONS,
+			sizeof(*quot_delta_results), GFP_ATOMIC);
+	if (!quot_delta_results)
+		return -ENOMEM;
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	/* Program GCNT0/1 for getting aging data */
+	gcnt_ref = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+	gcnt = gcnt_ref * 3 / 2;
+	val = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+			RBCPR_GCNT_TARGET_GCNT_SHIFT;
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), val);
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), val);
+
+	val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(0));
+	cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET0 = 0x%08x\n", val);
+
+	val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(1));
+	cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET1 = 0x%08x\n", val);
+
+	/* Program TIMER_INTERVAL to zero */
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, 0);
+
+	/* Bypass sensors in collapsible domain */
+	if (cpr_vreg->aging_info->aging_sensor_bypass)
+		cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0,
+			(cpr_vreg->aging_info->aging_sensor_bypass &
+		RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id)));
+
+	/* Mask other sensors */
+	cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0,
+		RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id));
+	val = cpr_read(cpr_vreg, REG_RBCPR_SENSOR_MASK0);
+	cpr_debug(cpr_vreg, "RBCPR_SENSOR_MASK0 = 0x%08x\n", val);
+
+	/* Enable cpr controller */
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, RBCPR_CTL_LOOP_EN);
+
+	/* Make sure cpr starts measurement with toggling busy bit */
+	mb();
+
+	/* Wait for and ignore the first measurement; time out after 5 ms */
+	retries = 50;
+	while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+		udelay(100);
+
+	if (retries < 0) {
+		cpr_err(cpr_vreg, "Aging calibration failed\n");
+		rc = -EBUSY;
+		goto _exit;
+	}
+
+	/* Set age page mode */
+	cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, RBCPR_HTOL_AGE_PAGE);
+
+	aging_measurement_count = 0;
+	quot_delta_scaled_sum = 0;
+
+	for (i = 0; i < CPR_AGING_MEASUREMENT_ITERATIONS; i++) {
+		/* Send cont nack */
+		cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+
+		/*
+		 * Make sure cpr starts next measurement with
+		 * toggling busy bit
+		 */
+		mb();
+
+		/*
+		 * Wait for controller to finish measurement
+		 * and time-out after 5ms
+		 */
+		retries = 50;
+		while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+			udelay(100);
+
+		if (retries < 0) {
+			cpr_err(cpr_vreg, "Aging calibration failed\n");
+			rc = -EBUSY;
+			goto _exit;
+		}
+
+		/* Check for PAGE_IS_AGE flag in status register */
+		val = cpr_read(cpr_vreg, REG_RBCPR_HTOL_AGE);
+		is_aging_measurement = val & RBCPR_AGE_DATA_STATUS;
+
+		val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_1);
+		sel_fast = RBCPR_RESULT_1_SEL_FAST(val);
+		cpr_debug(cpr_vreg, "RBCPR_RESULT_1 = 0x%08x\n", val);
+
+		val = cpr_read(cpr_vreg, REG_RBCPR_DEBUG1);
+		cpr_debug(cpr_vreg, "RBCPR_DEBUG1 = 0x%08x\n", val);
+
+		if (sel_fast == 1) {
+			quot_min = RBCPR_DEBUG1_QUOT_FAST(val);
+			quot_max = RBCPR_DEBUG1_QUOT_SLOW(val);
+		} else {
+			quot_min = RBCPR_DEBUG1_QUOT_SLOW(val);
+			quot_max = RBCPR_DEBUG1_QUOT_FAST(val);
+		}
+
+		/*
+		 * Scale the quotients so that they are equivalent to the fused
+		 * values.  This accounts for the difference in measurement
+		 * interval times.
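+		 * Since gcnt was programmed to gcnt_ref * 3 / 2 above, this
+		 * scales each measured quotient by roughly 2/3.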
+		 */
+
+		quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
+		quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
+
+		quot_delta_scaled = 0;
+		if (is_aging_measurement) {
+			quot_delta_scaled = quot_min_scaled - quot_max_scaled;
+			quot_delta_results[aging_measurement_count++] =
+					quot_delta_scaled;
+		}
+
+		cpr_debug(cpr_vreg,
+			"Age sensor[%d]: measurement[%d]: page_is_age=%u quot_min = %d, quot_max = %d quot_min_scaled = %d, quot_max_scaled = %d quot_delta_scaled = %d\n",
+			aging_sensor_info->sensor_id, i, is_aging_measurement,
+			quot_min, quot_max, quot_min_scaled, quot_max_scaled,
+			quot_delta_scaled);
+	}
+
+	filtered_count
+		= aging_measurement_count - CPR_AGING_MEASUREMENT_FILTER * 2;
+	if (filtered_count > 0) {
+		sort(quot_delta_results, aging_measurement_count,
+			sizeof(*quot_delta_results), cmp_int, NULL);
+
+		quot_delta_scaled_sum = 0;
+		for (i = 0; i < filtered_count; i++)
+			quot_delta_scaled_sum
+				+= quot_delta_results[i
+					+ CPR_AGING_MEASUREMENT_FILTER];
+
+		aging_sensor_info->current_quot_diff
+			= quot_delta_scaled_sum / filtered_count;
+		cpr_debug(cpr_vreg,
+			"Age sensor[%d]: average aging quotient delta = %d (count = %d)\n",
+			aging_sensor_info->sensor_id,
+			aging_sensor_info->current_quot_diff, filtered_count);
+	} else {
+		cpr_err(cpr_vreg, "%d aging measurements completed after %d iterations\n",
+			aging_measurement_count,
+			CPR_AGING_MEASUREMENT_ITERATIONS);
+		rc = -EBUSY;
+	}
+
+_exit:
+	/* Clear age page bit */
+	cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, 0x0);
+
+	/* Disable the CPR controller after aging procedure */
+	cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0x0);
+
+	/* Clear the sensor bypass */
+	if (cpr_vreg->aging_info->aging_sensor_bypass)
+		cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0, 0x0);
+
+	/* Unmask all sensors */
+	cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0, 0x0);
+
+	/* Clear gcnt0/1 registers */
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), 0x0);
+	cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), 0x0);
+
+	/* Program the delay count for the timer */
+	val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+
+	kfree(quot_delta_results);
+
+	return rc;
+}
+
+static void cpr_de_aging_adjustment(void *data)
+{
+	struct cpr_regulator *cpr_vreg = (struct cpr_regulator *)data;
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	struct cpr_aging_sensor_info *aging_sensor_info;
+	int i, num_aging_sensors, retries, rc = 0;
+	int max_quot_diff = 0, ro_sel = 0;
+	u32 voltage_adjust, aging_voltage_adjust = 0;
+
+	aging_sensor_info = aging_info->sensor_info;
+	num_aging_sensors = aging_info->num_aging_sensors;
+
+	for (i = 0; i < num_aging_sensors; i++, aging_sensor_info++) {
+		retries = 2;
+		while (retries--) {
+			rc = cpr_get_aging_quot_delta(cpr_vreg,
+					aging_sensor_info);
+			if (!rc)
+				break;
+		}
+		if (rc && retries < 0) {
+			cpr_err(cpr_vreg, "error in age calibration: rc = %d\n",
+				rc);
+			aging_info->cpr_aging_error = true;
+			return;
+		}
+
+		max_quot_diff = max(max_quot_diff,
+					(aging_sensor_info->current_quot_diff -
+					aging_sensor_info->initial_quot_diff));
+	}
+
+	cpr_debug(cpr_vreg, "Max aging quot delta = %d\n",
+				max_quot_diff);
+	aging_voltage_adjust = DIV_ROUND_UP(max_quot_diff * 1000000,
+					aging_info->aging_ro_kv);
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		/* Remove initial max aging adjustment */
+		ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+		cpr_vreg->cpr_fuse_target_quot[i] -=
+				(aging_info->cpr_ro_kv[ro_sel]
+				* aging_info->max_aging_margin) / 1000000;
+		aging_info->voltage_adjust[i] = 0;
+
+		if (aging_voltage_adjust > 0) {
+			/* Add required aging adjustment */
+			voltage_adjust = (aging_voltage_adjust
+					* aging_info->aging_derate[i]) / 1000;
+			voltage_adjust = min(voltage_adjust,
+						aging_info->max_aging_margin);
+			cpr_vreg->cpr_fuse_target_quot[i] +=
+					(aging_info->cpr_ro_kv[ro_sel]
+					* voltage_adjust) / 1000000;
+			aging_info->voltage_adjust[i] = voltage_adjust;
+		}
+	}
+}
+
+static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	return cpr_vreg->vreg_enabled;
+}
+
+static int cpr_regulator_enable(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	/* Enable dependency power before vdd_apc */
+	if (cpr_vreg->vdd_mx) {
+		rc = regulator_enable(cpr_vreg->vdd_mx);
+		if (rc) {
+			cpr_err(cpr_vreg, "regulator_enable: vdd_mx: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = regulator_enable(cpr_vreg->vdd_apc);
+	if (rc) {
+		cpr_err(cpr_vreg, "regulator_enable: vdd_apc: rc=%d\n", rc);
+		return rc;
+	}
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+	cpr_vreg->vreg_enabled = true;
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
+		cpr_irq_clr(cpr_vreg);
+		cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+		cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+	}
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+static int cpr_regulator_disable(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = regulator_disable(cpr_vreg->vdd_apc);
+	if (!rc) {
+		if (cpr_vreg->vdd_mx)
+			rc = regulator_disable(cpr_vreg->vdd_mx);
+
+		if (rc) {
+			cpr_err(cpr_vreg, "regulator_disable: vdd_mx: rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		mutex_lock(&cpr_vreg->cpr_mutex);
+		cpr_vreg->vreg_enabled = false;
+		if (cpr_is_allowed(cpr_vreg))
+			cpr_ctl_disable(cpr_vreg);
+		mutex_unlock(&cpr_vreg->cpr_mutex);
+	} else {
+		cpr_err(cpr_vreg, "regulator_disable: vdd_apc: rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg)
+{
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	enum voltage_change_dir change_dir = NO_CHANGE;
+	u32 save_ctl, save_irq;
+	cpumask_t tmp_mask;
+	int rc = 0, i;
+
+	save_ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	save_irq = cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+
+	/* Disable interrupt and CPR */
+	cpr_irq_set(cpr_vreg, 0);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+	if (aging_info->aging_corner > cpr_vreg->corner)
+		change_dir = UP;
+	else if (aging_info->aging_corner < cpr_vreg->corner)
+		change_dir = DOWN;
+
+	/* set selected reference voltage for de-aging */
+	rc = cpr_scale_voltage(cpr_vreg,
+				aging_info->aging_corner,
+				aging_info->aging_ref_voltage,
+				change_dir);
+	if (rc) {
+		cpr_err(cpr_vreg, "Unable to set aging reference voltage, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	/* Force PWM mode */
+	rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_NORMAL);
+	if (rc) {
+		cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+			REGULATOR_MODE_NORMAL, rc);
+		return rc;
+	}
+
+	get_online_cpus();
+	cpumask_and(&tmp_mask, &cpr_vreg->cpu_mask, cpu_online_mask);
+	if (!cpumask_empty(&tmp_mask)) {
+		smp_call_function_any(&tmp_mask,
+					cpr_de_aging_adjustment,
+					cpr_vreg, true);
+		aging_info->cpr_aging_done = true;
+		if (!aging_info->cpr_aging_error)
+			for (i = CPR_FUSE_CORNER_MIN;
+					i <= cpr_vreg->num_fuse_corners; i++)
+				cpr_info(cpr_vreg, "Corner[%d]: age adjusted target quot = %d\n",
+					i, cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+	put_online_cpus();
+
+	/* Set to initial mode */
+	rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_IDLE);
+	if (rc) {
+		cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+			REGULATOR_MODE_IDLE, rc);
+		return rc;
+	}
+
+	/* Clear interrupts */
+	cpr_irq_clr(cpr_vreg);
+
+	/* Restore register values */
+	cpr_irq_set(cpr_vreg, save_irq);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, save_ctl);
+
+	return rc;
+}
+
+/* Note that cpr_vreg->cpr_mutex must be held by the caller. */
+static int cpr_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, bool reset_quot)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	int rc;
+	int new_volt;
+	enum voltage_change_dir change_dir = NO_CHANGE;
+	int fuse_corner = cpr_vreg->corner_map[corner];
+
+	if (cpr_is_allowed(cpr_vreg)) {
+		cpr_ctl_disable(cpr_vreg);
+		new_volt = cpr_vreg->last_volt[corner];
+	} else {
+		new_volt = cpr_vreg->open_loop_volt[corner];
+	}
+
+	cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
+		corner, fuse_corner, new_volt);
+
+	if (corner > cpr_vreg->corner)
+		change_dir = UP;
+	else if (corner < cpr_vreg->corner)
+		change_dir = DOWN;
+
+	/* Read age sensor data and apply de-aging adjustments */
+	if (cpr_vreg->vreg_enabled && aging_info && !aging_info->cpr_aging_done
+		&& (corner <= aging_info->aging_corner)) {
+		rc = cpr_calculate_de_aging_margin(cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg, "failed in de-aging calibration: rc=%d\n",
+				rc);
+		} else {
+			change_dir = NO_CHANGE;
+			if (corner > aging_info->aging_corner)
+				change_dir = UP;
+			else if (corner < aging_info->aging_corner)
+				change_dir = DOWN;
+		}
+		reset_quot = true;
+	}
+
+	rc = cpr_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
+	if (rc)
+		return rc;
+
+	if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
+		cpr_irq_clr(cpr_vreg);
+		if (reset_quot)
+			cpr_corner_restore(cpr_vreg, corner);
+		else
+			cpr_corner_switch(cpr_vreg, corner);
+		cpr_ctl_enable(cpr_vreg, corner);
+	}
+
+	cpr_vreg->corner = corner;
+
+	return rc;
+}
+
+static int cpr_regulator_set_voltage_op(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned *selector)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+	rc = cpr_regulator_set_voltage(rdev, corner, false);
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+static int cpr_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	return cpr_vreg->corner;
+}
+
+/**
+ * cpr_regulator_list_corner_voltage() - return the ceiling voltage mapped to
+ *			the specified voltage corner
+ * @rdev:		Regulator device pointer for the cpr-regulator
+ * @corner:		Voltage corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr-regulator device.
+ *
+ * Return: voltage value in microvolts or -EINVAL if the corner is out of range
+ */
+static int cpr_regulator_list_corner_voltage(struct regulator_dev *rdev,
+		int corner)
+{
+	struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+	if (corner >= CPR_CORNER_MIN && corner <= cpr_vreg->num_corners)
+		return cpr_vreg->ceiling_volt[corner];
+	else
+		return -EINVAL;
+}
+
+static struct regulator_ops cpr_corner_ops = {
+	.enable			= cpr_regulator_enable,
+	.disable		= cpr_regulator_disable,
+	.is_enabled		= cpr_regulator_is_enabled,
+	.set_voltage		= cpr_regulator_set_voltage_op,
+	.get_voltage		= cpr_regulator_get_voltage,
+	.list_corner_voltage	= cpr_regulator_list_corner_voltage,
+};
+
+#ifdef CONFIG_PM
+static int cpr_suspend(struct cpr_regulator *cpr_vreg)
+{
+	cpr_debug(cpr_vreg, "suspend\n");
+
+	cpr_ctl_disable(cpr_vreg);
+
+	cpr_irq_clr(cpr_vreg);
+
+	return 0;
+}
+
+static int cpr_resume(struct cpr_regulator *cpr_vreg)
+{
+	cpr_debug(cpr_vreg, "resume\n");
+
+	cpr_irq_clr(cpr_vreg);
+
+	cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+
+	return 0;
+}
+
+static int cpr_regulator_suspend(struct platform_device *pdev,
+				 pm_message_t state)
+{
+	struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_is_allowed(cpr_vreg))
+		rc = cpr_suspend(cpr_vreg);
+
+	cpr_vreg->is_cpr_suspended = true;
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+
+static int cpr_regulator_resume(struct platform_device *pdev)
+{
+	struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	cpr_vreg->is_cpr_suspended = false;
+
+	if (cpr_is_allowed(cpr_vreg))
+		rc = cpr_resume(cpr_vreg);
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return rc;
+}
+#else
+#define cpr_regulator_suspend NULL
+#define cpr_regulator_resume NULL
+#endif
+
+static int cpr_config(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int i;
+	u32 val, gcnt, reg;
+	void __iomem *rbcpr_clk;
+	int size;
+
+	if (cpr_vreg->rbcpr_clk_addr) {
+		/* Use 19.2 MHz clock for CPR. */
+		rbcpr_clk = ioremap(cpr_vreg->rbcpr_clk_addr, 4);
+		if (!rbcpr_clk) {
+			cpr_err(cpr_vreg, "Unable to map rbcpr_clk\n");
+			return -EINVAL;
+		}
+		reg = readl_relaxed(rbcpr_clk);
+		reg &= ~RBCPR_CLK_SEL_MASK;
+		reg |= RBCPR_CLK_SEL_19P2_MHZ & RBCPR_CLK_SEL_MASK;
+		writel_relaxed(reg, rbcpr_clk);
+		iounmap(rbcpr_clk);
+	}
+
+	/* Disable interrupt and CPR */
+	cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+	/* Program the default HW Ceiling, Floor and vlevel */
+	val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+			<< RBIF_LIMIT_CEILING_SHIFT)
+		| (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
+	cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
+	cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+	/* Clear the target quotient value and gate count of all ROs */
+	for (i = 0; i < CPR_NUM_RING_OSC; i++)
+		cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+	/* Init and save gcnt */
+	gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+	gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+			RBCPR_GCNT_TARGET_GCNT_SHIFT;
+	cpr_vreg->gcnt = gcnt;
+
+	/* Program the delay count for the timer */
+	val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+	cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+	cpr_info(cpr_vreg, "Timer count: 0x%0x (for %d us)\n", val,
+		cpr_vreg->timer_delay_us);
+
+	/* Program Consecutive Up & Down */
+	val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+			<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+	       (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK) |
+	       ((cpr_vreg->clamp_timer_interval & RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+			<< RBIF_TIMER_ADJ_CLAMP_INT_SHIFT);
+	cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
+
+	/* Program the control register */
+	cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+	cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+	val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
+		| (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
+	val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+	val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+	cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
+
+	cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+	val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
+	if (val <= RBCPR_VER_2)
+		cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+	size = cpr_vreg->num_corners + 1;
+	cpr_vreg->save_ctl = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+	cpr_vreg->save_irq = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+	if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq)
+		return -ENOMEM;
+
+	for (i = 1; i < size; i++)
+		cpr_corner_save(cpr_vreg, i);
+
+	return 0;
+}
+
+static int cpr_fuse_is_setting_expected(struct cpr_regulator *cpr_vreg,
+					u32 sel_array[5])
+{
+	u64 fuse_bits;
+	u32 ret;
+
+	fuse_bits = cpr_read_efuse_row(cpr_vreg, sel_array[0], sel_array[4]);
+	ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
+	if (ret == sel_array[3])
+		ret = 1;
+	else
+		ret = 0;
+
+	cpr_info(cpr_vreg, "[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
+			sel_array[0], fuse_bits,
+			sel_array[1], sel_array[2],
+			sel_array[3],
+			(ret == 1) ? "yes" : "no");
+	return ret;
+}
+
+static int cpr_voltage_uplift_wa_inc_volt(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	u32 uplift_voltage;
+	u32 uplift_max_volt = 0;
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+	int rc;
+
+	rc = of_property_read_u32(of_node,
+		"qcom,cpr-uplift-voltage", &uplift_voltage);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-voltage is missing, rc = %d", rc);
+		return rc;
+	}
+	rc = of_property_read_u32(of_node,
+		"qcom,cpr-uplift-max-volt", &uplift_max_volt);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-max-volt is missing, rc = %d",
+			rc);
+		return rc;
+	}
+
+	cpr_vreg->pvs_corner_v[highest_fuse_corner] += uplift_voltage;
+	if (cpr_vreg->pvs_corner_v[highest_fuse_corner] > uplift_max_volt)
+		cpr_vreg->pvs_corner_v[highest_fuse_corner] = uplift_max_volt;
+
+	return rc;
+}
+
+static int cpr_adjust_init_voltages(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	int tuple_count, tuple_match, i;
+	u32 index;
+	u32 volt_adjust = 0;
+	int len = 0;
+	int rc = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-init-voltage-adjustment",
+				&len)) {
+		/* No initial voltage adjustment needed. */
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/*
+			 * No matching index to use for initial voltage
+			 * adjustment.
+			 */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-init-voltage-adjustment length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		index = tuple_match * cpr_vreg->num_fuse_corners
+				+ i - CPR_FUSE_CORNER_MIN;
+		rc = of_property_read_u32_index(of_node,
+			"qcom,cpr-init-voltage-adjustment", index,
+			&volt_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-init-voltage-adjustment index %u, rc=%d\n",
+				index, rc);
+			return rc;
+		}
+
+		if (volt_adjust) {
+			cpr_vreg->pvs_corner_v[i] += volt_adjust;
+			cpr_info(cpr_vreg, "adjusted initial voltage[%d]: %d -> %d uV\n",
+				i, cpr_vreg->pvs_corner_v[i] - volt_adjust,
+				cpr_vreg->pvs_corner_v[i]);
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Property qcom,cpr-fuse-init-voltage specifies the fuse position of the
+ * initial voltage for each fuse corner. The MSB of the fuse value is a sign
+ * bit, and the remaining bits define the steps of the offset. Each step has
+ * units of microvolts defined in the qcom,cpr-init-voltage-step property.
+ * The initial voltages are calculated using the formula:
+ * pvs_corner_v[corner] = init_voltage_ref[corner] + (sign * steps * step_size_uv)
+ * where init_voltage_ref[corner] comes from the qcom,cpr-init-voltage-ref property.
+ */
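+/*
+ * Worked example with hypothetical values: given a 6-bit fuse field holding
+ * 0x23 and qcom,cpr-init-voltage-step = 10000, the sign bit (bit 5) is set
+ * and steps = 3, so the initial voltage is the corner's reference voltage
+ * minus 30000 uV, rounded up to a multiple of the regulator step size.
+ */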
+static int cpr_pvs_per_corner_init(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	u64 efuse_bits;
+	int i, size, sign, steps, step_size_uv, rc;
+	u32 *fuse_sel, *tmp, *ref_uv;
+	struct property *prop;
+	char *init_volt_str;
+
+	init_volt_str = cpr_vreg->cpr_fuse_redundant
+			? "qcom,cpr-fuse-redun-init-voltage"
+			: "qcom,cpr-fuse-init-voltage";
+
+	prop = of_find_property(of_node, init_volt_str, NULL);
+	if (!prop) {
+		cpr_err(cpr_vreg, "%s is missing\n", init_volt_str);
+		return -EINVAL;
+	}
+	size = prop->length / sizeof(u32);
+	if (size != cpr_vreg->num_fuse_corners * 4) {
+		cpr_err(cpr_vreg,
+			"fuse position for init voltages is invalid\n");
+		return -EINVAL;
+	}
+	fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!fuse_sel) {
+		cpr_err(cpr_vreg, "memory alloc failed.\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(of_node, init_volt_str,
+							fuse_sel, size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read cpr-fuse-init-voltage failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		return rc;
+	}
+	rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
+							&step_size_uv);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read cpr-init-voltage-step failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		return rc;
+	}
+
+	ref_uv = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*ref_uv),
+			GFP_KERNEL);
+	if (!ref_uv) {
+		cpr_err(cpr_vreg,
+			"Could not allocate memory for reference voltages\n");
+		kfree(fuse_sel);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-init-voltage-ref",
+		&ref_uv[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"read qcom,cpr-init-voltage-ref failed, rc = %d\n", rc);
+		kfree(fuse_sel);
+		kfree(ref_uv);
+		return rc;
+	}
+
+	tmp = fuse_sel;
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+					fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+		sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
+		steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
+		cpr_vreg->pvs_corner_v[i] =
+				ref_uv[i] + sign * steps * step_size_uv;
+		cpr_vreg->pvs_corner_v[i] = DIV_ROUND_UP(
+				cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->step_volt) *
+				cpr_vreg->step_volt;
+		cpr_debug(cpr_vreg, "corner %d: sign = %d, steps = %d, volt = %d uV\n",
+			i, sign, steps, cpr_vreg->pvs_corner_v[i]);
+		fuse_sel += 4;
+	}
+
+	rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+	if (rc)
+		goto done;
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		if (cpr_vreg->pvs_corner_v[i]
+		    > cpr_vreg->fuse_ceiling_volt[i]) {
+			cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d above ceiling %d\n",
+				i, cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->fuse_ceiling_volt[i]);
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_ceiling_volt[i];
+		} else if (cpr_vreg->pvs_corner_v[i] <
+				cpr_vreg->fuse_floor_volt[i]) {
+			cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d below floor %d\n",
+				i, cpr_vreg->pvs_corner_v[i],
+				cpr_vreg->fuse_floor_volt[i]);
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_floor_volt[i];
+		}
+	}
+
+done:
+	kfree(tmp);
+	kfree(ref_uv);
+
+	return rc;
+}
+
+/*
+ * A single PVS bin is stored in a fuse whose position is defined either
+ * in the qcom,pvs-fuse-redun property or in the qcom,pvs-fuse property.
+ * The fuse value defined in the qcom,pvs-fuse-redun-sel property is used
+ * to pick between the primary and redundant PVS fuse positions.
+ * After the PVS bin value is read out successfully, it is used as the row
+ * index into the voltage table defined in the qcom,pvs-voltage-table
+ * property to obtain the initial voltage for each fuse corner.
+ */
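+/*
+ * As used below, pvs_fuse[0] selects the fuse row, pvs_fuse[1] is the bit
+ * offset within the row, pvs_fuse[2] is the bit width of the PVS bin field,
+ * and pvs_fuse[3] is passed through as the final argument of
+ * cpr_read_efuse_row().  For a hypothetical 3-bit field, up to 8 PVS bins
+ * (rows of the voltage table) can be selected.
+ */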
+static int cpr_pvs_single_bin_init(struct device_node *of_node,
+				struct cpr_regulator *cpr_vreg)
+{
+	u64 efuse_bits;
+	u32 pvs_fuse[4], pvs_fuse_redun_sel[5];
+	int rc, i, stripe_size;
+	bool redundant;
+	size_t pvs_bins;
+	u32 *tmp;
+
+	rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun-sel",
+						pvs_fuse_redun_sel, 5);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "pvs-fuse-redun-sel missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	redundant = cpr_fuse_is_setting_expected(cpr_vreg, pvs_fuse_redun_sel);
+	if (redundant) {
+		rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun",
+								pvs_fuse, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "pvs-fuse-redun missing: rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse",
+							pvs_fuse, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "pvs-fuse missing: rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Construct PVS process # from the efuse bits */
+	efuse_bits = cpr_read_efuse_row(cpr_vreg, pvs_fuse[0], pvs_fuse[3]);
+	cpr_vreg->pvs_bin = (efuse_bits >> pvs_fuse[1]) &
+				((1 << pvs_fuse[2]) - 1);
+	pvs_bins = 1 << pvs_fuse[2];
+	stripe_size = cpr_vreg->num_fuse_corners;
+	tmp = kzalloc(sizeof(u32) * pvs_bins * stripe_size, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,pvs-voltage-table",
+						tmp, pvs_bins * stripe_size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "pvs-voltage-table missing: rc=%d\n", rc);
+		kfree(tmp);
+		return rc;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->pvs_corner_v[i] = tmp[cpr_vreg->pvs_bin *
+						stripe_size + i - 1];
+	kfree(tmp);
+
+	rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/*
+ * Read the VDD_MX dependency parameters from the device tree node. The
+ * expected length of qcom,vdd-mx-corner-map is either num_fuse_corners or
+ * num_corners, depending on the selected vdd-mx-vmin-method.
+ */
+static int cpr_parse_vdd_mx_parameters(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 corner_map_len;
+	int rc, len, size;
+
+	rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
+				&cpr_vreg->vdd_mx_vmax);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "vdd-mx-vmax missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
+			 &cpr_vreg->vdd_mx_vmin_method);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "vdd-mx-vmin-method missing: rc=%d\n",
+			rc);
+		return rc;
+	}
+	if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_APC_CORNER_MAP) {
+		cpr_err(cpr_vreg, "Invalid vdd-mx-vmin-method(%d)\n",
+			cpr_vreg->vdd_mx_vmin_method);
+		return -EINVAL;
+	}
+
+	switch (cpr_vreg->vdd_mx_vmin_method) {
+	case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+		corner_map_len = cpr_vreg->num_fuse_corners;
+		break;
+	case VDD_MX_VMIN_APC_CORNER_MAP:
+		corner_map_len = cpr_vreg->num_corners;
+		break;
+	default:
+		cpr_vreg->vdd_mx_corner_map = NULL;
+		return 0;
+	}
+
+	if (!of_find_property(of_node, "qcom,vdd-mx-corner-map", &len)) {
+		cpr_err(cpr_vreg, "qcom,vdd-mx-corner-map missing");
+		return -EINVAL;
+	}
+
+	size = len / sizeof(u32);
+	if (size != corner_map_len) {
+		cpr_err(cpr_vreg,
+			"qcom,vdd-mx-corner-map length=%d is invalid: required:%u\n",
+			size, corner_map_len);
+		return -EINVAL;
+	}
+
+	cpr_vreg->vdd_mx_corner_map = devm_kzalloc(&pdev->dev,
+		(corner_map_len + 1) * sizeof(*cpr_vreg->vdd_mx_corner_map),
+			GFP_KERNEL);
+	if (!cpr_vreg->vdd_mx_corner_map) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->vdd_mx_corner_map\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,vdd-mx-corner-map",
+				&cpr_vreg->vdd_mx_corner_map[1],
+				corner_map_len);
+	if (rc)
+		cpr_err(cpr_vreg,
+			"read qcom,vdd-mx-corner-map failed, rc = %d\n", rc);
+
+	return rc;
+}
+
+#define MAX_CHARS_PER_INT	10
+
+/*
+ * The initial voltage for each fuse corner may be determined by one of two
+ * possible styles of fuse. If qcom,cpr-fuse-init-voltage is present, then
+ * the initial voltages are encoded in a fuse for each fuse corner. If it is
+ * not present, then the initial voltages are all determined using a single
+ * PVS bin fuse value.
+ */
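+/*
+ * In either case the resulting voltages are clamped to the per-fuse-corner
+ * ceiling and floor values, except that the highest fuse corner's voltage is
+ * allowed to raise that corner's ceiling.
+ */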
+static int cpr_pvs_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+	int i, rc, pos;
+	size_t buflen;
+	char *buf;
+
+	rc = of_property_read_u32(of_node, "qcom,cpr-apc-volt-step",
+					&cpr_vreg->step_volt);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "read cpr-apc-volt-step failed, rc = %d\n",
+			rc);
+		return rc;
+	} else if (cpr_vreg->step_volt == 0) {
+		cpr_err(cpr_vreg, "apc voltage step size can't be set to 0.\n");
+		return -EINVAL;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL)) {
+		rc = cpr_pvs_per_corner_init(of_node, cpr_vreg);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "get pvs per corner failed, rc = %d",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = cpr_pvs_single_bin_init(of_node, cpr_vreg);
+		if (rc < 0) {
+			cpr_err(cpr_vreg,
+				"get pvs from single bin failed, rc = %d", rc);
+			return rc;
+		}
+	}
+
+	if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+		rc = cpr_voltage_uplift_wa_inc_volt(cpr_vreg, of_node);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "pvs volt uplift wa apply failed: %d",
+				rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Allow the highest fuse corner's PVS voltage to define the ceiling
+	 * voltage for that corner in order to support SoCs in which variable
+	 * ceiling values are required.
+	 */
+	if (cpr_vreg->pvs_corner_v[highest_fuse_corner] >
+		cpr_vreg->fuse_ceiling_volt[highest_fuse_corner])
+		cpr_vreg->fuse_ceiling_volt[highest_fuse_corner] =
+			cpr_vreg->pvs_corner_v[highest_fuse_corner];
+
+	/*
+	 * Restrict all fuse corner PVS voltages based upon per corner
+	 * ceiling and floor voltages.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+		if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->fuse_ceiling_volt[i])
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_ceiling_volt[i];
+		else if (cpr_vreg->pvs_corner_v[i]
+				< cpr_vreg->fuse_floor_volt[i])
+			cpr_vreg->pvs_corner_v[i]
+				= cpr_vreg->fuse_floor_volt[i];
+
+	cpr_vreg->ceiling_max
+		= cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
+
+	/*
+	 * Log ceiling, floor, and initial voltages since they are critical for
+	 * all CPR debugging.
+	 */
+	buflen = cpr_vreg->num_fuse_corners * (MAX_CHARS_PER_INT + 2)
+			* sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for corner voltage logging\n");
+		return 0;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%u%s",
+				cpr_vreg->pvs_corner_v[i],
+				i < highest_fuse_corner ? " " : "");
+	cpr_info(cpr_vreg, "pvs voltage: [%s] uV\n", buf);
+
+	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+				cpr_vreg->fuse_ceiling_volt[i],
+				i < highest_fuse_corner ? " " : "");
+	cpr_info(cpr_vreg, "ceiling voltage: [%s] uV\n", buf);
+
+	for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+				cpr_vreg->fuse_floor_volt[i],
+				i < highest_fuse_corner ? " " : "");
+	cpr_info(cpr_vreg, "floor voltage: [%s] uV\n", buf);
+
+	kfree(buf);
+	return 0;
+}
+
+#define CPR_PROP_READ_U32(cpr_vreg, of_node, cpr_property, cpr_config, rc) \
+do {									\
+	if (!rc) {							\
+		rc = of_property_read_u32(of_node,			\
+				"qcom," cpr_property,			\
+				cpr_config);				\
+		if (rc) {						\
+			cpr_err(cpr_vreg, "Missing " #cpr_property	\
+				": rc = %d\n", rc);			\
+		}							\
+	}								\
+} while (0)
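+/*
+ * Example usage (rc must be 0 on entry; the macro adds the "qcom," vendor
+ * prefix to the property name):
+ *
+ *	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-max-aging-margin",
+ *			&aging_info->max_aging_margin, rc);
+ */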
+
+static int cpr_apc_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, rc = 0;
+
+	for (i = 0; i < ARRAY_SIZE(vdd_apc_name); i++) {
+		cpr_vreg->vdd_apc = devm_regulator_get_optional(&pdev->dev,
+					vdd_apc_name[i]);
+		rc = PTR_RET(cpr_vreg->vdd_apc);
+		if (!IS_ERR_OR_NULL(cpr_vreg->vdd_apc))
+			break;
+	}
+
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "devm_regulator_get: rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Check dependencies */
+	if (of_find_property(of_node, "vdd-mx-supply", NULL)) {
+		cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+		if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
+			rc = PTR_RET(cpr_vreg->vdd_mx);
+			if (rc != -EPROBE_DEFER)
+				cpr_err(cpr_vreg,
+					"devm_regulator_get: vdd-mx: rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void cpr_apc_exit(struct cpr_regulator *cpr_vreg)
+{
+	if (cpr_vreg->vreg_enabled) {
+		regulator_disable(cpr_vreg->vdd_apc);
+
+		if (cpr_vreg->vdd_mx)
+			regulator_disable(cpr_vreg->vdd_mx);
+	}
+}
+
+static int cpr_voltage_uplift_wa_inc_quot(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	u32 delta_quot[3];
+	int rc, i;
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-uplift-quotient", delta_quot, 3);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-uplift-quotient is missing: %d", rc);
+		return rc;
+	}
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->cpr_fuse_target_quot[i] += delta_quot[i-1];
+	return rc;
+}
+
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+				struct device_node *of_node)
+{
+	int rc;
+	u64 fuse_bits;
+	u32 fuse_sel[4];
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,pvs-version-fuse-sel", fuse_sel, 4);
+	if (!rc) {
+		fuse_bits = cpr_read_efuse_row(cpr_vreg,
+				fuse_sel[0], fuse_sel[3]);
+		cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+			((1 << fuse_sel[2]) - 1);
+		cpr_info(cpr_vreg, "[row: %d]: 0x%llx, pvs_version = %d\n",
+				fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+	} else {
+		cpr_vreg->pvs_version = 0;
+	}
+}
+
+/**
+ * cpr_get_open_loop_voltage() - fill the open_loop_volt array with linearly
+ *				 interpolated open-loop CPR voltage values.
+ * @cpr_vreg:	Handle to the cpr-regulator device
+ * @dev:	Device pointer for the cpr-regulator device
+ * @corner_max:	Array of length (cpr_vreg->num_fuse_corners + 1) which maps from
+ *		fuse corners to the highest virtual corner corresponding to a
+ *		given fuse corner
+ * @freq_map:	Array of length (cpr_vreg->num_corners + 1) which maps from
+ *		virtual corners to frequencies in Hz.
+ * @maps_valid:	Boolean which indicates if the values in corner_max and freq_map
+ *		are valid.  If they are not valid, then the open_loop_volt
+ *		values are not interpolated.
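+ *
+ * As a hypothetical example of the interpolation performed here: with fuse
+ * corner open-loop voltages of 900000 uV at 800 MHz and 1000000 uV at
+ * 1200 MHz, a virtual corner running at 1000 MHz is assigned
+ * 1000000 - (1200 - 1000) * (1000000 - 900000) / (1200 - 800) = 950000 uV,
+ * subject to the per-fuse-corner uV/MHz cap and rounding up to a multiple of
+ * step_volt.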
+ */
+static int cpr_get_open_loop_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev, const u32 *corner_max, const u32 *freq_map,
+		bool maps_valid)
+{
+	int rc = 0;
+	int i, j;
+	u64 volt_high, volt_low, freq_high, freq_low, freq, temp, temp_limit;
+	u32 *max_factor = NULL;
+
+	cpr_vreg->open_loop_volt = devm_kzalloc(dev,
+			sizeof(int) * (cpr_vreg->num_corners + 1), GFP_KERNEL);
+	if (!cpr_vreg->open_loop_volt) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->open_loop_volt\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Set open loop voltage to be equal to per-fuse-corner initial voltage
+	 * by default.  This ensures that the open loop voltage is valid for
+	 * all virtual corners even if some virtual corner to frequency mappings
+	 * are missing.  It also ensures that the voltage is valid for the
+	 * higher corners not utilized by a given speed-bin.
+	 */
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_vreg->open_loop_volt[i]
+			= cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]];
+
+	if (!maps_valid || !corner_max || !freq_map
+	    || !of_find_property(dev->of_node,
+				 "qcom,cpr-voltage-scaling-factor-max", NULL)) {
+		/* Not using interpolation */
+		return 0;
+	}
+
+	max_factor
+	       = kzalloc(sizeof(*max_factor) * (cpr_vreg->num_fuse_corners + 1),
+			 GFP_KERNEL);
+	if (!max_factor) {
+		cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-voltage-scaling-factor-max",
+			&max_factor[CPR_FUSE_CORNER_MIN],
+			cpr_vreg->num_fuse_corners);
+	if (rc) {
+		cpr_debug(cpr_vreg, "failed to read qcom,cpr-voltage-scaling-factor-max; initial voltage interpolation not possible\n");
+		kfree(max_factor);
+		return 0;
+	}
+
+	for (j = CPR_FUSE_CORNER_MIN + 1; j <= cpr_vreg->num_fuse_corners;
+	    j++) {
+		freq_high = freq_map[corner_max[j]];
+		freq_low = freq_map[corner_max[j - 1]];
+		volt_high = cpr_vreg->pvs_corner_v[j];
+		volt_low = cpr_vreg->pvs_corner_v[j - 1];
+		if (freq_high <= freq_low || volt_high <= volt_low)
+			continue;
+
+		for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+			freq = freq_map[i];
+			if (freq_high <= freq)
+				continue;
+
+			temp = (freq_high - freq) * (volt_high - volt_low);
+			do_div(temp, (u32)(freq_high - freq_low));
+
+			/*
+			 * max_factor[j] has units of uV/MHz while freq values
+			 * have units of Hz.  Divide by 1000000 to convert.
+			 */
+			temp_limit = (freq_high - freq) * max_factor[j];
+			do_div(temp_limit, 1000000);
+
+			cpr_vreg->open_loop_volt[i]
+				= volt_high - min(temp, temp_limit);
+			cpr_vreg->open_loop_volt[i]
+				= DIV_ROUND_UP(cpr_vreg->open_loop_volt[i],
+						cpr_vreg->step_volt)
+					* cpr_vreg->step_volt;
+		}
+	}
+
+	kfree(max_factor);
+	return 0;
+}
+
+/*
+ * Limit the per-virtual-corner open-loop voltages using the per-virtual-corner
+ * ceiling and floor voltage values.  This must be called only after the
+ * open_loop_volt, ceiling, and floor arrays have all been initialized.
+ */
+static int cpr_limit_open_loop_voltage(struct cpr_regulator *cpr_vreg)
+{
+	int i;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (cpr_vreg->open_loop_volt[i] > cpr_vreg->ceiling_volt[i])
+			cpr_vreg->open_loop_volt[i] = cpr_vreg->ceiling_volt[i];
+		else if (cpr_vreg->open_loop_volt[i] < cpr_vreg->floor_volt[i])
+			cpr_vreg->open_loop_volt[i] = cpr_vreg->floor_volt[i];
+	}
+
+	return 0;
+}
+
+/*
+ * Fill an OPP table for the cpr-regulator device struct with pairs of
+ * <virtual voltage corner number, open loop voltage> tuples.
+ */
+static int cpr_populate_opp_table(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	int i, rc = 0;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		rc |= dev_pm_opp_add(dev, i, cpr_vreg->open_loop_volt[i]);
+		if (rc)
+			cpr_debug(cpr_vreg, "could not add OPP entry <%d, %d>, rc=%d\n",
+				i, cpr_vreg->open_loop_volt[i], rc);
+	}
+	if (rc)
+		cpr_err(cpr_vreg, "adding OPP entry failed - OPP may not be enabled, rc=%d\n",
+				rc);
+
+	return 0;
+}
+
+/*
+ * Conditionally reduce the per-virtual-corner ceiling voltages if certain
+ * device tree flags are present.  This must be called only after the ceiling
+ * array has been initialized and the open_loop_volt array values have been
+ * initialized and limited to the existing floor to ceiling voltage range.
+ */
+static int cpr_reduce_ceiling_voltage(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	bool reduce_to_fuse_open_loop, reduce_to_interpolated_open_loop;
+	int i;
+
+	reduce_to_fuse_open_loop = of_property_read_bool(dev->of_node,
+				"qcom,cpr-init-voltage-as-ceiling");
+	reduce_to_interpolated_open_loop = of_property_read_bool(dev->of_node,
+				"qcom,cpr-scaled-init-voltage-as-ceiling");
+
+	if (!reduce_to_fuse_open_loop && !reduce_to_interpolated_open_loop)
+		return 0;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (reduce_to_interpolated_open_loop &&
+		    cpr_vreg->open_loop_volt[i] < cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_volt[i] = cpr_vreg->open_loop_volt[i];
+		else if (reduce_to_fuse_open_loop &&
+				cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]
+				< cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_volt[i]
+				= max((u32)cpr_vreg->floor_volt[i],
+			       cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]);
+		cpr_debug(cpr_vreg, "lowered ceiling[%d] = %d uV\n",
+			i, cpr_vreg->ceiling_volt[i]);
+	}
+
+	return 0;
+}
+
+static int cpr_adjust_target_quot_offsets(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int tuple_count, tuple_match, i;
+	u32 index;
+	u32 quot_offset_adjust = 0;
+	int len = 0;
+	int rc = 0;
+	char *quot_offset_str;
+
+	quot_offset_str = "qcom,cpr-quot-offset-adjustment";
+	if (!of_find_property(of_node, quot_offset_str, &len)) {
+		/* No static quotient adjustment needed. */
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", quot_offset_str,
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		index = tuple_match * cpr_vreg->num_fuse_corners
+				+ i - CPR_FUSE_CORNER_MIN;
+		rc = of_property_read_u32_index(of_node, quot_offset_str, index,
+			&quot_offset_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				quot_offset_str, index, rc);
+			return rc;
+		}
+
+		if (quot_offset_adjust) {
+			cpr_vreg->fuse_quot_offset[i] += quot_offset_adjust;
+			cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
+				i, cpr_vreg->fuse_quot_offset[i]);
+		}
+	}
+
+	return rc;
+}
+
+static int cpr_get_fuse_quot_offset(struct cpr_regulator *cpr_vreg,
+					struct platform_device *pdev,
+					struct cpr_quot_scale *quot_scale)
+{
+	struct device *dev = &pdev->dev;
+	struct property *prop;
+	u32 *fuse_sel, *tmp, *offset_multiplier = NULL;
+	int rc = 0, i, size, len;
+	char *quot_offset_str;
+
+	quot_offset_str = cpr_vreg->cpr_fuse_redundant
+			? "qcom,cpr-fuse-redun-quot-offset"
+			: "qcom,cpr-fuse-quot-offset";
+
+	prop = of_find_property(dev->of_node, quot_offset_str, NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "%s not present\n", quot_offset_str);
+		return 0;
+	} else {
+		size = prop->length / sizeof(u32);
+		if (size != cpr_vreg->num_fuse_corners * 4) {
+			cpr_err(cpr_vreg, "fuse position for quot offset is invalid\n");
+			return -EINVAL;
+		}
+	}
+
+	fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!fuse_sel) {
+		cpr_err(cpr_vreg, "memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(dev->of_node, quot_offset_str,
+			fuse_sel, size);
+
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "read %s failed, rc = %d\n", quot_offset_str,
+			rc);
+		kfree(fuse_sel);
+		return rc;
+	}
+
+	cpr_vreg->fuse_quot_offset = devm_kzalloc(dev,
+			sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
+			GFP_KERNEL);
+	if (!cpr_vreg->fuse_quot_offset) {
+		cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->fuse_quot_offset\n");
+		kfree(fuse_sel);
+		return -ENOMEM;
+	}
+
+	if (!of_find_property(dev->of_node,
+				"qcom,cpr-fuse-quot-offset-scale", &len)) {
+		cpr_debug(cpr_vreg, "qcom,cpr-fuse-quot-offset-scale not present\n");
+	} else {
+		if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
+			cpr_err(cpr_vreg, "the size of qcom,cpr-fuse-quot-offset-scale is invalid\n");
+			kfree(fuse_sel);
+			return -EINVAL;
+		}
+
+		offset_multiplier = kzalloc(sizeof(*offset_multiplier)
+					* (cpr_vreg->num_fuse_corners + 1),
+					GFP_KERNEL);
+		if (!offset_multiplier) {
+			cpr_err(cpr_vreg, "memory alloc failed.\n");
+			kfree(fuse_sel);
+			return -ENOMEM;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node,
+						"qcom,cpr-fuse-quot-offset-scale",
+						&offset_multiplier[1],
+						cpr_vreg->num_fuse_corners);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "read qcom,cpr-fuse-quot-offset-scale failed, rc = %d\n",
+				rc);
+			kfree(fuse_sel);
+			goto out;
+		}
+	}
+
+	tmp = fuse_sel;
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		cpr_vreg->fuse_quot_offset[i] = cpr_read_efuse_param(cpr_vreg,
+					fuse_sel[0], fuse_sel[1], fuse_sel[2],
+					fuse_sel[3]);
+		if (offset_multiplier)
+			cpr_vreg->fuse_quot_offset[i] *= offset_multiplier[i];
+		fuse_sel += 4;
+	}
+
+	rc = cpr_adjust_target_quot_offsets(pdev, cpr_vreg);
+	kfree(tmp);
+out:
+	kfree(offset_multiplier);
+	return rc;
+}
+
+/*
+ * Adjust the per-virtual-corner open-loop voltage with an offset specified by
+ * a device-tree property. This must be called after open-loop voltage scaling.
+ */
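+/*
+ * The qcom,cpr-virtual-corner-init-voltage-adjustment property holds
+ * num_corners signed microvolt offsets per qcom,cpr-fuse-version-map tuple;
+ * each offset is added to the open-loop voltage of its virtual corner.
+ */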
+static int cpr_virtual_corner_voltage_adjust(struct cpr_regulator *cpr_vreg,
+						struct device *dev)
+{
+	char *prop_name = "qcom,cpr-virtual-corner-init-voltage-adjustment";
+	int i, rc, tuple_count, tuple_match, index, len;
+	u32 voltage_adjust;
+
+	if (!of_find_property(dev->of_node, prop_name, &len)) {
+		cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for voltage adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_name,
+						index, &voltage_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_name, index, rc);
+			return rc;
+		}
+
+		if (voltage_adjust) {
+			cpr_vreg->open_loop_volt[i] += (int)voltage_adjust;
+			cpr_info(cpr_vreg, "corner=%d adjusted open-loop voltage=%d\n",
+				i, cpr_vreg->open_loop_volt[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Adjust the per-virtual-corner quotient with an offset specified by a
+ * device-tree property. This must be called after the quot-scaling
+ * adjustments are completed.
+ */
+static int cpr_virtual_corner_quot_adjust(struct cpr_regulator *cpr_vreg,
+						struct device *dev)
+{
+	char *prop_name = "qcom,cpr-virtual-corner-quotient-adjustment";
+	int i, rc, tuple_count, tuple_match, index, len;
+	u32 quot_adjust;
+
+	if (!of_find_property(dev->of_node, prop_name, &len)) {
+		cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_name,
+						index, &quot_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_name, index, rc);
+			return rc;
+		}
+
+		if (quot_adjust) {
+			cpr_vreg->quot_adjust[i] -= (int)quot_adjust;
+			cpr_info(cpr_vreg, "corner=%d adjusted quotient=%d\n",
+					i,
+			cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+						- cpr_vreg->quot_adjust[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
+ *
+ * Get the virtual corner to fuse corner mapping and the virtual corner to APC
+ * clock frequency mapping from the device tree.
+ * Calculate the quotient adjustment scaling factor for the corners mapping to
+ * all fuse corners except the lowest one, using linear interpolation.
+ * Calculate the quotient adjustment for each of these virtual corners using
+ * the minimum of the calculated scaling factor and the constant max scaling
+ * factor defined per fuse corner in the device tree.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
+					struct device *dev)
+{
+	int rc = 0;
+	int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+	int i, j, size;
+	struct property *prop;
+	bool corners_mapped, match_found;
+	u32 *tmp, *freq_map = NULL;
+	u32 corner, freq_corner;
+	u32 *freq_max = NULL;
+	u32 *scaling = NULL;
+	u32 *max_factor = NULL;
+	u32 *corner_max = NULL;
+	bool maps_valid = false;
+
+	prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
+
+	if (prop) {
+		size = prop->length / sizeof(u32);
+		corners_mapped = true;
+	} else {
+		size = cpr_vreg->num_fuse_corners;
+		corners_mapped = false;
+	}
+
+	cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
+					GFP_KERNEL);
+	if (!cpr_vreg->corner_map) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->corner_map\n");
+		return -ENOMEM;
+	}
+	cpr_vreg->num_corners = size;
+
+	cpr_vreg->quot_adjust = devm_kzalloc(dev,
+			sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!cpr_vreg->quot_adjust) {
+		cpr_err(cpr_vreg,
+			"Can't allocate memory for cpr_vreg->quot_adjust\n");
+		return -ENOMEM;
+	}
+
+	if (!corners_mapped) {
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->corner_map[i] = i;
+		goto free_arrays;
+	} else {
+		rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
+
+		if (rc) {
+			cpr_err(cpr_vreg,
+				"qcom,cpr-corner-map missing, rc = %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Verify that the virtual corner to fuse corner mapping is
+		 * valid.
+		 */
+		for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+			if (cpr_vreg->corner_map[i] > cpr_vreg->num_fuse_corners
+			    || cpr_vreg->corner_map[i] < CPR_FUSE_CORNER_MIN) {
+				cpr_err(cpr_vreg, "qcom,cpr-corner-map contains an element %d which isn't in the allowed range [%d, %d]\n",
+					cpr_vreg->corner_map[i],
+					CPR_FUSE_CORNER_MIN,
+					cpr_vreg->num_fuse_corners);
+				return -EINVAL;
+			}
+		}
+	}
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-speed-bin-max-corners", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-speed-bin-max-corners missing\n");
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-speed-bin-max-corners", tmp, size);
+	if (rc < 0) {
+		kfree(tmp);
+		cpr_err(cpr_vreg,
+			"get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+		return rc;
+	}
+
+	corner_max = kzalloc((cpr_vreg->num_fuse_corners + 1)
+				* sizeof(*corner_max), GFP_KERNEL);
+	freq_max = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*freq_max),
+				GFP_KERNEL);
+	if (corner_max == NULL || freq_max == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for quotient scaling arrays\n");
+		kfree(tmp);
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+
+	/*
+	 * Get the maximum virtual corner for each fuse corner based upon the
+	 * speed_bin and pvs_version values.
+	 */
+	match_found = false;
+	for (i = 0; i < size; i += cpr_vreg->num_fuse_corners + 2) {
+		if (tmp[i] != cpr_vreg->speed_bin &&
+		    tmp[i] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i + 1] != cpr_vreg->pvs_version &&
+		    tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		for (j = CPR_FUSE_CORNER_MIN;
+		     j <= cpr_vreg->num_fuse_corners; j++)
+			corner_max[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+		match_found = true;
+		break;
+	}
+	kfree(tmp);
+
+	if (!match_found) {
+		cpr_debug(cpr_vreg, "No quotient adjustment possible for speed bin=%u, pvs version=%u\n",
+			cpr_vreg->speed_bin, cpr_vreg->pvs_version);
+		goto free_arrays;
+	}
+
+	/* Verify that fuse corner to max virtual corner mapping is valid. */
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++) {
+		if (corner_max[i] < CPR_CORNER_MIN
+		    || corner_max[i] > cpr_vreg->num_corners) {
+			cpr_err(cpr_vreg, "Invalid corner=%d in qcom,cpr-speed-bin-max-corners\n",
+				corner_max[i]);
+			goto free_arrays;
+		}
+	}
+
+	/*
+	 * Return success even if the virtual corner values read from the
+	 * qcom,cpr-speed-bin-max-corners property are inconsistent.  This
+	 * allows the driver to continue to run without quotient scaling.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (corner_max[i] <= corner_max[i - 1]) {
+			cpr_err(cpr_vreg, "max virtual corner for fuse corner=%d (%u) should be larger than that for fuse corner=%d (%u)\n",
+				i, corner_max[i], i - 1, corner_max[i - 1]);
+			goto free_arrays;
+		}
+	}
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-corner-frequency-map", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-corner-frequency-map missing\n");
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-corner-frequency-map", tmp, size);
+	if (rc < 0) {
+		cpr_err(cpr_vreg,
+			"get cpr-corner-frequency-map failed, rc = %d\n", rc);
+		kfree(tmp);
+		goto free_arrays;
+	}
+	freq_map = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+			GFP_KERNEL);
+	if (!freq_map) {
+		cpr_err(cpr_vreg, "memory alloc for freq_map failed!\n");
+		kfree(tmp);
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	for (i = 0; i < size; i += 2) {
+		corner = tmp[i];
+		if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+			cpr_err(cpr_vreg,
+				"corner should be in 1~%d range: %d\n",
+				cpr_vreg->num_corners, corner);
+			continue;
+		}
+		freq_map[corner] = tmp[i + 1];
+		cpr_debug(cpr_vreg,
+				"Frequency at virtual corner %d is %d Hz.\n",
+				corner, freq_map[corner]);
+	}
+	kfree(tmp);
+
+	prop = of_find_property(dev->of_node,
+			"qcom,cpr-quot-adjust-scaling-factor-max", NULL);
+	if (!prop) {
+		cpr_debug(cpr_vreg, "qcom,cpr-quot-adjust-scaling-factor-max missing\n");
+		rc = 0;
+		goto free_arrays;
+	}
+
+	size = prop->length / sizeof(u32);
+	if ((size != 1) && (size != cpr_vreg->num_fuse_corners)) {
+		cpr_err(cpr_vreg, "The size of qcom,cpr-quot-adjust-scaling-factor-max should be 1 or %d\n",
+			cpr_vreg->num_fuse_corners);
+		rc = 0;
+		goto free_arrays;
+	}
+
+	max_factor = kzalloc(sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
+			GFP_KERNEL);
+	if (!max_factor) {
+		cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	/*
+	 * Leave max_factor[CPR_FUSE_CORNER_MIN ... highest_fuse_corner-1] = 0
+	 * if cpr-quot-adjust-scaling-factor-max is a single value in order to
+	 * maintain backward compatibility.
+	 */
+	i = (size == cpr_vreg->num_fuse_corners) ? CPR_FUSE_CORNER_MIN
+						 : highest_fuse_corner;
+	rc = of_property_read_u32_array(dev->of_node,
+			"qcom,cpr-quot-adjust-scaling-factor-max",
+			&max_factor[i], size);
+	if (rc < 0) {
+		cpr_debug(cpr_vreg, "could not read qcom,cpr-quot-adjust-scaling-factor-max, rc=%d\n",
+			rc);
+		rc = 0;
+		goto free_arrays;
+	}
+
+	/*
+	 * Get the quotient adjustment scaling factor, according to:
+	 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+	 *		/ (freq(corner_N) - freq(corner_N-1)), max_factor)
+	 *
+	 * QUOT(corner_N):	quotient read from fuse for fuse corner N
+	 * QUOT(corner_N-1):	quotient read from fuse for fuse corner (N - 1)
+	 * freq(corner_N):	max frequency in MHz supported by fuse corner N
+	 * freq(corner_N-1):	max frequency in MHz supported by fuse corner
+	 *			 (N - 1)
+	 */
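+	/*
+	 * Hypothetical example: QUOT(corner_N) = 1400, QUOT(corner_N-1) = 1200,
+	 * freq(corner_N) = 1401 MHz and freq(corner_N-1) = 1113 MHz give
+	 * scaling = 1000 * (1400 - 1200) / (1401 - 1113) = 694, i.e. roughly
+	 * 0.694 QUOT per MHz before the max_factor cap is applied.
+	 */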
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+		freq_max[i] = freq_map[corner_max[i]];
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (freq_max[i] <= freq_max[i - 1] || freq_max[i - 1] == 0) {
+			cpr_err(cpr_vreg, "fuse corner %d freq=%u should be larger than fuse corner %d freq=%u\n",
+			      i, freq_max[i], i - 1, freq_max[i - 1]);
+			rc = -EINVAL;
+			goto free_arrays;
+		}
+	}
+	scaling = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*scaling),
+			GFP_KERNEL);
+	if (!scaling) {
+		cpr_err(cpr_vreg, "Could not allocate memory for scaling array\n");
+		rc = -ENOMEM;
+		goto free_arrays;
+	}
+	/* Convert corner max frequencies from Hz to MHz. */
+	for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+		freq_max[i] /= 1000000;
+
+	for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+		if (cpr_vreg->fuse_quot_offset &&
+			(cpr_vreg->cpr_fuse_ro_sel[i] !=
+				cpr_vreg->cpr_fuse_ro_sel[i - 1])) {
+			scaling[i] = 1000 * cpr_vreg->fuse_quot_offset[i]
+				/ (freq_max[i] - freq_max[i - 1]);
+		} else {
+			scaling[i] = 1000 * (cpr_vreg->cpr_fuse_target_quot[i]
+				      - cpr_vreg->cpr_fuse_target_quot[i - 1])
+				  / (freq_max[i] - freq_max[i - 1]);
+			if (cpr_vreg->cpr_fuse_target_quot[i]
+				< cpr_vreg->cpr_fuse_target_quot[i - 1])
+				scaling[i] = 0;
+		}
+		scaling[i] = min(scaling[i], max_factor[i]);
+		cpr_info(cpr_vreg, "fuse corner %d quotient adjustment scaling factor: %d.%03d\n",
+			i, scaling[i] / 1000, scaling[i] % 1000);
+	}
+
+	/*
+	 * Walk through the virtual corners mapped to each fuse corner
+	 * and calculate the quotient adjustment for each one using the
+	 * following formula:
+	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+	 *
+	 * @freq_max: max frequency in MHz supported by the fuse corner
+	 * @freq_corner: frequency in MHz corresponding to the virtual corner
+	 */
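+	/*
+	 * For example (hypothetical values): freq_max = 1401 MHz,
+	 * freq_corner = 1300 MHz and scaling = 694 give
+	 * quot_adjust = (1401 - 1300) * 694 / 1000 = 70.
+	 */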
+	for (j = CPR_FUSE_CORNER_MIN + 1; j <= highest_fuse_corner; j++) {
+		for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+			freq_corner = freq_map[i] / 1000000; /* MHz */
+			if (freq_corner > 0) {
+				cpr_vreg->quot_adjust[i] = scaling[j] *
+				   (freq_max[j] - freq_corner) / 1000;
+			}
+		}
+	}
+
+	rc = cpr_virtual_corner_quot_adjust(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not adjust virtual-corner quot rc=%d\n",
+			rc);
+		goto free_arrays;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_info(cpr_vreg, "adjusted quotient[%d] = %d\n", i,
+			cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+			- cpr_vreg->quot_adjust[i]);
+
+	maps_valid = true;
+
+free_arrays:
+	if (!rc) {
+		rc = cpr_get_open_loop_voltage(cpr_vreg, dev, corner_max,
+						freq_map, maps_valid);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not fill open loop voltage array, rc=%d\n",
+				rc);
+			goto free_arrays_1;
+		}
+
+		rc = cpr_virtual_corner_voltage_adjust(cpr_vreg, dev);
+		if (rc)
+			cpr_err(cpr_vreg, "could not adjust virtual-corner voltage rc=%d\n",
+				rc);
+	}
+
+free_arrays_1:
+	kfree(max_factor);
+	kfree(scaling);
+	kfree(freq_map);
+	kfree(corner_max);
+	kfree(freq_max);
+	return rc;
+}
+
+/*
+ * Check if the redundant set of CPR fuses should be used in place of the
+ * primary set and configure the cpr_fuse_redundant element accordingly.
+ */
+static int cpr_check_redundant(struct platform_device *pdev,
+		     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 cpr_fuse_redun_sel[5];
+	int rc;
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-redun-sel", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-redun-sel", cpr_fuse_redun_sel, 5);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "qcom,cpr-fuse-redun-sel missing: rc=%d\n",
+				rc);
+			return rc;
+		}
+		cpr_vreg->cpr_fuse_redundant
+			= cpr_fuse_is_setting_expected(cpr_vreg,
+						cpr_fuse_redun_sel);
+	} else {
+		cpr_vreg->cpr_fuse_redundant = false;
+	}
+
+	if (cpr_vreg->cpr_fuse_redundant)
+		cpr_info(cpr_vreg, "using redundant fuse parameters\n");
+
+	return 0;
+}
+
+static int cpr_read_fuse_revision(struct platform_device *pdev,
+		     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 fuse_sel[4];
+	int rc;
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-revision", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-revision", fuse_sel, 4);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "qcom,cpr-fuse-revision read failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+		cpr_vreg->cpr_fuse_revision
+			= cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+					fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+		cpr_info(cpr_vreg, "fuse revision = %d\n",
+			cpr_vreg->cpr_fuse_revision);
+	} else {
+		cpr_vreg->cpr_fuse_revision = FUSE_REVISION_UNKNOWN;
+	}
+
+	return 0;
+}
+
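+/*
+ * Read the ring oscillator select (RO select) fuse value for each fuse corner
+ * from either the primary or the redundant CPR fuse row.  The per-corner bit
+ * positions come from qcom,cpr-fuse-ro-sel (or its redundant counterpart) and
+ * each field is CPR_FUSE_RO_SEL_BITS wide.
+ */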
+static int cpr_read_ro_select(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc = 0;
+	u32 cpr_fuse_row[2];
+	char *ro_sel_str;
+	int *bp_ro_sel;
+	int i;
+
+	bp_ro_sel
+		= kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*bp_ro_sel),
+			GFP_KERNEL);
+	if (!bp_ro_sel) {
+		cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+		return -ENOMEM;
+	}
+
+	if (cpr_vreg->cpr_fuse_redundant) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-redun-row",
+				cpr_fuse_row, 2);
+		ro_sel_str = "qcom,cpr-fuse-redun-ro-sel";
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+				cpr_fuse_row, 2);
+		ro_sel_str = "qcom,cpr-fuse-ro-sel";
+	}
+	if (rc)
+		goto error;
+
+	rc = of_property_read_u32_array(of_node, ro_sel_str,
+		&bp_ro_sel[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+	if (rc) {
+		cpr_err(cpr_vreg, "%s read error, rc=%d\n", ro_sel_str, rc);
+		goto error;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_vreg->cpr_fuse_ro_sel[i]
+			= cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+				bp_ro_sel[i], CPR_FUSE_RO_SEL_BITS,
+				cpr_fuse_row[1]);
+
+error:
+	kfree(bp_ro_sel);
+
+	return rc;
+}
+
+static int cpr_find_fuse_map_match(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, j, rc, tuple_size;
+	int len = 0;
+	u32 *tmp, val, ro;
+
+	/* Specify default no match case. */
+	cpr_vreg->cpr_fuse_map_match = FUSE_MAP_NO_MATCH;
+	cpr_vreg->cpr_fuse_map_count = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-fuse-version-map", &len)) {
+		/* No mapping present. */
+		return 0;
+	}
+
+	tuple_size = cpr_vreg->num_fuse_corners + 3;
+	cpr_vreg->cpr_fuse_map_count = len / (sizeof(u32) * tuple_size);
+
+	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-version-map length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-version-map",
+				tmp, cpr_vreg->cpr_fuse_map_count * tuple_size);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,cpr-fuse-version-map, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	/*
+	 * qcom,cpr-fuse-version-map tuple format:
+	 * <speed_bin, pvs_version, cpr_fuse_revision, ro_sel[1], ...,
+	 *  ro_sel[n]> for n == number of fuse corners
+	 */
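+	/*
+	 * For instance, with 3 fuse corners a hypothetical tuple
+	 * <2 FUSE_PARAM_MATCH_ANY 1 0 1 2> matches speed bin 2, any PVS
+	 * version, fuse revision 1 and RO selects 0, 1 and 2.
+	 */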
+	for (i = 0; i < cpr_vreg->cpr_fuse_map_count; i++) {
+		if (tmp[i * tuple_size] != cpr_vreg->speed_bin
+		    && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i * tuple_size + 1] != cpr_vreg->pvs_version
+		    && tmp[i * tuple_size + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i * tuple_size + 2] != cpr_vreg->cpr_fuse_revision
+		    && tmp[i * tuple_size + 2] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		for (j = 0; j < cpr_vreg->num_fuse_corners; j++) {
+			val = tmp[i * tuple_size + 3 + j];
+			ro = cpr_vreg->cpr_fuse_ro_sel[j + CPR_FUSE_CORNER_MIN];
+			if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+				break;
+		}
+		if (j == cpr_vreg->num_fuse_corners) {
+			cpr_vreg->cpr_fuse_map_match = i;
+			break;
+		}
+	}
+
+	if (cpr_vreg->cpr_fuse_map_match != FUSE_MAP_NO_MATCH)
+		cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match found: %d\n",
+			cpr_vreg->cpr_fuse_map_match);
+	else
+		cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match not found\n");
+
+done:
+	kfree(tmp);
+	return rc;
+}
+
+static int cpr_minimum_quot_difference_adjustment(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int tuple_count, tuple_match;
+	int rc, i, len = 0;
+	u32 index, adjust_quot = 0;
+	u32 *min_diff_quot;
+
+	if (!of_find_property(of_node, "qcom,cpr-fuse-min-quot-diff", NULL))
+		/* No conditional adjustment needed on revised quotients. */
+		return 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-min-quot-diff-adjustment",
+						&len)) {
+		cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment not specified\n");
+		return -ENODEV;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment length=%d is invalid\n",
+					len);
+		return -EINVAL;
+	}
+
+	min_diff_quot = kzalloc(cpr_vreg->num_fuse_corners * sizeof(u32),
+							GFP_KERNEL);
+	if (!min_diff_quot) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-min-quot-diff",
+						min_diff_quot,
+						cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-min-quot-diff reading failed, rc = %d\n",
+							rc);
+		goto error;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN + 1;
+				i <= cpr_vreg->num_fuse_corners; i++) {
+		if ((cpr_vreg->cpr_fuse_target_quot[i]
+			- cpr_vreg->cpr_fuse_target_quot[i - 1])
+		    <= (int)min_diff_quot[i - CPR_FUSE_CORNER_MIN]) {
+			index = tuple_match * cpr_vreg->num_fuse_corners
+					+ i - CPR_FUSE_CORNER_MIN;
+			rc = of_property_read_u32_index(of_node,
+						"qcom,cpr-min-quot-diff-adjustment",
+						index, &adjust_quot);
+			if (rc) {
+				cpr_err(cpr_vreg, "could not read qcom,cpr-min-quot-diff-adjustment index %u, rc=%d\n",
+							index, rc);
+				goto error;
+			}
+
+			cpr_vreg->cpr_fuse_target_quot[i]
+				= cpr_vreg->cpr_fuse_target_quot[i - 1]
+					+ adjust_quot;
+			cpr_info(cpr_vreg, "Corner[%d]: revised adjusted quotient = %d\n",
+					i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+error:
+	kfree(min_diff_quot);
+	return rc;
+}
+
+static int cpr_adjust_target_quots(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int tuple_count, tuple_match, i;
+	u32 index;
+	u32 quot_adjust = 0;
+	int len = 0;
+	int rc = 0;
+
+	if (!of_find_property(of_node, "qcom,cpr-quotient-adjustment", &len)) {
+		/* No static quotient adjustment needed. */
+		return 0;
+	}
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "qcom,cpr-quotient-adjustment length=%d is invalid\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		index = tuple_match * cpr_vreg->num_fuse_corners
+				+ i - CPR_FUSE_CORNER_MIN;
+		rc = of_property_read_u32_index(of_node,
+			"qcom,cpr-quotient-adjustment", index, &quot_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-quotient-adjustment index %u, rc=%d\n",
+				index, rc);
+			return rc;
+		}
+
+		if (quot_adjust) {
+			cpr_vreg->cpr_fuse_target_quot[i] += quot_adjust;
+			cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
+				i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+	rc = cpr_minimum_quot_difference_adjustment(pdev, cpr_vreg);
+	if (rc)
+		cpr_err(cpr_vreg, "failed to apply minimum quot difference rc=%d\n",
+					rc);
+
+	return rc;
+}
+
+static int cpr_check_allowed(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	char *allow_str = "qcom,cpr-allowed";
+	int rc = 0, count;
+	int tuple_count, tuple_match;
+	u32 allow_status;
+
+	if (!of_find_property(of_node, allow_str, &count))
+		/* CPR is allowed for all fuse revisions. */
+		return 0;
+
+	count /= sizeof(u32);
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for CPR allowed. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (count != tuple_count) {
+		cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+			count);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+		&allow_status);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+			allow_str, tuple_match, rc);
+		return rc;
+	}
+
+	if (allow_status && !cpr_vreg->cpr_fuse_disable)
+		cpr_vreg->cpr_fuse_disable = false;
+	else
+		cpr_vreg->cpr_fuse_disable = true;
+
+	cpr_info(cpr_vreg, "CPR closed loop is %s for fuse revision %d\n",
+		cpr_vreg->cpr_fuse_disable ? "disabled" : "enabled",
+		cpr_vreg->cpr_fuse_revision);
+
+	return rc;
+}
+
+static int cpr_check_de_aging_allowed(struct cpr_regulator *cpr_vreg,
+				struct device *dev)
+{
+	struct device_node *of_node = dev->of_node;
+	char *allow_str = "qcom,cpr-de-aging-allowed";
+	int rc = 0, count;
+	int tuple_count, tuple_match;
+	u32 allow_status = 0;
+
+	if (!of_find_property(of_node, allow_str, &count)) {
+		/* CPR de-aging is not allowed for any fuse revision. */
+		return allow_status;
+	}
+
+	count /= sizeof(u32);
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+			/* No matching index to use for CPR de-aging allowed. */
+			return 0;
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (count != tuple_count) {
+		cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+			count);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+		&allow_status);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+			allow_str, tuple_match, rc);
+		return rc;
+	}
+
+	cpr_info(cpr_vreg, "CPR de-aging is %s for fuse revision %d\n",
+			allow_status ? "allowed" : "not allowed",
+			cpr_vreg->cpr_fuse_revision);
+
+	return allow_status;
+}
+
+static int cpr_aging_init(struct platform_device *pdev,
+			struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct cpr_aging_info *aging_info;
+	struct cpr_aging_sensor_info *sensor_info;
+	int num_fuse_corners = cpr_vreg->num_fuse_corners;
+	int i, rc = 0, len = 0, num_aging_sensors, ro_sel, bits;
+	u32 *aging_sensor_id, *fuse_sel, *fuse_sel_orig;
+	u32 sensor = 0, non_collapsible_sensor_mask = 0;
+	u64 efuse_val;
+	struct property *prop;
+
+	if (!of_find_property(of_node, "qcom,cpr-aging-sensor-id", &len)) {
+		/* No CPR de-aging adjustments needed */
+		return 0;
+	}
+
+	if (len == 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property format is invalid\n");
+		return -EINVAL;
+	}
+	num_aging_sensors = len / sizeof(u32);
+	cpr_debug(cpr_vreg, "No of aging sensors = %d\n", num_aging_sensors);
+
+	if (cpumask_empty(&cpr_vreg->cpu_mask)) {
+		cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_check_de_aging_allowed(cpr_vreg, &pdev->dev);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr_check_de_aging_allowed failed: rc=%d\n",
+			rc);
+		return rc;
+	} else if (rc == 0) {
+		/* CPR de-aging is not allowed for the current fuse combo */
+		return 0;
+	}
+
+	aging_info = devm_kzalloc(&pdev->dev, sizeof(*aging_info),
+				GFP_KERNEL);
+	if (!aging_info)
+		return -ENOMEM;
+
+	cpr_vreg->aging_info = aging_info;
+	aging_info->num_aging_sensors = num_aging_sensors;
+
+	rc = of_property_read_u32(of_node, "qcom,cpr-aging-ref-corner",
+			&aging_info->aging_corner);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-ref-corner missing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ref-voltage",
+			&aging_info->aging_ref_voltage, rc);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-max-aging-margin",
+			&aging_info->max_aging_margin, rc);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ro-scaling-factor",
+			&aging_info->aging_ro_kv, rc);
+	if (rc)
+		return rc;
+
+	/* Check for DIV by 0 error */
+	if (aging_info->aging_ro_kv == 0) {
+		cpr_err(cpr_vreg, "invalid cpr-aging-ro-scaling-factor value: %u\n",
+			aging_info->aging_ro_kv);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-ro-scaling-factor",
+			aging_info->cpr_ro_kv, CPR_NUM_RING_OSC);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-ro-scaling-factor property read failed, rc = %d\n",
+			rc);
+		return rc;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-non-collapsible-sensors",
+				&len)) {
+		len = len / sizeof(u32);
+		if (len <= 0 || len > 32) {
+			cpr_err(cpr_vreg, "qcom,cpr-non-collapsible-sensors has an incorrect size\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			rc = of_property_read_u32_index(of_node,
+						"qcom,cpr-non-collapsible-sensors",
+						i, &sensor);
+			if (rc) {
+				cpr_err(cpr_vreg, "could not read qcom,cpr-non-collapsible-sensors index %u, rc=%d\n",
+					i, rc);
+				return rc;
+			}
+
+			if (sensor > 31) {
+				cpr_err(cpr_vreg, "invalid non-collapsible sensor = %u\n",
+					sensor);
+				return -EINVAL;
+			}
+
+			non_collapsible_sensor_mask |= BIT(sensor);
+		}
+
+		/*
+		 * Bypass the sensors in the collapsible domain for
+		 * de-aging measurements.
+		 */
+		aging_info->aging_sensor_bypass =
+						~(non_collapsible_sensor_mask);
+		cpr_debug(cpr_vreg, "sensor bypass mask for aging = 0x%08x\n",
+			aging_info->aging_sensor_bypass);
+	}
+
+	prop = of_find_property(pdev->dev.of_node, "qcom,cpr-aging-derate",
+			NULL);
+	if ((!prop) ||
+		(prop->length != num_fuse_corners * sizeof(u32))) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-derate incorrectly configured\n");
+		return -EINVAL;
+	}
+
+	aging_sensor_id = kcalloc(num_aging_sensors, sizeof(*aging_sensor_id),
+				GFP_KERNEL);
+	fuse_sel = kcalloc(num_aging_sensors * 4, sizeof(*fuse_sel),
+				GFP_KERNEL);
+	aging_info->voltage_adjust = devm_kcalloc(&pdev->dev,
+					num_fuse_corners + 1,
+					sizeof(*aging_info->voltage_adjust),
+					GFP_KERNEL);
+	aging_info->sensor_info = devm_kcalloc(&pdev->dev, num_aging_sensors,
+					sizeof(*aging_info->sensor_info),
+					GFP_KERNEL);
+	aging_info->aging_derate = devm_kcalloc(&pdev->dev,
+					num_fuse_corners + 1,
+					sizeof(*aging_info->aging_derate),
+					GFP_KERNEL);
+
+	if (!aging_info->aging_derate || !aging_sensor_id
+		|| !aging_info->sensor_info || !fuse_sel
+		|| !aging_info->voltage_adjust) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-sensor-id",
+					aging_sensor_id, num_aging_sensors);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	for (i = 0; i < num_aging_sensors; i++)
+		if (aging_sensor_id[i] < 0 || aging_sensor_id[i] > 31) {
+			cpr_err(cpr_vreg, "Invalid aging sensor id: %u\n",
+				aging_sensor_id[i]);
+			rc = -EINVAL;
+			goto err;
+		}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-derate",
+			&aging_info->aging_derate[CPR_FUSE_CORNER_MIN],
+			num_fuse_corners);
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-aging-derate property read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-aging-init-quot-diff",
+				fuse_sel, (num_aging_sensors * 4));
+	if (rc) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-aging-init-quot-diff read failed, rc = %d\n",
+				rc);
+		goto err;
+	}
+
+	fuse_sel_orig = fuse_sel;
+	sensor_info = aging_info->sensor_info;
+	for (i = 0; i < num_aging_sensors; i++, sensor_info++) {
+		sensor_info->sensor_id = aging_sensor_id[i];
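+		/*
+		 * The aging init quot diff fuse is sign-magnitude encoded:
+		 * the MSB of the field is the sign and the remaining bits are
+		 * the magnitude.
+		 */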
+		efuse_val = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+				fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+		bits = fuse_sel[2];
+		sensor_info->initial_quot_diff = ((efuse_val & BIT(bits - 1)) ?
+			-1 : 1) * (efuse_val & (BIT(bits - 1) - 1));
+
+		cpr_debug(cpr_vreg, "Age sensor[%d] Initial quot diff = %d\n",
+				sensor_info->sensor_id,
+				sensor_info->initial_quot_diff);
+		fuse_sel += 4;
+	}
+
+	/*
+	 * Add the max aging margin here. It can be adjusted later by the
+	 * de-aging algorithm.
+	 */
+	for (i = CPR_FUSE_CORNER_MIN; i <= num_fuse_corners; i++) {
+		ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+		cpr_vreg->cpr_fuse_target_quot[i] +=
+				(aging_info->cpr_ro_kv[ro_sel]
+				* aging_info->max_aging_margin) / 1000000;
+		aging_info->voltage_adjust[i] = aging_info->max_aging_margin;
+		cpr_info(cpr_vreg, "Corner[%d]: age margin adjusted quotient = %d\n",
+			i, cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+	kfree(fuse_sel_orig);
+err:
+	kfree(aging_sensor_id);
+	return rc;
+}
+
+static int cpr_cpu_map_init(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	struct device_node *cpu_node;
+	int i, cpu;
+
+	if (!of_find_property(dev->of_node, "qcom,cpr-cpus",
+				&cpr_vreg->num_adj_cpus)) {
+		/* No adjustments based on online cores */
+		return 0;
+	}
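+	/* The property length is given in bytes; convert it to a cell count. */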
+	cpr_vreg->num_adj_cpus /= sizeof(u32);
+
+	cpr_vreg->adj_cpus = devm_kcalloc(dev, cpr_vreg->num_adj_cpus,
+					sizeof(int), GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus)
+		return -ENOMEM;
+
+	for (i = 0; i < cpr_vreg->num_adj_cpus; i++) {
+		cpu_node = of_parse_phandle(dev->of_node, "qcom,cpr-cpus", i);
+		if (!cpu_node) {
+			cpr_err(cpr_vreg, "could not find CPU node %d\n", i);
+			return -EINVAL;
+		}
+		cpr_vreg->adj_cpus[i] = -1;
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpr_vreg->adj_cpus[i] = cpu;
+				cpumask_set_cpu(cpu, &cpr_vreg->cpu_mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+	}
+
+	return 0;
+}
+
+static int cpr_init_cpr_efuse(struct platform_device *pdev,
+				     struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int i, rc = 0;
+	bool scheme_fuse_valid = false;
+	bool disable_fuse_valid = false;
+	char *targ_quot_str;
+	u32 cpr_fuse_row[2];
+	u32 bp_cpr_disable, bp_scheme;
+	size_t len;
+	int *bp_target_quot;
+	u64 fuse_bits, fuse_bits_2;
+	u32 *target_quot_size;
+	struct cpr_quot_scale *quot_scale;
+
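+	/*
+	 * Element 0 of these arrays is unused so that they can be indexed
+	 * directly by fuse corner number.
+	 */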
+	len = cpr_vreg->num_fuse_corners + 1;
+
+	bp_target_quot = kzalloc(len * sizeof(*bp_target_quot), GFP_KERNEL);
+	target_quot_size = kzalloc(len * sizeof(*target_quot_size), GFP_KERNEL);
+	quot_scale = kzalloc(len * sizeof(*quot_scale), GFP_KERNEL);
+
+	if (!bp_target_quot || !target_quot_size || !quot_scale) {
+		cpr_err(cpr_vreg,
+			"Could not allocate memory for fuse parsing arrays\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	if (cpr_vreg->cpr_fuse_redundant) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,cpr-fuse-redun-row",
+				cpr_fuse_row, 2);
+		targ_quot_str = "qcom,cpr-fuse-redun-target-quot";
+	} else {
+		rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+				cpr_fuse_row, 2);
+		targ_quot_str = "qcom,cpr-fuse-target-quot";
+	}
+	if (rc)
+		goto error;
+
+	rc = of_property_read_u32_array(of_node, targ_quot_str,
+		&bp_target_quot[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "missing %s: rc=%d\n", targ_quot_str, rc);
+		goto error;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-size", NULL)) {
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-target-quot-size",
+			&target_quot_size[CPR_FUSE_CORNER_MIN],
+			cpr_vreg->num_fuse_corners);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-size: rc=%d\n",
+				rc);
+			goto error;
+		}
+	} else {
+		/*
+		 * Default fuse quotient parameter size to match target register
+		 * size.
+		 */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			target_quot_size[i] = CPR_FUSE_TARGET_QUOT_BITS;
+	}
+
+	if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-scale",
+				NULL)) {
+		for (i = 0; i < cpr_vreg->num_fuse_corners; i++) {
+			rc = of_property_read_u32_index(of_node,
+				"qcom,cpr-fuse-target-quot-scale", i * 2,
+				&quot_scale[i + CPR_FUSE_CORNER_MIN].offset);
+			if (rc < 0) {
+				cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+					rc);
+				goto error;
+			}
+
+			rc = of_property_read_u32_index(of_node,
+				"qcom,cpr-fuse-target-quot-scale", i * 2 + 1,
+			       &quot_scale[i + CPR_FUSE_CORNER_MIN].multiplier);
+			if (rc < 0) {
+				cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+					rc);
+				goto error;
+			}
+		}
+	} else {
+		/*
+		 * In the default case, target quotients require no scaling so
+		 * use offset = 0, multiplier = 1.
+		 */
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++) {
+			quot_scale[i].offset = 0;
+			quot_scale[i].multiplier = 1;
+		}
+	}
+
+	/* Read the control bits of eFuse */
+	fuse_bits = cpr_read_efuse_row(cpr_vreg, cpr_fuse_row[0],
+					cpr_fuse_row[1]);
+	cpr_info(cpr_vreg, "[row:%d] = 0x%llx\n", cpr_fuse_row[0], fuse_bits);
+
+	if (cpr_vreg->cpr_fuse_redundant) {
+		if (of_find_property(of_node,
+				"qcom,cpr-fuse-redun-bp-cpr-disable", NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+					  "cpr-fuse-redun-bp-cpr-disable",
+					  &bp_cpr_disable, rc);
+			disable_fuse_valid = true;
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-redun-bp-scheme",
+					NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+						"cpr-fuse-redun-bp-scheme",
+						&bp_scheme, rc);
+				scheme_fuse_valid = true;
+			}
+			if (rc)
+				goto error;
+			fuse_bits_2 = fuse_bits;
+		} else {
+			u32 temp_row[2];
+
+			/* Use original fuse if no optional property */
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-bp-cpr-disable", NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+					"cpr-fuse-bp-cpr-disable",
+					&bp_cpr_disable, rc);
+				disable_fuse_valid = true;
+			}
+			if (of_find_property(of_node,
+					"qcom,cpr-fuse-bp-scheme",
+					NULL)) {
+				CPR_PROP_READ_U32(cpr_vreg, of_node,
+						"cpr-fuse-bp-scheme",
+						&bp_scheme, rc);
+				scheme_fuse_valid = true;
+			}
+			rc = of_property_read_u32_array(of_node,
+					"qcom,cpr-fuse-row",
+					temp_row, 2);
+			if (rc)
+				goto error;
+
+			fuse_bits_2 = cpr_read_efuse_row(cpr_vreg, temp_row[0],
+							temp_row[1]);
+			cpr_info(cpr_vreg, "[original row:%d] = 0x%llx\n",
+				temp_row[0], fuse_bits_2);
+		}
+	} else {
+		if (of_find_property(of_node, "qcom,cpr-fuse-bp-cpr-disable",
+					NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+				"cpr-fuse-bp-cpr-disable", &bp_cpr_disable, rc);
+			disable_fuse_valid = true;
+		}
+		if (of_find_property(of_node, "qcom,cpr-fuse-bp-scheme",
+							NULL)) {
+			CPR_PROP_READ_U32(cpr_vreg, of_node,
+					"cpr-fuse-bp-scheme", &bp_scheme, rc);
+			scheme_fuse_valid = true;
+		}
+		if (rc)
+			goto error;
+		fuse_bits_2 = fuse_bits;
+	}
+
+	if (disable_fuse_valid) {
+		cpr_vreg->cpr_fuse_disable =
+					(fuse_bits_2 >> bp_cpr_disable) & 0x01;
+		cpr_info(cpr_vreg, "CPR disable fuse = %d\n",
+			cpr_vreg->cpr_fuse_disable);
+	} else {
+		cpr_vreg->cpr_fuse_disable = false;
+	}
+
+	if (scheme_fuse_valid) {
+		cpr_vreg->cpr_fuse_local = (fuse_bits_2 >> bp_scheme) & 0x01;
+		cpr_info(cpr_vreg, "local = %d\n", cpr_vreg->cpr_fuse_local);
+	} else {
+		cpr_vreg->cpr_fuse_local = true;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		cpr_vreg->cpr_fuse_target_quot[i]
+			= cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+				bp_target_quot[i], target_quot_size[i],
+				cpr_fuse_row[1]);
+		/* Unpack the target quotient by scaling. */
+		cpr_vreg->cpr_fuse_target_quot[i] *= quot_scale[i].multiplier;
+		cpr_vreg->cpr_fuse_target_quot[i] += quot_scale[i].offset;
+		cpr_info(cpr_vreg,
+			"Corner[%d]: ro_sel = %d, target quot = %d\n", i,
+			cpr_vreg->cpr_fuse_ro_sel[i],
+			cpr_vreg->cpr_fuse_target_quot[i]);
+	}
+
+	rc = cpr_cpu_map_init(cpr_vreg, &pdev->dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR cpu map init failed: rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = cpr_aging_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR aging init failed: rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = cpr_adjust_target_quots(pdev, cpr_vreg);
+	if (rc)
+		goto error;
+
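+	/*
+	 * Sanity check: when adjacent fuse corners share the same ring
+	 * oscillator, the higher corner's target quotient must not be
+	 * smaller than the lower corner's; otherwise the fuses are invalid.
+	 */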
+	for (i = CPR_FUSE_CORNER_MIN + 1;
+				i <= cpr_vreg->num_fuse_corners; i++) {
+		if (cpr_vreg->cpr_fuse_target_quot[i]
+				< cpr_vreg->cpr_fuse_target_quot[i - 1] &&
+			cpr_vreg->cpr_fuse_ro_sel[i] ==
+				cpr_vreg->cpr_fuse_ro_sel[i - 1]) {
+			cpr_vreg->cpr_fuse_disable = true;
+			cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+		}
+	}
+
+	if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+		cpr_voltage_uplift_wa_inc_quot(cpr_vreg, of_node);
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++) {
+			cpr_info(cpr_vreg,
+				"Corner[%d]: uplifted target quot = %d\n",
+				i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+	}
+
+	/*
+	 * Check whether the fuse-quot-offset is defined per fuse corner.
+	 * If it is defined, use it (quot_offset) in the calculation
+	 * below for obtaining scaling factor per fuse corner.
+	 */
+	rc = cpr_get_fuse_quot_offset(cpr_vreg, pdev, quot_scale);
+	if (rc < 0)
+		goto error;
+
+	rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
+	if (rc)
+		goto error;
+
+	cpr_vreg->cpr_fuse_bits = fuse_bits;
+	if (!cpr_vreg->cpr_fuse_bits) {
+		cpr_vreg->cpr_fuse_disable = true;
+		cpr_err(cpr_vreg,
+			"cpr_fuse_bits == 0; permanently disabling CPR\n");
+	} else if (!cpr_vreg->fuse_quot_offset) {
+		/*
+		 * Check if the target quotients for the highest two fuse
+		 * corners are too close together.
+		 */
+		int *quot = cpr_vreg->cpr_fuse_target_quot;
+		int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+		u32 min_diff_quot;
+		bool valid_fuse = true;
+
+		min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF;
+		of_property_read_u32(of_node, "qcom,cpr-quot-min-diff",
+							&min_diff_quot);
+
+		if (quot[highest_fuse_corner] > quot[highest_fuse_corner - 1]) {
+			if ((quot[highest_fuse_corner]
+				- quot[highest_fuse_corner - 1])
+					<= min_diff_quot)
+				valid_fuse = false;
+		} else {
+			valid_fuse = false;
+		}
+
+		if (!valid_fuse) {
+			cpr_vreg->cpr_fuse_disable = true;
+			cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+		}
+	}
+	rc = cpr_check_allowed(pdev, cpr_vreg);
+
+error:
+	kfree(bp_target_quot);
+	kfree(target_quot_size);
+	kfree(quot_scale);
+
+	return rc;
+}
+
+static int cpr_init_cpr_voltages(struct cpr_regulator *cpr_vreg,
+			struct device *dev)
+{
+	int i;
+	int size = cpr_vreg->num_corners + 1;
+
+	cpr_vreg->last_volt = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+	if (!cpr_vreg->last_volt)
+		return -ENOMEM;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		cpr_vreg->last_volt[i] = cpr_vreg->open_loop_volt[i];
+
+	return 0;
+}
+
+/*
+ * This function fills the virtual_limit array with voltages read from the
+ * prop_name device tree property if a given tuple in the property matches
+ * the speed bin and PVS version fuses found on the chip.  Otherwise, it
+ * fills the virtual_limit array with the corresponding values from the
+ * fuse_limit array.
+ */
+static int cpr_fill_override_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev, const char *prop_name, const char *label,
+		int *virtual_limit, int *fuse_limit)
+{
+	int rc = 0;
+	int i, j, size, pos;
+	struct property *prop;
+	bool match_found = false;
+	size_t buflen;
+	char *buf;
+	u32 *tmp;
+
+	prop = of_find_property(dev->of_node, prop_name, NULL);
+	if (!prop)
+		goto use_fuse_corner_limits;
+
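+	/*
+	 * Each tuple in the property consists of a speed bin value, a PVS
+	 * version value, and one voltage per virtual corner.
+	 */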
+	size = prop->length / sizeof(u32);
+	if (size == 0 || size % (cpr_vreg->num_corners + 2)) {
+		cpr_err(cpr_vreg, "%s property format is invalid; using per-fuse-corner limits\n",
+			prop_name);
+		goto use_fuse_corner_limits;
+	}
+
+	tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+	if (!tmp) {
+		cpr_err(cpr_vreg, "memory alloc failed\n");
+		return -ENOMEM;
+	}
+	rc = of_property_read_u32_array(dev->of_node, prop_name, tmp, size);
+	if (rc < 0) {
+		kfree(tmp);
+		cpr_err(cpr_vreg, "%s reading failed, rc = %d\n", prop_name,
+			rc);
+		return rc;
+	}
+
+	/*
+	 * Get limit voltage for each virtual corner based upon the speed_bin
+	 * and pvs_version values.
+	 */
+	for (i = 0; i < size; i += cpr_vreg->num_corners + 2) {
+		if (tmp[i] != cpr_vreg->speed_bin &&
+		    tmp[i] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		if (tmp[i + 1] != cpr_vreg->pvs_version &&
+		    tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+			continue;
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++)
+			virtual_limit[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+		match_found = true;
+		break;
+	}
+	kfree(tmp);
+
+	if (!match_found)
+		goto use_fuse_corner_limits;
+
+	/*
+	 * Log per-virtual-corner voltage limits since they are useful for
+	 * baseline CPR debugging.
+	 */
+	buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+		return 0;
+	}
+
+	for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+			virtual_limit[i], i < cpr_vreg->num_corners ? " " : "");
+	cpr_info(cpr_vreg, "%s override voltage: [%s] uV\n", label, buf);
+	kfree(buf);
+
+	return rc;
+
+use_fuse_corner_limits:
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+		virtual_limit[i] = fuse_limit[cpr_vreg->corner_map[i]];
+	return rc;
+}
+
+/*
+ * This function loads per-virtual-corner ceiling and floor voltages from device
+ * tree if their respective device tree properties are present.  These limits
+ * override those found in the per-fuse-corner arrays fuse_ceiling_volt and
+ * fuse_floor_volt.
+ */
+static int cpr_init_ceiling_floor_override_voltages(
+	struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int rc, i;
+	int size = cpr_vreg->num_corners + 1;
+
+	cpr_vreg->ceiling_volt = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	cpr_vreg->floor_volt = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	cpr_vreg->cpr_max_ceiling = devm_kzalloc(dev, sizeof(int) * size,
+						GFP_KERNEL);
+	if (!cpr_vreg->ceiling_volt || !cpr_vreg->floor_volt ||
+		!cpr_vreg->cpr_max_ceiling)
+		return -ENOMEM;
+
+	rc = cpr_fill_override_voltage(cpr_vreg, dev,
+		"qcom,cpr-voltage-ceiling-override", "ceiling",
+		cpr_vreg->ceiling_volt, cpr_vreg->fuse_ceiling_volt);
+	if (rc)
+		return rc;
+
+	rc = cpr_fill_override_voltage(cpr_vreg, dev,
+		"qcom,cpr-voltage-floor-override", "floor",
+		cpr_vreg->floor_volt, cpr_vreg->fuse_floor_volt);
+	if (rc)
+		return rc;
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		if (cpr_vreg->floor_volt[i] > cpr_vreg->ceiling_volt[i]) {
+			cpr_err(cpr_vreg, "virtual corner %d floor=%d uV > ceiling=%d uV\n",
+				i, cpr_vreg->floor_volt[i],
+				cpr_vreg->ceiling_volt[i]);
+			return -EINVAL;
+		}
+
+		if (cpr_vreg->ceiling_max < cpr_vreg->ceiling_volt[i])
+			cpr_vreg->ceiling_max = cpr_vreg->ceiling_volt[i];
+		cpr_vreg->cpr_max_ceiling[i] = cpr_vreg->ceiling_volt[i];
+	}
+
+	return rc;
+}
+
+/*
+ * This function computes per-virtual-corner floor voltages from the
+ * per-virtual-corner ceiling voltages minus an offset specified by a
+ * device-tree property. It must be called after open-loop voltage scaling
+ * has been performed, the floor_volt array has been loaded, and the
+ * ceiling voltage has been conditionally reduced to the open-loop voltage.
+ * For each corner it stores the maximum of the calculated floor voltage
+ * and the existing floor_volt value back into the floor_volt array.
+ */
+static int cpr_init_floor_to_ceiling_range(
+	struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+	int rc, i, tuple_count, tuple_match, len, pos;
+	u32 index, floor_volt_adjust = 0;
+	char *prop_str, *buf;
+	size_t buflen;
+
+	prop_str = "qcom,cpr-floor-to-ceiling-max-range";
+
+	if (!of_find_property(dev->of_node, prop_str, &len))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/*
+			 * No matching index to use for floor-to-ceiling
+			 * max range.
+			 */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+		cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+		index = tuple_match * cpr_vreg->num_corners
+				+ i - CPR_CORNER_MIN;
+		rc = of_property_read_u32_index(dev->of_node, prop_str,
+			index, &floor_volt_adjust);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
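+		/*
+		 * An adjustment value that is negative when treated as signed
+		 * appears to mean that no floor-to-ceiling limit applies to
+		 * this corner.  Otherwise the floor is raised to at least
+		 * (ceiling - adjustment), rounded up to a step_volt multiple,
+		 * and the open-loop voltage is raised to match if needed.
+		 */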
+		if ((int)floor_volt_adjust >= 0) {
+			cpr_vreg->floor_volt[i] = max(cpr_vreg->floor_volt[i],
+						(cpr_vreg->ceiling_volt[i]
+						- (int)floor_volt_adjust));
+			cpr_vreg->floor_volt[i]
+					= DIV_ROUND_UP(cpr_vreg->floor_volt[i],
+							cpr_vreg->step_volt) *
+							cpr_vreg->step_volt;
+			if (cpr_vreg->open_loop_volt[i]
+					< cpr_vreg->floor_volt[i])
+				cpr_vreg->open_loop_volt[i]
+						= cpr_vreg->floor_volt[i];
+		}
+	}
+
+	/*
+	 * Log the per-virtual-corner voltage limits that result after the
+	 * floor-to-ceiling max range is applied, since they are useful for
+	 * baseline CPR debugging.
+	 */
+	buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+		return 0;
+	}
+
+	for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+		pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+			cpr_vreg->floor_volt[i],
+			i < cpr_vreg->num_corners ? " " : "");
+	cpr_info(cpr_vreg, "Final floor override voltages: [%s] uV\n", buf);
+	kfree(buf);
+
+	return 0;
+}
+
+static int cpr_init_step_quotient(struct platform_device *pdev,
+		  struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int len = 0;
+	u32 step_quot[CPR_NUM_RING_OSC];
+	int i, rc;
+
+	if (!of_find_property(of_node, "qcom,cpr-step-quotient", &len)) {
+		cpr_err(cpr_vreg, "qcom,cpr-step-quotient property missing\n");
+		return -EINVAL;
+	}
+
+	if (len == sizeof(u32)) {
+		/* Single step quotient used for all ring oscillators. */
+		rc = of_property_read_u32(of_node, "qcom,cpr-step-quotient",
+					step_quot);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->step_quotient[i] = step_quot[0];
+	} else if (len == sizeof(u32) * CPR_NUM_RING_OSC) {
+		/* Unique step quotient used per ring oscillator. */
+		rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-step-quotient", step_quot, CPR_NUM_RING_OSC);
+		if (rc) {
+			cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			cpr_vreg->step_quotient[i]
+				= step_quot[cpr_vreg->cpr_fuse_ro_sel[i]];
+	} else {
+		cpr_err(cpr_vreg, "qcom,cpr-step-quotient has invalid length=%d\n",
+			len);
+		return -EINVAL;
+	}
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+		cpr_debug(cpr_vreg, "step_quotient[%d]=%u\n", i,
+			cpr_vreg->step_quotient[i]);
+
+	return 0;
+}
+
+static int cpr_init_cpr_parameters(struct platform_device *pdev,
+					  struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc = 0;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-ref-clk",
+			  &cpr_vreg->ref_clk_khz, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-delay",
+			  &cpr_vreg->timer_delay_us, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-up",
+			  &cpr_vreg->timer_cons_up, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-down",
+			  &cpr_vreg->timer_cons_down, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-irq-line",
+			  &cpr_vreg->irq_line, rc);
+	if (rc)
+		return rc;
+
+	rc = cpr_init_step_quotient(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-up-threshold",
+			  &cpr_vreg->up_threshold, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-down-threshold",
+			  &cpr_vreg->down_threshold, rc);
+	if (rc)
+		return rc;
+	cpr_info(cpr_vreg, "up threshold = %u, down threshold = %u\n",
+		cpr_vreg->up_threshold, cpr_vreg->down_threshold);
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-idle-clocks",
+			  &cpr_vreg->idle_clocks, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-gcnt-time",
+			  &cpr_vreg->gcnt_time_us, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-up-limit",
+			  &cpr_vreg->vdd_apc_step_up_limit, rc);
+	if (rc)
+		return rc;
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-down-limit",
+			  &cpr_vreg->vdd_apc_step_down_limit, rc);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+				  &cpr_vreg->clamp_timer_interval);
+	if (rc && rc != -EINVAL) {
+		cpr_err(cpr_vreg,
+			"error reading qcom,cpr-clamp-timer-interval, rc=%d\n",
+			rc);
+		return rc;
+	}
+
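+	/* Limit the interval to the maximum the register field can represent. */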
+	cpr_vreg->clamp_timer_interval = min(cpr_vreg->clamp_timer_interval,
+					(u32)RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+	/* Init module parameter with the DT value */
+	cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
+	cpr_info(cpr_vreg, "CPR is %s by default.\n",
+		cpr_vreg->enable ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static void cpr_regulator_switch_adj_cpus(struct cpr_regulator *cpr_vreg)
+{
+	cpr_vreg->last_volt = cpr_vreg->adj_cpus_last_volt
+					[cpr_vreg->online_cpus];
+	cpr_vreg->save_ctl = cpr_vreg->adj_cpus_save_ctl[cpr_vreg->online_cpus];
+	cpr_vreg->save_irq = cpr_vreg->adj_cpus_save_irq[cpr_vreg->online_cpus];
+
+	if (cpr_vreg->adj_cpus_quot_adjust)
+		cpr_vreg->quot_adjust = cpr_vreg->adj_cpus_quot_adjust
+						[cpr_vreg->online_cpus];
+	if (cpr_vreg->adj_cpus_open_loop_volt)
+		cpr_vreg->open_loop_volt
+			= cpr_vreg->adj_cpus_open_loop_volt
+				[cpr_vreg->online_cpus];
+	if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+		cpr_vreg->ceiling_volt = cpr_vreg->open_loop_volt;
+}
+
+static void cpr_regulator_set_online_cpus(struct cpr_regulator *cpr_vreg)
+{
+	int i, j;
+
+	cpr_vreg->online_cpus = 0;
+	get_online_cpus();
+	for_each_online_cpu(i)
+		for (j = 0; j < cpr_vreg->num_adj_cpus; j++)
+			if (i == cpr_vreg->adj_cpus[j])
+				cpr_vreg->online_cpus++;
+	put_online_cpus();
+}
+
+static int cpr_regulator_cpu_callback(struct notifier_block *nb,
+					    unsigned long action, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nb, struct cpr_regulator,
+					cpu_notifier);
+	int cpu = (long)data;
+	int prev_online_cpus, rc, i;
+
+	action &= ~CPU_TASKS_FROZEN;
+
+	if (action != CPU_UP_PREPARE && action != CPU_UP_CANCELED
+	    && action != CPU_DEAD)
+		return NOTIFY_OK;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_vreg->skip_voltage_change_during_suspend
+	    && cpr_vreg->is_cpr_suspended) {
+		/* Do nothing during system suspend/resume */
+		goto done;
+	}
+
+	prev_online_cpus = cpr_vreg->online_cpus;
+	cpr_regulator_set_online_cpus(cpr_vreg);
+
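+	/*
+	 * A CPU handled at CPU_UP_PREPARE is not yet counted by
+	 * for_each_online_cpu(), so include it explicitly when it is one of
+	 * the adjustment CPUs.
+	 */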
+	if (action == CPU_UP_PREPARE)
+		for (i = 0; i < cpr_vreg->num_adj_cpus; i++)
+			if (cpu == cpr_vreg->adj_cpus[i]) {
+				cpr_vreg->online_cpus++;
+				break;
+			}
+
+	if (cpr_vreg->online_cpus == prev_online_cpus)
+		goto done;
+
+	cpr_debug(cpr_vreg, "adjusting corner %d quotient for %d cpus\n",
+		cpr_vreg->corner, cpr_vreg->online_cpus);
+
+	cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+	if (cpr_vreg->corner) {
+		rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+				cpr_vreg->corner, true);
+		if (rc)
+			cpr_err(cpr_vreg, "could not update quotient, rc=%d\n",
+				rc);
+	}
+
+done:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return NOTIFY_OK;
+}
+
+static void cpr_pm_disable(struct cpr_regulator *cpr_vreg, bool disable)
+{
+	u32 reg_val;
+
+	if (cpr_vreg->is_cpr_suspended)
+		return;
+
+	reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+
+	if (disable) {
+		/* Proceed only if CPR is enabled */
+		if (!(reg_val & RBCPR_CTL_LOOP_EN))
+			return;
+		cpr_ctl_disable(cpr_vreg);
+		cpr_vreg->cpr_disabled_in_pc = true;
+	} else {
+		/* Proceed only if CPR was disabled in PM_ENTER */
+		if (!cpr_vreg->cpr_disabled_in_pc)
+			return;
+		cpr_vreg->cpr_disabled_in_pc = false;
+		cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+	}
+
+	/* Make sure register write is complete */
+	mb();
+}
+
+static int cpr_pm_callback(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nb,
+			struct cpr_regulator, pm_notifier);
+
+	if (action != CPU_PM_ENTER && action != CPU_PM_ENTER_FAILED &&
+			action != CPU_PM_EXIT)
+		return NOTIFY_OK;
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		cpr_pm_disable(cpr_vreg, true);
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		cpr_pm_disable(cpr_vreg, false);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int cpr_parse_adj_cpus_init_voltage(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j, k, tuple_count, tuple_match, len, offset;
+	int *temp;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		   NULL))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for voltage adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+		* cpr_vreg->num_corners;
+
+	temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->adj_cpus_open_loop_volt = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_open_loop_volt) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
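+	/*
+	 * Row 0 points at one contiguous allocation; the remaining per
+	 * online-CPU-count row pointers are set at (num_corners + 1) entry
+	 * offsets within it.
+	 */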
+	cpr_vreg->adj_cpus_open_loop_volt[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_open_loop_volt[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+		cpr_vreg->adj_cpus_open_loop_volt[i] =
+			cpr_vreg->adj_cpus_open_loop_volt[0] +
+			i * (cpr_vreg->num_corners + 1);
+
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		temp, len);
+	if (rc) {
+		cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	cpr_debug(cpr_vreg, "Open loop voltage based on number of online CPUs:\n");
+	offset = tuple_match * cpr_vreg->num_corners *
+			(cpr_vreg->num_adj_cpus + 1);
+
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+			k = j - 1 + offset;
+
+			cpr_vreg->adj_cpus_open_loop_volt[i][j]
+				= cpr_vreg->open_loop_volt[j] + temp[k];
+			cpr_vreg->adj_cpus_open_loop_volt[i][j]
+			    = DIV_ROUND_UP(cpr_vreg->
+					adj_cpus_open_loop_volt[i][j],
+				cpr_vreg->step_volt) * cpr_vreg->step_volt;
+
+			if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					> cpr_vreg->ceiling_volt[j])
+				cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					= cpr_vreg->ceiling_volt[j];
+			if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					< cpr_vreg->floor_volt[j])
+				cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					= cpr_vreg->floor_volt[j];
+
+			cpr_debug(cpr_vreg, "cpus=%d, corner=%d, volt=%d\n",
+				i, j, cpr_vreg->adj_cpus_open_loop_volt[i][j]);
+		}
+		offset += cpr_vreg->num_corners;
+	}
+
+	cpr_vreg->adj_cpus_open_loop_volt_as_ceiling
+		= of_property_read_bool(dev->of_node,
+			"qcom,cpr-online-cpu-init-voltage-as-ceiling");
+done:
+	kfree(temp);
+	return rc;
+}
+
+static int cpr_parse_adj_cpus_target_quot(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j, k, tuple_count, tuple_match, len, offset;
+	int *temp;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		   NULL))
+		return 0;
+
+	if (cpr_vreg->cpr_fuse_map_count) {
+		if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+			/* No matching index to use for quotient adjustment. */
+			return 0;
+		}
+		tuple_count = cpr_vreg->cpr_fuse_map_count;
+		tuple_match = cpr_vreg->cpr_fuse_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+		* cpr_vreg->num_corners;
+
+	temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->adj_cpus_quot_adjust = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_quot_adjust) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	cpr_vreg->adj_cpus_quot_adjust[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_quot_adjust[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+		cpr_vreg->adj_cpus_quot_adjust[i] =
+			cpr_vreg->adj_cpus_quot_adjust[0] +
+			i * (cpr_vreg->num_corners + 1);
+
+
+	rc = of_property_read_u32_array(dev->of_node,
+		"qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		temp, len);
+	if (rc) {
+		cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-quotient-adjustment, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	cpr_debug(cpr_vreg, "Target quotients based on number of online CPUs:\n");
+	offset = tuple_match * cpr_vreg->num_corners *
+			(cpr_vreg->num_adj_cpus + 1);
+
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+			k = j - 1 + offset;
+
+			cpr_vreg->adj_cpus_quot_adjust[i][j] =
+					cpr_vreg->quot_adjust[j] - temp[k];
+
+			cpr_debug(cpr_vreg, "cpus=%d, corner=%d, quot=%d\n",
+				i, j,
+				cpr_vreg->cpr_fuse_target_quot[
+							cpr_vreg->corner_map[j]]
+					- cpr_vreg->adj_cpus_quot_adjust[i][j]);
+		}
+		offset += cpr_vreg->num_corners;
+	}
+
+done:
+	kfree(temp);
+	return rc;
+}
+
+static int cpr_init_per_cpu_adjustments(struct cpr_regulator *cpr_vreg,
+		struct device *dev)
+{
+	int rc, i, j;
+
+	if (!of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+		   NULL)
+	    && !of_find_property(dev->of_node,
+		   "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+		   NULL)) {
+		/* No per-online CPU adjustment needed */
+		return 0;
+	}
+
+	if (!cpr_vreg->num_adj_cpus) {
+		cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_parse_adj_cpus_init_voltage(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_init_voltage failed: rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr_parse_adj_cpus_target_quot(cpr_vreg, dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_target_quot failed: rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	cpr_vreg->adj_cpus_last_volt = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_ctl = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_irq = devm_kzalloc(dev,
+				sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_last_volt || !cpr_vreg->adj_cpus_save_ctl ||
+		!cpr_vreg->adj_cpus_save_irq) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->adj_cpus_last_volt[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_ctl[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	cpr_vreg->adj_cpus_save_irq[0] = devm_kzalloc(dev,
+				sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+				* (cpr_vreg->num_corners + 1),
+				GFP_KERNEL);
+	if (!cpr_vreg->adj_cpus_last_volt[0] ||
+		!cpr_vreg->adj_cpus_save_ctl[0] ||
+		!cpr_vreg->adj_cpus_save_irq[0]) {
+		cpr_err(cpr_vreg, "Could not allocate memory\n");
+		return -ENOMEM;
+	}
+	for (i = 1; i <= cpr_vreg->num_adj_cpus; i++) {
+		j = i * (cpr_vreg->num_corners + 1);
+		cpr_vreg->adj_cpus_last_volt[i] =
+			cpr_vreg->adj_cpus_last_volt[0] + j;
+		cpr_vreg->adj_cpus_save_ctl[i] =
+			cpr_vreg->adj_cpus_save_ctl[0] + j;
+		cpr_vreg->adj_cpus_save_irq[i] =
+			cpr_vreg->adj_cpus_save_irq[0] + j;
+	}
+
+
+	for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+		for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+
+			cpr_vreg->adj_cpus_save_ctl[i][j] =
+				cpr_vreg->save_ctl[j];
+			cpr_vreg->adj_cpus_save_irq[i][j] =
+				cpr_vreg->save_irq[j];
+
+			cpr_vreg->adj_cpus_last_volt[i][j]
+				= cpr_vreg->adj_cpus_open_loop_volt
+				? cpr_vreg->adj_cpus_open_loop_volt[i][j]
+					: cpr_vreg->open_loop_volt[j];
+		}
+	}
+
+	cpr_regulator_set_online_cpus(cpr_vreg);
+	cpr_debug(cpr_vreg, "%d cpus online\n", cpr_vreg->online_cpus);
+
+	devm_kfree(dev, cpr_vreg->last_volt);
+	devm_kfree(dev, cpr_vreg->save_ctl);
+	devm_kfree(dev, cpr_vreg->save_irq);
+	if (cpr_vreg->adj_cpus_quot_adjust)
+		devm_kfree(dev, cpr_vreg->quot_adjust);
+	if (cpr_vreg->adj_cpus_open_loop_volt)
+		devm_kfree(dev, cpr_vreg->open_loop_volt);
+	if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+		devm_kfree(dev, cpr_vreg->ceiling_volt);
+
+	cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+	cpr_vreg->skip_voltage_change_during_suspend
+			= of_property_read_bool(dev->of_node,
+				"qcom,cpr-skip-voltage-change-during-suspend");
+
+	cpr_vreg->cpu_notifier.notifier_call = cpr_regulator_cpu_callback;
+	register_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+	return rc;
+}
+
+static int cpr_init_pm_notification(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+
+	/* enabled only for single-core designs */
+	if (cpr_vreg->num_adj_cpus != 1) {
+		pr_warn("qcom,cpr-cpus not defined or invalid %d\n",
+					cpr_vreg->num_adj_cpus);
+		return 0;
+	}
+
+	cpr_vreg->pm_notifier.notifier_call = cpr_pm_callback;
+	rc = cpu_pm_register_notifier(&cpr_vreg->pm_notifier);
+	if (rc)
+		cpr_err(cpr_vreg, "Unable to register pm notifier rc=%d\n", rc);
+
+	return rc;
+}
+
+static int cpr_rpm_apc_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	int rc, len = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	if (!of_find_property(of_node, "rpm-apc-supply", NULL))
+		return 0;
+
+	cpr_vreg->rpm_apc_vreg = devm_regulator_get(&pdev->dev, "rpm-apc");
+	if (IS_ERR_OR_NULL(cpr_vreg->rpm_apc_vreg)) {
+		rc = PTR_RET(cpr_vreg->rpm_apc_vreg);
+		if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "devm_regulator_get: rpm-apc: rc=%d\n",
+					rc);
+		return rc;
+	}
+
+	if (!of_find_property(of_node, "qcom,rpm-apc-corner-map", &len)) {
+		cpr_err(cpr_vreg,
+			"qcom,rpm-apc-corner-map missing\n");
+		return -EINVAL;
+	}
+	if (len != cpr_vreg->num_corners * sizeof(u32)) {
+		cpr_err(cpr_vreg,
+			"qcom,rpm-apc-corner-map length=%d is invalid: required:%d\n",
+			len, cpr_vreg->num_corners);
+		return -EINVAL;
+	}
+
+	cpr_vreg->rpm_apc_corner_map = devm_kzalloc(&pdev->dev,
+		(cpr_vreg->num_corners + 1) *
+		sizeof(*cpr_vreg->rpm_apc_corner_map), GFP_KERNEL);
+	if (!cpr_vreg->rpm_apc_corner_map) {
+		cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->rpm_apc_corner_map\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,rpm-apc-corner-map",
+		&cpr_vreg->rpm_apc_corner_map[1], cpr_vreg->num_corners);
+	if (rc)
+		cpr_err(cpr_vreg, "read qcom,rpm-apc-corner-map failed, rc = %d\n",
+				rc);
+
+	return rc;
+}
+
+static int cpr_vsens_init(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	int rc = 0, len = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+
+	if (of_find_property(of_node, "vdd-vsens-voltage-supply", NULL)) {
+		cpr_vreg->vdd_vsens_voltage = devm_regulator_get(&pdev->dev,
+							"vdd-vsens-voltage");
+		if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_voltage)) {
+			rc = PTR_ERR(cpr_vreg->vdd_vsens_voltage);
+			cpr_vreg->vdd_vsens_voltage = NULL;
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			/* device not found */
+			cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-voltage: rc=%d\n",
+					rc);
+			return 0;
+		}
+	}
+
+	if (of_find_property(of_node, "vdd-vsens-corner-supply", NULL)) {
+		cpr_vreg->vdd_vsens_corner = devm_regulator_get(&pdev->dev,
+							"vdd-vsens-corner");
+		if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_corner)) {
+			rc = PTR_ERR(cpr_vreg->vdd_vsens_corner);
+			cpr_vreg->vdd_vsens_corner = NULL;
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			/* device not found */
+			cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-corner: rc=%d\n",
+					rc);
+			return 0;
+		}
+
+		if (!of_find_property(of_node, "qcom,vsens-corner-map", &len)) {
+			cpr_err(cpr_vreg, "qcom,vsens-corner-map missing\n");
+			return -EINVAL;
+		}
+
+		if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
+			cpr_err(cpr_vreg, "qcom,vsens-corner-map length=%d is invalid: required:%d\n",
+				len, cpr_vreg->num_fuse_corners);
+			return -EINVAL;
+		}
+
+		cpr_vreg->vsens_corner_map = devm_kcalloc(&pdev->dev,
+					(cpr_vreg->num_fuse_corners + 1),
+			sizeof(*cpr_vreg->vsens_corner_map), GFP_KERNEL);
+		if (!cpr_vreg->vsens_corner_map)
+			return -ENOMEM;
+
+		rc = of_property_read_u32_array(of_node,
+					"qcom,vsens-corner-map",
+					&cpr_vreg->vsens_corner_map[1],
+					cpr_vreg->num_fuse_corners);
+		if (rc)
+			cpr_err(cpr_vreg, "read qcom,vsens-corner-map failed, rc = %d\n",
+				rc);
+	}
+
+	return rc;
+}
+
+static int cpr_disable_on_temp(struct cpr_regulator *cpr_vreg, bool disable)
+{
+	int rc = 0;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	if (cpr_vreg->cpr_fuse_disable ||
+		(cpr_vreg->cpr_thermal_disable == disable))
+		goto out;
+
+	cpr_vreg->cpr_thermal_disable = disable;
+
+	if (cpr_vreg->enable && cpr_vreg->corner) {
+		if (disable) {
+			cpr_debug(cpr_vreg, "Disabling CPR - below temperature threshold [%d]\n",
+					cpr_vreg->cpr_disable_temp_threshold);
+			/* disable CPR and force open-loop */
+			cpr_ctl_disable(cpr_vreg);
+			rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+						cpr_vreg->corner, false);
+			if (rc < 0)
+				cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+						rc);
+		} else {
+			/* enable CPR */
+			cpr_debug(cpr_vreg, "Enabling CPR - above temperature threshold [%d]\n",
+					cpr_vreg->cpr_enable_temp_threshold);
+			rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+						cpr_vreg->corner, true);
+			if (rc < 0)
+				cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+						rc);
+		}
+	}
+out:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+	return rc;
+}
+
+static void tsens_threshold_notify(struct therm_threshold *tsens_cb_data)
+{
+	struct threshold_info *info = tsens_cb_data->parent;
+	struct cpr_regulator *cpr_vreg = container_of(info,
+			struct cpr_regulator, tsens_threshold_config);
+	int rc = 0;
+
+	cpr_debug(cpr_vreg, "Triggered tsens-notification trip_type=%d for thermal_zone_id=%d\n",
+		tsens_cb_data->trip_triggered, tsens_cb_data->sensor_id);
+
+	switch (tsens_cb_data->trip_triggered) {
+	case THERMAL_TRIP_CONFIGURABLE_HI:
+		rc = cpr_disable_on_temp(cpr_vreg, false);
+		if (rc < 0)
+			cpr_err(cpr_vreg, "Failed to enable CPR, rc=%d\n", rc);
+		break;
+	case THERMAL_TRIP_CONFIGURABLE_LOW:
+		rc = cpr_disable_on_temp(cpr_vreg, true);
+		if (rc < 0)
+			cpr_err(cpr_vreg, "Failed to disable CPR, rc=%d\n", rc);
+		break;
+	default:
+		cpr_debug(cpr_vreg, "trip-type %d not supported\n",
+				tsens_cb_data->trip_triggered);
+		break;
+	}
+
+	if (tsens_cb_data->cur_state != tsens_cb_data->trip_triggered) {
+		rc = sensor_mgr_set_threshold(tsens_cb_data->sensor_id,
+						tsens_cb_data->threshold);
+		if (rc < 0)
+			cpr_err(cpr_vreg,
+			"Failed to set temp. threshold, rc=%d\n", rc);
+		else
+			tsens_cb_data->cur_state =
+				tsens_cb_data->trip_triggered;
+	}
+}
+
+static int cpr_check_tsens(struct cpr_regulator *cpr_vreg)
+{
+	int rc = 0;
+	struct tsens_device tsens_dev;
+	unsigned long temp = 0;
+	bool disable;
+
+	if (tsens_is_ready() > 0) {
+		tsens_dev.sensor_num = cpr_vreg->tsens_id;
+		rc = tsens_get_temp(&tsens_dev, &temp);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "Failed to read tsens, rc=%d\n", rc);
+			return rc;
+		}
+
+		disable = (int) temp <= cpr_vreg->cpr_disable_temp_threshold;
+		rc = cpr_disable_on_temp(cpr_vreg, disable);
+		if (rc)
+			cpr_err(cpr_vreg, "Failed to %s CPR, rc=%d\n",
+					disable ? "disable" : "enable", rc);
+	}
+
+	return rc;
+}
+
+static int cpr_thermal_init(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+	struct device_node *of_node = cpr_vreg->dev->of_node;
+
+	if (!of_find_property(of_node, "qcom,cpr-thermal-sensor-id", NULL))
+		return 0;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-thermal-sensor-id",
+			  &cpr_vreg->tsens_id, rc);
+	if (rc < 0)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-disable-temp-threshold",
+			  &cpr_vreg->cpr_disable_temp_threshold, rc);
+	if (rc < 0)
+		return rc;
+
+	CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-enable-temp-threshold",
+			  &cpr_vreg->cpr_enable_temp_threshold, rc);
+	if (rc < 0)
+		return rc;
+
+	if (cpr_vreg->cpr_disable_temp_threshold >=
+				cpr_vreg->cpr_enable_temp_threshold) {
+		cpr_err(cpr_vreg, "Invalid temperature threshold cpr_disable_temp[%d] >= cpr_enable_temp[%d]\n",
+				cpr_vreg->cpr_disable_temp_threshold,
+				cpr_vreg->cpr_enable_temp_threshold);
+		return -EINVAL;
+	}
+
+	cpr_vreg->cpr_disable_on_temperature = true;
+
+	return 0;
+}
+
+static int cpr_init_cpr(struct platform_device *pdev,
+			       struct cpr_regulator *cpr_vreg)
+{
+	struct resource *res;
+	int rc = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr_clk");
+	if (res && res->start)
+		cpr_vreg->rbcpr_clk_addr = res->start;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
+	if (!res || !res->start) {
+		cpr_err(cpr_vreg, "missing rbcpr address: res=%p\n", res);
+		return -EINVAL;
+	}
+	cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
+					    resource_size(res));
+
+	/* Init CPR configuration parameters */
+	rc = cpr_init_cpr_parameters(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	rc = cpr_init_cpr_efuse(pdev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	/* Load per corner ceiling and floor voltages if they exist. */
+	rc = cpr_init_ceiling_floor_override_voltages(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Limit open loop voltages based upon per corner ceiling and floor
+	 * voltages.
+	 */
+	rc = cpr_limit_open_loop_voltage(cpr_vreg);
+	if (rc)
+		return rc;
+
+	/*
+	 * Fill the OPP table for this device with virtual voltage corner to
+	 * open-loop voltage pairs.
+	 */
+	rc = cpr_populate_opp_table(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Reduce the ceiling voltage if allowed. */
+	rc = cpr_reduce_ceiling_voltage(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Load CPR floor to ceiling range if exist. */
+	rc = cpr_init_floor_to_ceiling_range(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Init all voltage set points of APC regulator for CPR */
+	rc = cpr_init_cpr_voltages(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	/* Get and Init interrupt */
+	cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
+	if (cpr_vreg->cpr_irq <= 0) {
+		cpr_err(cpr_vreg, "missing CPR IRQ\n");
+		return -EINVAL;
+	}
+
+	/* Configure CPR HW but keep it disabled */
+	rc = cpr_config(cpr_vreg, &pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = request_threaded_irq(cpr_vreg->cpr_irq, NULL, cpr_irq_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr",
+				  cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "CPR: request irq failed for IRQ %d\n",
+				cpr_vreg->cpr_irq);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Create a set of virtual fuse rows if optional device tree properties are
+ * present.
+ */
+static int cpr_remap_efuse_data(struct platform_device *pdev,
+				 struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct property *prop;
+	u64 fuse_param;
+	u32 *temp;
+	int size, rc, i, bits, in_row, in_bit, out_row, out_bit;
+
+	prop = of_find_property(of_node, "qcom,fuse-remap-source", NULL);
+	if (!prop) {
+		/* No fuse remapping needed. */
+		return 0;
+	}
+
+	size = prop->length / sizeof(u32);
+	if (size == 0 || size % 4) {
+		cpr_err(cpr_vreg, "qcom,fuse-remap-source has invalid size=%d\n",
+			size);
+		return -EINVAL;
+	}
+	size /= 4;
+
+	rc = of_property_read_u32(of_node, "qcom,fuse-remap-base-row",
+				&cpr_vreg->remapped_row_base);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,fuse-remap-base-row, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	temp = kzalloc(sizeof(*temp) * size * 4, GFP_KERNEL);
+	if (!temp) {
+		cpr_err(cpr_vreg, "temp memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,fuse-remap-source", temp,
+					size * 4);
+	if (rc) {
+		cpr_err(cpr_vreg, "could not read qcom,fuse-remap-source, rc=%d\n",
+			rc);
+		goto done;
+	}
+
+	/*
+	 * Format of tuples in qcom,fuse-remap-source property:
+	 * <row bit-offset bit-count fuse-read-method>
+	 */
+	for (i = 0, bits = 0; i < size; i++)
+		bits += temp[i * 4 + 2];
+
+	cpr_vreg->num_remapped_rows = DIV_ROUND_UP(bits, 64);
+	cpr_vreg->remapped_row = devm_kzalloc(&pdev->dev,
+		sizeof(*cpr_vreg->remapped_row) * cpr_vreg->num_remapped_rows,
+		GFP_KERNEL);
+	if (!cpr_vreg->remapped_row) {
+		cpr_err(cpr_vreg, "remapped_row memory allocation failed\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+
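+	/*
+	 * Concatenate each source fuse segment, least significant bits first,
+	 * into the 64-bit remapped rows, handling segments that straddle a
+	 * row boundary.
+	 */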
+	for (i = 0, out_row = 0, out_bit = 0; i < size; i++) {
+		in_row = temp[i * 4];
+		in_bit = temp[i * 4 + 1];
+		bits = temp[i * 4 + 2];
+
+		while (bits > 64) {
+			fuse_param = cpr_read_efuse_param(cpr_vreg, in_row,
+					in_bit, 64, temp[i * 4 + 3]);
+
+			cpr_vreg->remapped_row[out_row++]
+				|= fuse_param << out_bit;
+			if (out_bit > 0)
+				cpr_vreg->remapped_row[out_row]
+					|= fuse_param >> (64 - out_bit);
+
+			bits -= 64;
+			in_bit += 64;
+		}
+
+		fuse_param = cpr_read_efuse_param(cpr_vreg, in_row, in_bit,
+						bits, temp[i * 4 + 3]);
+
+		cpr_vreg->remapped_row[out_row] |= fuse_param << out_bit;
+		if (bits < 64 - out_bit) {
+			out_bit += bits;
+		} else {
+			out_row++;
+			if (out_bit > 0)
+				cpr_vreg->remapped_row[out_row]
+					|= fuse_param >> (64 - out_bit);
+			out_bit = bits - (64 - out_bit);
+		}
+	}
+
+done:
+	kfree(temp);
+	return rc;
+}
+
+static int cpr_efuse_init(struct platform_device *pdev,
+				 struct cpr_regulator *cpr_vreg)
+{
+	struct resource *res;
+	int len;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+	if (!res || !res->start) {
+		cpr_err(cpr_vreg, "efuse_addr missing: res=%p\n", res);
+		return -EINVAL;
+	}
+
+	cpr_vreg->efuse_addr = res->start;
+	len = res->end - res->start + 1;
+
+	cpr_info(cpr_vreg, "efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+	cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
+	if (!cpr_vreg->efuse_base) {
+		cpr_err(cpr_vreg, "Unable to map efuse_addr %pa\n",
+				&cpr_vreg->efuse_addr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void cpr_efuse_free(struct cpr_regulator *cpr_vreg)
+{
+	iounmap(cpr_vreg->efuse_base);
+}
+
+static void cpr_parse_cond_min_volt_fuse(struct cpr_regulator *cpr_vreg,
+						struct device_node *of_node)
+{
+	int rc;
+	u32 fuse_sel[5];
+	/*
+	 * Restrict all pvs corner voltages to a minimum value of
+	 * qcom,cpr-cond-min-voltage if the fuse defined in
+	 * qcom,cpr-fuse-cond-min-volt-sel does not read back with
+	 * the expected value.
+	 */
+	rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-cond-min-volt-sel", fuse_sel, 5);
+	if (!rc) {
+		if (!cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel))
+			cpr_vreg->flags |= FLAGS_SET_MIN_VOLTAGE;
+	}
+}
+
+static void cpr_parse_speed_bin_fuse(struct cpr_regulator *cpr_vreg,
+				struct device_node *of_node)
+{
+	int rc;
+	u64 fuse_bits;
+	u32 fuse_sel[4];
+	u32 speed_bits;
+
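+	/*
+	 * The qcom,speed-bin-fuse-sel tuple is interpreted here as
+	 * <row bit-offset bit-count read-method>.
+	 */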
+	rc = of_property_read_u32_array(of_node,
+			"qcom,speed-bin-fuse-sel", fuse_sel, 4);
+
+	if (!rc) {
+		fuse_bits = cpr_read_efuse_row(cpr_vreg,
+				fuse_sel[0], fuse_sel[3]);
+		speed_bits = (fuse_bits >> fuse_sel[1]) &
+			((1 << fuse_sel[2]) - 1);
+		cpr_info(cpr_vreg, "[row: %d]: 0x%llx, speed_bits = %d\n",
+				fuse_sel[0], fuse_bits, speed_bits);
+		cpr_vreg->speed_bin = speed_bits;
+	} else {
+		cpr_vreg->speed_bin = SPEED_BIN_NONE;
+	}
+}
+
+static int cpr_voltage_uplift_enable_check(struct cpr_regulator *cpr_vreg,
+					struct device_node *of_node)
+{
+	int rc;
+	u32 fuse_sel[5];
+	u32 uplift_speed_bin;
+
+	rc = of_property_read_u32_array(of_node,
+			"qcom,cpr-fuse-uplift-sel", fuse_sel, 5);
+	if (!rc) {
+		rc = of_property_read_u32(of_node,
+				"qcom,cpr-uplift-speed-bin",
+				&uplift_speed_bin);
+		if (rc < 0) {
+			cpr_err(cpr_vreg,
+				"qcom,cpr-uplift-speed-bin missing\n");
+			return rc;
+		}
+		if (cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel)
+			&& (uplift_speed_bin == cpr_vreg->speed_bin)
+			&& !(cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE)) {
+			cpr_vreg->flags |= FLAGS_UPLIFT_QUOT_VOLT;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Read in the number of fuse corners and then allocate memory for arrays that
+ * are sized based upon the number of fuse corners.
+ */
+static int cpr_fuse_corner_array_alloc(struct device *dev,
+					struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+	size_t len;
+
+	rc = of_property_read_u32(dev->of_node, "qcom,cpr-fuse-corners",
+				&cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "qcom,cpr-fuse-corners missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	if (cpr_vreg->num_fuse_corners < CPR_FUSE_CORNER_MIN
+	    || cpr_vreg->num_fuse_corners > CPR_FUSE_CORNER_LIMIT) {
+		cpr_err(cpr_vreg, "corner count=%d is invalid\n",
+			cpr_vreg->num_fuse_corners);
+		return -EINVAL;
+	}
+
+	/*
+	 * The arrays sized based on the fuse corner count ignore element 0
+	 * in order to simplify indexing throughout the driver since min_uV = 0
+	 * cannot be passed into a set_voltage() callback.
+	 */
+	len = cpr_vreg->num_fuse_corners + 1;
+
+	cpr_vreg->pvs_corner_v = devm_kzalloc(dev,
+			len * sizeof(*cpr_vreg->pvs_corner_v), GFP_KERNEL);
+	cpr_vreg->cpr_fuse_target_quot = devm_kzalloc(dev,
+		len * sizeof(*cpr_vreg->cpr_fuse_target_quot), GFP_KERNEL);
+	cpr_vreg->cpr_fuse_ro_sel = devm_kzalloc(dev,
+			len * sizeof(*cpr_vreg->cpr_fuse_ro_sel), GFP_KERNEL);
+	cpr_vreg->fuse_ceiling_volt = devm_kzalloc(dev,
+		len * (sizeof(*cpr_vreg->fuse_ceiling_volt)), GFP_KERNEL);
+	cpr_vreg->fuse_floor_volt = devm_kzalloc(dev,
+		len * (sizeof(*cpr_vreg->fuse_floor_volt)), GFP_KERNEL);
+	cpr_vreg->step_quotient = devm_kzalloc(dev,
+		len * sizeof(*cpr_vreg->step_quotient), GFP_KERNEL);
+
+	if (cpr_vreg->pvs_corner_v == NULL || cpr_vreg->cpr_fuse_ro_sel == NULL
+	    || cpr_vreg->fuse_ceiling_volt == NULL
+	    || cpr_vreg->fuse_floor_volt == NULL
+	    || cpr_vreg->cpr_fuse_target_quot == NULL
+	    || cpr_vreg->step_quotient == NULL) {
+		cpr_err(cpr_vreg, "Could not allocate memory for CPR arrays\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int cpr_voltage_plan_init(struct platform_device *pdev,
+					struct cpr_regulator *cpr_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc, i;
+	u32 min_uv = 0;
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
+		&cpr_vreg->fuse_ceiling_volt[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-voltage-ceiling missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
+		&cpr_vreg->fuse_floor_volt[CPR_FUSE_CORNER_MIN],
+		cpr_vreg->num_fuse_corners);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "cpr-voltage-floor missing: rc=%d\n", rc);
+		return rc;
+	}
+
+	cpr_parse_cond_min_volt_fuse(cpr_vreg, of_node);
+	rc = cpr_voltage_uplift_enable_check(cpr_vreg, of_node);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "voltage uplift enable check failed, %d\n",
+			rc);
+		return rc;
+	}
+	if (cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE) {
+		of_property_read_u32(of_node, "qcom,cpr-cond-min-voltage",
+					&min_uv);
+		for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+		     i++)
+			if (cpr_vreg->fuse_ceiling_volt[i] < min_uv) {
+				cpr_vreg->fuse_ceiling_volt[i] = min_uv;
+				cpr_vreg->fuse_floor_volt[i] = min_uv;
+			} else if (cpr_vreg->fuse_floor_volt[i] < min_uv) {
+				cpr_vreg->fuse_floor_volt[i] = min_uv;
+			}
+	}
+
+	return 0;
+}
+
+static int cpr_mem_acc_init(struct platform_device *pdev,
+				struct cpr_regulator *cpr_vreg)
+{
+	int rc, size;
+	struct property *prop;
+	char *corner_map_str;
+
+	if (of_find_property(pdev->dev.of_node, "mem-acc-supply", NULL)) {
+		cpr_vreg->mem_acc_vreg = devm_regulator_get(&pdev->dev,
+							"mem-acc");
+		if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
+			rc = PTR_RET(cpr_vreg->mem_acc_vreg);
+			if (rc != -EPROBE_DEFER)
+				cpr_err(cpr_vreg,
+					"devm_regulator_get: mem-acc: rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	corner_map_str = "qcom,mem-acc-corner-map";
+	prop = of_find_property(pdev->dev.of_node, corner_map_str, NULL);
+	if (!prop) {
+		corner_map_str = "qcom,cpr-corner-map";
+		prop = of_find_property(pdev->dev.of_node, corner_map_str,
+					NULL);
+		if (!prop) {
+			cpr_err(cpr_vreg, "qcom,cpr-corner-map missing\n");
+			return -EINVAL;
+		}
+	}
+
+	size = prop->length / sizeof(u32);
+	cpr_vreg->mem_acc_corner_map = devm_kzalloc(&pdev->dev,
+					sizeof(int) * (size + 1),
+					GFP_KERNEL);
+	if (!cpr_vreg->mem_acc_corner_map)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, corner_map_str,
+			&cpr_vreg->mem_acc_corner_map[CPR_FUSE_CORNER_MIN],
+			size);
+	if (rc) {
+		cpr_err(cpr_vreg, "%s missing, rc = %d\n", corner_map_str, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int cpr_enable_set(void *data, u64 val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+	bool old_cpr_enable;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	old_cpr_enable = cpr_vreg->enable;
+	cpr_vreg->enable = val;
+
+	if (old_cpr_enable == cpr_vreg->enable)
+		goto _exit;
+
+	if (cpr_vreg->enable && cpr_vreg->cpr_fuse_disable) {
+		cpr_info(cpr_vreg,
+			"CPR permanently disabled due to fuse values\n");
+		cpr_vreg->enable = false;
+		goto _exit;
+	}
+
+	cpr_debug(cpr_vreg, "%s CPR [corner=%d, fuse_corner=%d]\n",
+		cpr_vreg->enable ? "enabling" : "disabling",
+		cpr_vreg->corner, cpr_vreg->corner_map[cpr_vreg->corner]);
+
+	if (cpr_vreg->corner) {
+		if (cpr_vreg->enable) {
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_clr(cpr_vreg);
+			cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+			cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+		} else {
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_set(cpr_vreg, 0);
+		}
+	}
+
+_exit:
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	return 0;
+}
+
+static int cpr_enable_get(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->enable;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_enable_fops, cpr_enable_get, cpr_enable_set,
+			"%llu\n");
+
+static int cpr_get_cpr_ceiling(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->ceiling_volt[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_ceiling_fops, cpr_get_cpr_ceiling, NULL,
+			"%llu\n");
+
+static int cpr_get_cpr_floor(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->floor_volt[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_floor_fops, cpr_get_cpr_floor, NULL,
+			"%llu\n");
+
+static int cpr_get_cpr_max_ceiling(void *data, u64 *val)
+{
+	struct cpr_regulator *cpr_vreg = data;
+
+	*val = cpr_vreg->cpr_max_ceiling[cpr_vreg->corner];
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_max_ceiling_fops, cpr_get_cpr_max_ceiling, NULL,
+			"%llu\n");
+
+static int cpr_debug_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t cpr_debug_info_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	struct cpr_regulator *cpr_vreg = file->private_data;
+	char *debugfs_buf;
+	ssize_t len, ret = 0;
+	u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+	u32 step_dn, step_up, error, error_lt0, busy;
+	int fuse_corner;
+
+	debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!debugfs_buf)
+		return -ENOMEM;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	fuse_corner = cpr_vreg->corner_map[cpr_vreg->corner];
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+		"corner = %d, current_volt = %d uV\n",
+		cpr_vreg->corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+	ret += len;
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"fuse_corner = %d, current_volt = %d uV\n",
+			fuse_corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+	ret += len;
+
+	ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+	gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel));
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_gcnt_target (%u) = 0x%02X\n", ro_sel, gcnt);
+	ret += len;
+
+	ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_ctl = 0x%02X\n", ctl);
+	ret += len;
+
+	irq_status = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_irq_status = 0x%02X\n", irq_status);
+	ret += len;
+
+	reg = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"rbcpr_result_0 = 0x%02X\n", reg);
+	ret += len;
+
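+	/* Decode the RBCPR_RESULT_0 step, error and busy fields. */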
+	step_dn = reg & 0x01;
+	step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"  [step_dn = %u", step_dn);
+	ret += len;
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", step_up = %u", step_up);
+	ret += len;
+
+	error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+				& RBCPR_RESULT0_ERROR_STEPS_MASK;
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error_steps = %u", error_steps);
+	ret += len;
+
+	error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error = %u", error);
+	ret += len;
+
+	error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", error_lt_0 = %u", error_lt0);
+	ret += len;
+
+	busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			", busy = %u]\n", busy);
+	ret += len;
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+	kfree(debugfs_buf);
+	return ret;
+}
+
+static const struct file_operations cpr_debug_info_fops = {
+	.open = cpr_debug_info_open,
+	.read = cpr_debug_info_read,
+};
+
+static int cpr_aging_debug_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+static ssize_t cpr_aging_debug_info_read(struct file *file, char __user *buff,
+				size_t count, loff_t *ppos)
+{
+	struct cpr_regulator *cpr_vreg = file->private_data;
+	struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+	char *debugfs_buf;
+	ssize_t len, ret = 0;
+	int i;
+
+	debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!debugfs_buf)
+		return -ENOMEM;
+
+	mutex_lock(&cpr_vreg->cpr_mutex);
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_adj_volt = [");
+	ret += len;
+
+	for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+		len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+				" %d", aging_info->voltage_adjust[i]);
+		ret += len;
+	}
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			" ]uV\n");
+	ret += len;
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_measurement_done = %s\n",
+			aging_info->cpr_aging_done ? "true" : "false");
+	ret += len;
+
+	len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+			"aging_measurement_error = %s\n",
+			aging_info->cpr_aging_error ? "true" : "false");
+	ret += len;
+
+	mutex_unlock(&cpr_vreg->cpr_mutex);
+
+	ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+	kfree(debugfs_buf);
+	return ret;
+}
+
+static const struct file_operations cpr_aging_debug_info_fops = {
+	.open = cpr_aging_debug_info_open,
+	.read = cpr_aging_debug_info_read,
+};
+
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{
+	struct dentry *temp;
+
+	if (IS_ERR_OR_NULL(cpr_debugfs_base)) {
+		cpr_err(cpr_vreg, "Could not create debugfs nodes since base directory is missing\n");
+		return;
+	}
+
+	cpr_vreg->debugfs = debugfs_create_dir(cpr_vreg->rdesc.name,
+						cpr_debugfs_base);
+	if (IS_ERR_OR_NULL(cpr_vreg->debugfs)) {
+		cpr_err(cpr_vreg, "debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("debug_info", S_IRUGO, cpr_vreg->debugfs,
+					cpr_vreg, &cpr_debug_info_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "debug_info node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_enable", S_IRUGO | S_IWUSR,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_enable node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_ceiling", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_ceiling_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_ceiling node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_floor", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_floor_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_floor node creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("cpr_max_ceiling", S_IRUGO,
+			cpr_vreg->debugfs, cpr_vreg, &cpr_max_ceiling_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		cpr_err(cpr_vreg, "cpr_max_ceiling node creation failed\n");
+		return;
+	}
+
+	if (cpr_vreg->aging_info) {
+		temp = debugfs_create_file("aging_debug_info", S_IRUGO,
+					cpr_vreg->debugfs, cpr_vreg,
+					&cpr_aging_debug_info_fops);
+		if (IS_ERR_OR_NULL(temp)) {
+			cpr_err(cpr_vreg, "aging_debug_info node creation failed\n");
+			return;
+		}
+	}
+}
+
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{
+	debugfs_remove_recursive(cpr_vreg->debugfs);
+}
+
+static void cpr_debugfs_base_init(void)
+{
+	cpr_debugfs_base = debugfs_create_dir("cpr-regulator", NULL);
+	if (IS_ERR_OR_NULL(cpr_debugfs_base))
+		pr_err("cpr-regulator debugfs base directory creation failed\n");
+}
+
+static void cpr_debugfs_base_remove(void)
+{
+	debugfs_remove_recursive(cpr_debugfs_base);
+}
+
+#else
+
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_base_init(void)
+{}
+
+static void cpr_debugfs_base_remove(void)
+{}
+
+#endif
+
+/**
+ * cpr_panic_callback() - panic notifier callback invoked when a kernel panic
+ *		occurs; it logs the current CPR corner, fuse corner and voltage.
+ * @nfb:	Notifier block pointer of CPR regulator
+ * @event:	Value passed unmodified to notifier function
+ * @data:	Pointer passed unmodified to notifier function
+ *
+ * Return: NOTIFY_OK
+ */
+static int cpr_panic_callback(struct notifier_block *nfb,
+			unsigned long event, void *data)
+{
+	struct cpr_regulator *cpr_vreg = container_of(nfb,
+				struct cpr_regulator, panic_notifier);
+	int corner, fuse_corner, volt;
+
+	corner = cpr_vreg->corner;
+	fuse_corner = cpr_vreg->corner_map[corner];
+	if (cpr_is_allowed(cpr_vreg))
+		volt = cpr_vreg->last_volt[corner];
+	else
+		volt = cpr_vreg->open_loop_volt[corner];
+
+	cpr_err(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
+		corner, fuse_corner, volt);
+
+	return NOTIFY_OK;
+}
+
+static int cpr_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct cpr_regulator *cpr_vreg;
+	struct regulator_desc *rdesc;
+	struct device *dev = &pdev->dev;
+	struct regulator_init_data *init_data = pdev->dev.platform_data;
+	int rc;
+
+	if (!pdev->dev.of_node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+	if (!init_data) {
+		dev_err(dev, "regulator init data is missing\n");
+		return -EINVAL;
+	} else {
+		init_data->constraints.input_uV
+			= init_data->constraints.max_uV;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+	}
+
+	cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
+				GFP_KERNEL);
+	if (!cpr_vreg) {
+		dev_err(dev, "Can't allocate cpr_regulator memory\n");
+		return -ENOMEM;
+	}
+
+	cpr_vreg->dev = &pdev->dev;
+	cpr_vreg->rdesc.name = init_data->constraints.name;
+	if (cpr_vreg->rdesc.name == NULL) {
+		dev_err(dev, "regulator-name missing\n");
+		return -EINVAL;
+	}
+
+	rc = cpr_fuse_corner_array_alloc(&pdev->dev, cpr_vreg);
+	if (rc)
+		return rc;
+
+	rc = cpr_mem_acc_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "mem_acc intialization error rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr_efuse_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Wrong eFuse address specified: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr_remap_efuse_data(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not remap fuse data: rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = cpr_check_redundant(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not check redundant fuse: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_read_fuse_revision(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not read fuse revision: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	cpr_parse_speed_bin_fuse(cpr_vreg, dev->of_node);
+	cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+	rc = cpr_read_ro_select(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not read RO select: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_find_fuse_map_match(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Could not determine fuse mapping match: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	rc = cpr_voltage_plan_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Wrong DT parameter specified: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_pvs_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize PVS wrong: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_vsens_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize vsens configuration failed rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr_apc_init(pdev, cpr_vreg);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "Initialize APC wrong: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_init_cpr(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize CPR failed: rc=%d\n", rc);
+		goto err_out;
+	}
+
+	rc = cpr_rpm_apc_init(pdev, cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Initialize RPM APC regulator failed rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = cpr_thermal_init(cpr_vreg);
+	if (rc) {
+		cpr_err(cpr_vreg, "Thermal intialization failed rc=%d\n", rc);
+		return rc;
+	}
+
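+	/* Optionally disable closed-loop CPR around power collapse. */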
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,disable-closed-loop-in-pc")) {
+		rc = cpr_init_pm_notification(cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg,
+				"cpr_init_pm_notification failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* Load per-online CPU adjustment data */
+	rc = cpr_init_per_cpu_adjustments(cpr_vreg, &pdev->dev);
+	if (rc) {
+		cpr_err(cpr_vreg, "cpr_init_per_cpu_adjustments failed: rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	/* Parse dependency parameters */
+	if (cpr_vreg->vdd_mx) {
+		rc = cpr_parse_vdd_mx_parameters(pdev, cpr_vreg);
+		if (rc) {
+			cpr_err(cpr_vreg, "parsing vdd_mx parameters failed: rc=%d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	cpr_efuse_free(cpr_vreg);
+
+	/*
+	 * Ensure that enable state accurately reflects the case in which CPR
+	 * is permanently disabled.
+	 */
+	cpr_vreg->enable &= !cpr_vreg->cpr_fuse_disable;
+
+	mutex_init(&cpr_vreg->cpr_mutex);
+
+	rdesc			= &cpr_vreg->rdesc;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+	rdesc->ops		= &cpr_corner_ops;
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = cpr_vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(cpr_vreg->rdev)) {
+		rc = PTR_ERR(cpr_vreg->rdev);
+		cpr_err(cpr_vreg, "regulator_register failed: rc=%d\n", rc);
+
+		cpr_apc_exit(cpr_vreg);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, cpr_vreg);
+	cpr_debugfs_init(cpr_vreg);
+
+	if (cpr_vreg->cpr_disable_on_temperature) {
+		rc = cpr_check_tsens(cpr_vreg);
+		if (rc < 0) {
+			cpr_err(cpr_vreg, "Unable to config CPR on tsens, rc=%d\n",
+									rc);
+			cpr_apc_exit(cpr_vreg);
+			cpr_debugfs_remove(cpr_vreg);
+			return rc;
+		}
+	}
+
+	/* Register panic notification callback */
+	cpr_vreg->panic_notifier.notifier_call = cpr_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&cpr_vreg->panic_notifier);
+
+	mutex_lock(&cpr_regulator_list_mutex);
+	list_add(&cpr_vreg->list, &cpr_regulator_list);
+	mutex_unlock(&cpr_regulator_list_mutex);
+
+	return 0;
+
+err_out:
+	cpr_efuse_free(cpr_vreg);
+	return rc;
+}
+
+static int cpr_regulator_remove(struct platform_device *pdev)
+{
+	struct cpr_regulator *cpr_vreg;
+
+	cpr_vreg = platform_get_drvdata(pdev);
+	if (cpr_vreg) {
+		/* Disable CPR */
+		if (cpr_is_allowed(cpr_vreg)) {
+			cpr_ctl_disable(cpr_vreg);
+			cpr_irq_set(cpr_vreg, 0);
+		}
+
+		mutex_lock(&cpr_regulator_list_mutex);
+		list_del(&cpr_vreg->list);
+		mutex_unlock(&cpr_regulator_list_mutex);
+
+		if (cpr_vreg->cpu_notifier.notifier_call)
+			unregister_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+		if (cpr_vreg->cpr_disable_on_temperature)
+			sensor_mgr_remove_threshold(
+				&cpr_vreg->tsens_threshold_config);
+
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+			&cpr_vreg->panic_notifier);
+
+		cpr_apc_exit(cpr_vreg);
+		cpr_debugfs_remove(cpr_vreg);
+		regulator_unregister(cpr_vreg->rdev);
+	}
+
+	return 0;
+}
+
+static struct of_device_id cpr_regulator_match_table[] = {
+	{ .compatible = CPR_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static struct platform_driver cpr_regulator_driver = {
+	.driver		= {
+		.name	= CPR_REGULATOR_DRIVER_NAME,
+		.of_match_table = cpr_regulator_match_table,
+		.owner = THIS_MODULE,
+	},
+	.probe		= cpr_regulator_probe,
+	.remove		= cpr_regulator_remove,
+	.suspend	= cpr_regulator_suspend,
+	.resume		= cpr_regulator_resume,
+};
+
+static int initialize_tsens_monitor(struct cpr_regulator *cpr_vreg)
+{
+	int rc;
+
+	rc = cpr_check_tsens(cpr_vreg);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "Unable to check tsens, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = sensor_mgr_init_threshold(&cpr_vreg->tsens_threshold_config,
+				cpr_vreg->tsens_id,
+				cpr_vreg->cpr_enable_temp_threshold, /* high */
+				cpr_vreg->cpr_disable_temp_threshold, /* low */
+				tsens_threshold_notify);
+	if (rc < 0) {
+		cpr_err(cpr_vreg, "Failed to init tsens monitor, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = sensor_mgr_convert_id_and_set_threshold(
+			&cpr_vreg->tsens_threshold_config);
+	if (rc < 0)
+		cpr_err(cpr_vreg, "Failed to set tsens threshold, rc=%d\n",
+					rc);
+
+	return rc;
+}
+
+int __init cpr_regulator_late_init(void)
+{
+	int rc;
+	struct cpr_regulator *cpr_vreg;
+
+	mutex_lock(&cpr_regulator_list_mutex);
+
+	list_for_each_entry(cpr_vreg, &cpr_regulator_list, list) {
+		if (cpr_vreg->cpr_disable_on_temperature) {
+			rc = initialize_tsens_monitor(cpr_vreg);
+			if (rc)
+				cpr_err(cpr_vreg, "Failed to initialize temperature monitor, rc=%d\n",
+					rc);
+		}
+	}
+
+	mutex_unlock(&cpr_regulator_list_mutex);
+	return 0;
+}
+late_initcall(cpr_regulator_late_init);
+
+/**
+ * cpr_regulator_init() - register cpr-regulator driver
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init cpr_regulator_init(void)
+{
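+	/* Callable directly and via arch_initcall(); register only once. */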
+	static bool initialized;
+
+	if (initialized)
+		return 0;
+	initialized = true;
+
+	cpr_debugfs_base_init();
+	return platform_driver_register(&cpr_regulator_driver);
+}
+EXPORT_SYMBOL(cpr_regulator_init);
+
+static void __exit cpr_regulator_exit(void)
+{
+	platform_driver_unregister(&cpr_regulator_driver);
+	cpr_debugfs_base_remove();
+}
+
+MODULE_DESCRIPTION("CPR regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 1de08d4..f370d2b 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -46,6 +46,23 @@ enum rpmh_regulator_type {
 };
 
 /**
+ * enum rpmh_regulator_hw_type - supported PMIC regulator hardware types
+ * This enum defines the specific regulator type along with its PMIC family.
+ */
+enum rpmh_regulator_hw_type {
+	RPMH_REGULATOR_HW_TYPE_UNKNOWN,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_BOB,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_BOB,
+	RPMH_REGULATOR_HW_TYPE_MAX,
+};
+
+/**
  * enum rpmh_regulator_reg_index - RPMh accelerator register indices
  * %RPMH_REGULATOR_REG_VRM_VOLTAGE:	VRM voltage voting register index
  * %RPMH_REGULATOR_REG_ARC_LEVEL:	ARC voltage level voting register index
@@ -115,20 +132,6 @@ enum rpmh_regulator_reg_index {
 /* XOB voting registers are found in the VRM hardware module */
 #define CMD_DB_HW_XOB			CMD_DB_HW_VRM
 
-/*
- * Mapping from RPMh VRM accelerator modes to regulator framework modes
- * Assumes that SMPS PFM mode == LDO LPM mode and SMPS PWM mode == LDO HPM mode
- */
-static const int rpmh_regulator_mode_map[] = {
-	[RPMH_REGULATOR_MODE_SMPS_PFM]	= REGULATOR_MODE_IDLE,
-	[RPMH_REGULATOR_MODE_SMPS_AUTO]	= REGULATOR_MODE_NORMAL,
-	[RPMH_REGULATOR_MODE_SMPS_PWM]	= REGULATOR_MODE_FAST,
-	[RPMH_REGULATOR_MODE_BOB_PASS]	= REGULATOR_MODE_STANDBY,
-	[RPMH_REGULATOR_MODE_BOB_PFM]	= REGULATOR_MODE_IDLE,
-	[RPMH_REGULATOR_MODE_BOB_AUTO]	= REGULATOR_MODE_NORMAL,
-	[RPMH_REGULATOR_MODE_BOB_PWM]	= REGULATOR_MODE_FAST,
-};
-
 /**
  * struct rpmh_regulator_request - rpmh request data
  * @reg:			Array of RPMh accelerator register values
@@ -175,6 +178,8 @@ struct rpmh_vreg;
  *				common to a single aggregated resource
  * @regulator_type:		RPMh accelerator type for this regulator
  *				resource
+ * @regulator_hw_type:		The regulator hardware type (e.g. LDO or SMPS)
+ *				along with PMIC family (i.e. PMIC4 or PMIC5)
  * @level:			Mapping from ARC resource specific voltage
  *				levels (0 to RPMH_ARC_MAX_LEVELS - 1) to common
  *				consumer voltage levels (i.e.
@@ -221,6 +226,7 @@ struct rpmh_aggr_vreg {
 	struct rpmh_client		*rpmh_client;
 	struct mutex			lock;
 	enum rpmh_regulator_type	regulator_type;
+	enum rpmh_regulator_hw_type	regulator_hw_type;
 	u32				level[RPMH_ARC_MAX_LEVELS];
 	int				level_count;
 	bool				always_wait_for_ack;
@@ -268,6 +274,187 @@ struct rpmh_vreg {
 	int				mode_index;
 };
 
+#define RPMH_REGULATOR_MODE_COUNT		5
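+/* The generic modes (RET, LPM, AUTO, HPM, PASS) index the maps below. */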
+
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_LPM	5
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM	5
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PASS	0
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PFM	1
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO	2
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PWM	3
+
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_LPM	4
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PASS	2
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PWM	7
+
+/*
+ * Mappings from RPMh generic modes to VRM accelerator modes and regulator
+ * framework modes for each regulator type.
+ */
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_smps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_hfsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
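+/* PMIC5 FTSMPS regulators support only retention and PWM (HPM) modes. */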
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ftsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode * const
+rpmh_regulator_mode_map[RPMH_REGULATOR_HW_TYPE_MAX] = {
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_LDO]
+		= rpmh_regulator_mode_map_pmic4_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_BOB]
+		= rpmh_regulator_mode_map_pmic4_bob,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_LDO]
+		= rpmh_regulator_mode_map_pmic5_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS]
+		= rpmh_regulator_mode_map_pmic5_hfsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS]
+		= rpmh_regulator_mode_map_pmic5_ftsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_BOB]
+		= rpmh_regulator_mode_map_pmic5_bob,
+};
+
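+/*
+ * Illustrative (hypothetical) device tree snippet that exercises these maps;
+ * the property names match those parsed further below:
+ *
+ *	qcom,regulator-type = "pmic5-bob";
+ *	qcom,supported-modes = <RPMH_REGULATOR_MODE_LPM
+ *				RPMH_REGULATOR_MODE_AUTO
+ *				RPMH_REGULATOR_MODE_HPM>;
+ *	qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ */
+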
 /*
  * This voltage in uV is returned by get_voltage functions when there is no way
  * to determine the current voltage level.  It is needed because the regulator
@@ -869,9 +1056,9 @@ static int rpmh_regulator_vrm_set_mode_index(struct rpmh_vreg *vreg,
  *
  * This function sets the PMIC mode corresponding to the specified framework
  * mode.  The set of PMIC modes allowed is defined in device tree for a given
- * RPMh regulator resource.  The full mapping from PMIC modes to framework modes
- * is defined in the rpmh_regulator_mode_map[] array.  The RPMh resource
- * specific mapping is defined in the aggr_vreg->mode[] array.
+ * RPMh regulator resource.  The full mapping from generic modes to PMIC modes
+ * and framework modes is defined in the rpmh_regulator_mode_map[] array.  The
+ * RPMh resource specific mapping is defined in the aggr_vreg->mode[] array.
  *
  * Return: 0 on success, errno on failure
  */
@@ -1148,11 +1335,60 @@ rpmh_regulator_load_arc_level_mapping(struct rpmh_aggr_vreg *aggr_vreg)
 static int rpmh_regulator_parse_vrm_modes(struct rpmh_aggr_vreg *aggr_vreg)
 {
 	struct device_node *node = aggr_vreg->dev->of_node;
-	const char *prop = "qcom,supported-modes";
+	const char *type = "";
+	const struct rpmh_regulator_mode *map;
+	const char *prop;
 	int i, len, rc;
 	u32 *buf;
 
+	aggr_vreg->regulator_hw_type = RPMH_REGULATOR_HW_TYPE_UNKNOWN;
+
+	/* qcom,regulator-type is optional */
+	prop = "qcom,regulator-type";
+	if (!of_find_property(node, prop, &len))
+		return 0;
+
+	rc = of_property_read_string(node, prop, &type);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "unable to read %s, rc=%d\n",
+				prop, rc);
+		return rc;
+	}
+
+	if (!strcmp(type, "pmic4-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_LDO;
+	} else if (!strcmp(type, "pmic4-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS;
+	} else if (!strcmp(type, "pmic4-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS;
+	} else if (!strcmp(type, "pmic4-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_BOB;
+	} else if (!strcmp(type, "pmic5-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_LDO;
+	} else if (!strcmp(type, "pmic5-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS;
+	} else if (!strcmp(type, "pmic5-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS;
+	} else if (!strcmp(type, "pmic5-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_BOB;
+	} else {
+		aggr_vreg_err(aggr_vreg, "unknown %s = %s\n",
+				prop, type);
+		return -EINVAL;
+	}
+
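+	/* Select the mode map matching this regulator's PMIC hardware type. */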
+	map = rpmh_regulator_mode_map[aggr_vreg->regulator_hw_type];
+
 	/* qcom,supported-modes is optional */
+	prop = "qcom,supported-modes";
 	if (!of_find_property(node, prop, &len))
 		return 0;
 
@@ -1176,15 +1412,22 @@ static int rpmh_regulator_parse_vrm_modes(struct rpmh_aggr_vreg *aggr_vreg)
 	}
 
 	for (i = 0; i < len; i++) {
-		if (buf[i] >= ARRAY_SIZE(rpmh_regulator_mode_map)) {
+		if (buf[i] >= RPMH_REGULATOR_MODE_COUNT) {
 			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid\n",
 				i, prop, buf[i]);
 			rc = -EINVAL;
 			goto done;
 		}
-		aggr_vreg->mode[i].pmic_mode = buf[i];
-		aggr_vreg->mode[i].framework_mode
-			= rpmh_regulator_mode_map[buf[i]];
+
+		if (!map[buf[i]].framework_mode) {
+			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid for regulator type = %s\n",
+				i, prop, buf[i], type);
+			rc = -EINVAL;
+			goto done;
+		}
+
+		aggr_vreg->mode[i].pmic_mode = map[buf[i]].pmic_mode;
+		aggr_vreg->mode[i].framework_mode = map[buf[i]].framework_mode;
 
 		if (i > 0 && aggr_vreg->mode[i].pmic_mode
 				<= aggr_vreg->mode[i - 1].pmic_mode) {
@@ -1287,6 +1530,7 @@ static int rpmh_regulator_allocate_vreg(struct rpmh_aggr_vreg *aggr_vreg)
 static int rpmh_regulator_load_default_parameters(struct rpmh_vreg *vreg)
 {
 	enum rpmh_regulator_type type = vreg->aggr_vreg->regulator_type;
+	const struct rpmh_regulator_mode *map;
 	const char *prop;
 	int i, rc;
 	u32 temp;
@@ -1336,15 +1580,36 @@ static int rpmh_regulator_load_default_parameters(struct rpmh_vreg *vreg)
 		prop = "qcom,init-mode";
 		rc = of_property_read_u32(vreg->of_node, prop, &temp);
 		if (!rc) {
-			if (temp < RPMH_VRM_MODE_MIN ||
-			    temp > RPMH_VRM_MODE_MAX)  {
+			if (temp >= RPMH_REGULATOR_MODE_COUNT) {
 				vreg_err(vreg, "%s=%u is invalid\n",
 					prop, temp);
 				return -EINVAL;
+			} else if (vreg->aggr_vreg->regulator_hw_type
+					== RPMH_REGULATOR_HW_TYPE_UNKNOWN) {
+				vreg_err(vreg, "qcom,regulator-type missing so %s cannot be used\n",
+					prop);
+				return -EINVAL;
 			}
+
+			map = rpmh_regulator_mode_map[
+					vreg->aggr_vreg->regulator_hw_type];
+			if (!map[temp].framework_mode) {
+				vreg_err(vreg, "%s=%u is not supported by type = %d\n",
+					prop, temp,
+					vreg->aggr_vreg->regulator_hw_type);
+				return -EINVAL;
+			}
+
 			rpmh_regulator_set_reg(vreg,
 						RPMH_REGULATOR_REG_VRM_MODE,
-						temp);
+						map[temp].pmic_mode);
+			for (i = 0; i < vreg->aggr_vreg->mode_count; i++) {
+				if (vreg->aggr_vreg->mode[i].pmic_mode
+				    == map[temp].pmic_mode) {
+					vreg->mode_index = i;
+					break;
+				}
+			}
 		}
 
 		prop = "qcom,init-headroom-voltage";
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index f1d4ca2..99b5e35 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -784,7 +784,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
 	}
 
 	timerqueue_add(&rtc->timerqueue, &timer->node);
-	if (!next) {
+	if (!next || ktime_before(timer->node.expires, next->expires)) {
 		struct rtc_wkalrm alarm;
 		int err;
 		alarm.time = rtc_ktime_to_tm(timer->node.expires);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 1227cea..a4b8b60 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
 		return 0;
 
 	buf &= PCF8563_REG_CLKO_F_MASK;
-	return clkout_rates[ret];
+	return clkout_rates[buf];
 }
 
 static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e1..a30f24c 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev)
 
 	dev_pm_clear_wake_irq(&adev->dev);
 	device_init_wakeup(&adev->dev, false);
-	free_irq(adev->irq[0], ldata);
+	if (adev->irq[0])
+		free_irq(adev->irq[0], ldata);
 	rtc_device_unregister(ldata->rtc);
 	iounmap(ldata->base);
 	kfree(ldata);
@@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 		goto out_no_rtc;
 	}
 
-	if (request_irq(adev->irq[0], pl031_interrupt,
-			vendor->irqflags, "rtc-pl031", ldata)) {
-		ret = -EIO;
-		goto out_no_irq;
+	if (adev->irq[0]) {
+		ret = request_irq(adev->irq[0], pl031_interrupt,
+				  vendor->irqflags, "rtc-pl031", ldata);
+		if (ret)
+			goto out_no_irq;
+		dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
 	}
-	dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
 	return 0;
 
 out_no_irq:
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d55e643..e72234e 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -969,7 +969,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+			 int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
 			struct sk_buff *, struct qeth_hdr *, int, int, int);
@@ -1004,6 +1005,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 int qeth_set_features(struct net_device *, netdev_features_t);
 int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 21ef802..838ed62 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -19,6 +19,11 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/skbuff.h>
+
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
 
@@ -3837,6 +3842,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * @card:			qeth card structure, to check max. elems.
  * @skb:			SKB address
  * @extra_elems:		extra elems needed, to check against max.
+ * @data_offset:		range starts at skb->data + data_offset
  *
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3850,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * Note: extra_elems is not included in the returned result.
  */
 int qeth_get_elements_no(struct qeth_card *card,
-		     struct sk_buff *skb, int extra_elems)
+		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
 	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data,
+				(addr_t)skb->data + data_offset,
 				(addr_t)skb->data + skb_headlen(skb)) +
 			qeth_get_elements_for_frags(skb);
 
@@ -6240,6 +6246,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_fix_features);
 
+netdev_features_t qeth_features_check(struct sk_buff *skb,
+				      struct net_device *dev,
+				      netdev_features_t features)
+{
+	/* GSO segmentation builds skbs with
+	 *	a (small) linear part for the headers, and
+	 *	page frags for the data.
+	 * Compared to a linear skb, the header-only part consumes an
+	 * additional buffer element. This reduces buffer utilization, and
+	 * hurts throughput. So compress small segments into one element.
+	 */
+	if (netif_needs_gso(skb, features)) {
+		/* match skb_segment(): */
+		unsigned int doffset = skb->data - skb_mac_header(skb);
+		unsigned int hsize = skb_shinfo(skb)->gso_size;
+		unsigned int hroom = skb_headroom(skb);
+
+		/* linearize only if resulting skb allocations are order-0: */
+		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
+			features &= ~NETIF_F_SG;
+	}
+
+	return vlan_features_check(skb, features);
+}
+EXPORT_SYMBOL_GPL(qeth_features_check);
+
 static int __init qeth_core_init(void)
 {
 	int rc;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8530477..5082dfe 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -865,7 +865,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * chaining we can not send long frag lists
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-	    !qeth_get_elements_no(card, new_skb, 0)) {
+	    !qeth_get_elements_no(card, new_skb, 0, 0)) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -910,7 +910,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	elements = qeth_get_elements_no(card, new_skb, elements_needed);
+	elements = qeth_get_elements_no(card, new_skb, elements_needed,
+					(data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
@@ -1084,6 +1085,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_stop		= qeth_l2_stop,
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
 	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
@@ -1128,6 +1130,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
 		card->dev->hw_features = NETIF_F_SG;
 		card->dev->vlan_features = NETIF_F_SG;
+		card->dev->features |= NETIF_F_SG;
 		/* OSA 3S and earlier has no RX/TX support */
 		if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
 			card->dev->hw_features |= NETIF_F_IP_CSUM;
@@ -1140,8 +1143,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 	}
 	card->info.broadcast_capable = 1;
 	qeth_l2_request_initial_mac(card);
-	card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-				  PAGE_SIZE;
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
 	netif_carrier_off(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 03a2619..f91e70c 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1416,6 +1416,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 
 		tmp->u.a4.addr = im4->multiaddr;
 		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+		tmp->is_multicast = 1;
 
 		ipm = qeth_l3_ip_from_hash(card, tmp);
 		if (ipm) {
@@ -1593,7 +1594,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 	if (!addr)
-		return;
+		goto out;
 
 	spin_lock_bh(&card->ip_lock);
 
@@ -1607,6 +1608,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(addr);
+out:
 	in_dev_put(in_dev);
 }
 
@@ -1631,7 +1633,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
 	if (!addr)
-		return;
+		goto out;
 
 	spin_lock_bh(&card->ip_lock);
 
@@ -1646,6 +1648,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(addr);
+out:
 	in6_dev_put(in6_dev);
 #endif /* CONFIG_QETH_IPV6 */
 }
@@ -2609,17 +2612,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
 	char daddr[16];
 	struct af_iucv_trans_hdr *iucv_hdr;
 
-	skb_pull(skb, 14);
-	card->dev->header_ops->create(skb, card->dev, 0,
-				      card->dev->dev_addr, card->dev->dev_addr,
-				      card->dev->addr_len);
-	skb_pull(skb, 14);
-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.ext_flags = 0;
-	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.length = skb->len - ETH_HLEN;
 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
 	memset(daddr, 0, sizeof(daddr));
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
@@ -2823,10 +2822,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    !skb_is_nonlinear(skb)) {
 		new_skb = skb;
-		if (new_skb->protocol == ETH_P_AF_IUCV)
-			data_offset = 0;
-		else
-			data_offset = ETH_HLEN;
+		data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2867,7 +2863,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
 	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2909,7 +2905,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	elements = use_tso ?
 		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		   qeth_get_elements_no(card, new_skb, hdr_elements);
+		   qeth_get_elements_no(card, new_skb, hdr_elements,
+					(data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
@@ -3064,6 +3061,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_stop		= qeth_l3_stop,
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
+	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
 	.ndo_do_ioctl		= qeth_l3_do_ioctl,
@@ -3120,6 +3118,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 				card->dev->vlan_features = NETIF_F_SG |
 					NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
 					NETIF_F_TSO;
+				card->dev->features |= NETIF_F_SG;
 			}
 		}
 	} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3145,8 +3144,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 				NETIF_F_HW_VLAN_CTAG_RX |
 				NETIF_F_HW_VLAN_CTAG_FILTER;
 	netif_keep_dst(card->dev);
-	card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-				  PAGE_SIZE;
+	netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+					  PAGE_SIZE);
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
 	netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8dcd8c7..05f5239 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 	struct bfad_s *bfad = port->bfad;
 	struct bfa_s *bfa = &bfad->bfa;
 	struct bfa_ioc_s *ioc = &bfa->ioc;
-	int addr, len, rc, i;
+	int addr, rc, i;
+	u32 len;
 	u32 *regbuf;
 	void __iomem *rb, *reg_addr;
 	unsigned long flags;
@@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
 		return PTR_ERR(kern_buf);
 
 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
-	if (rc < 2) {
+	if (rc < 2 || len > (UINT_MAX >> 2)) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 0039beb..358ec32 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1347,6 +1347,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
 		csk, csk->state, csk->flags, csk->tid);
 
 	cxgbi_sock_free_cpl_skbs(csk);
+	cxgbi_sock_purge_write_queue(csk);
 	if (csk->wr_cred != csk->wr_max_cred) {
 		cxgbi_sock_purge_wr_queue(csk);
 		cxgbi_sock_reset_wr_list(csk);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab7..9962370 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2951,7 +2951,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
 	/* fill_cmd can't fail here, no data buffer to map. */
 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
 			scsi3addr, TYPE_MSG);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 		goto out;
@@ -3686,7 +3686,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -3707,7 +3707,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 					DEFAULT_TIMEOUT);
 	if (rc) {
 		cmd_free(h, c);
-		return 0;
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 	}
 	sense = c->err_info->SenseInfo;
 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3718,19 +3718,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	cmd_status = c->err_info->CommandStatus;
 	scsi_status = c->err_info->ScsiStatus;
 	cmd_free(h, c);
-	/* Is the volume 'not ready'? */
-	if (cmd_status != CMD_TARGET_STATUS ||
-		scsi_status != SAM_STAT_CHECK_CONDITION ||
-		sense_key != NOT_READY ||
-		asc != ASC_LUN_NOT_READY)  {
-		return 0;
-	}
 
 	/* Determine the reason for not ready state */
 	ldstat = hpsa_get_volume_status(h, scsi3addr);
 
 	/* Keep volume offline in certain cases: */
 	switch (ldstat) {
+	case HPSA_LV_FAILED:
 	case HPSA_LV_UNDERGOING_ERASE:
 	case HPSA_LV_NOT_AVAILABLE:
 	case HPSA_LV_UNDERGOING_RPI:
@@ -3752,7 +3746,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	default:
 		break;
 	}
-	return 0;
+	return HPSA_LV_OK;
 }
 
 /*
@@ -3825,10 +3819,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	/* Do an inquiry to the device to see what it is. */
 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-		/* Inquiry failed (msg printed already) */
 		dev_err(&h->pdev->dev,
-			"hpsa_update_device_info: inquiry failed\n");
-		rc = -EIO;
+			"%s: inquiry failed, device will be skipped.\n",
+			__func__);
+		rc = HPSA_INQUIRY_FAILED;
 		goto bail_out;
 	}
 
@@ -3857,15 +3851,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	if ((this_device->devtype == TYPE_DISK ||
 		this_device->devtype == TYPE_ZBC) &&
 		is_logical_dev_addr_mode(scsi3addr)) {
-		int volume_offline;
+		unsigned char volume_offline;
 
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
-		if (volume_offline < 0 || volume_offline > 0xff)
-			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-		this_device->volume_offline = volume_offline & 0xff;
+		if (volume_offline == HPSA_LV_FAILED) {
+			rc = HPSA_LV_FAILED;
+			dev_err(&h->pdev->dev,
+				"%s: LV failed, device will be skipped.\n",
+				__func__);
+			goto bail_out;
+		}
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
@@ -4353,8 +4351,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 			goto out;
 		}
 		if (rc) {
-			dev_warn(&h->pdev->dev,
-				"Inquiry failed, skipping device.\n");
+			h->drv_req_rescan = 1;
 			continue;
 		}
 
@@ -5532,7 +5529,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
 
 	spin_lock_irqsave(&h->scan_lock, flags);
 	h->scan_finished = 1;
-	wake_up_all(&h->scan_wait_queue);
+	wake_up(&h->scan_wait_queue);
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5550,11 +5547,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 	if (unlikely(lockup_detected(h)))
 		return hpsa_scan_complete(h);
 
+	/*
+	 * If a scan is already waiting to run, no need to add another
+	 */
+	spin_lock_irqsave(&h->scan_lock, flags);
+	if (h->scan_waiting) {
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		if (h->scan_finished)
 			break;
+		h->scan_waiting = 1;
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		wait_event(h->scan_wait_queue, h->scan_finished);
 		/* Note: We don't need to worry about a race between this
@@ -5564,6 +5573,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 		 */
 	}
 	h->scan_finished = 0; /* mark scan as in progress */
+	h->scan_waiting = 0;
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
 	if (unlikely(lockup_detected(h)))
@@ -8802,6 +8812,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	init_waitqueue_head(&h->event_sync_wait_queue);
 	mutex_init(&h->reset_mutex);
 	h->scan_finished = 1; /* no scan currently in progress */
+	h->scan_waiting = 0;
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
@@ -9094,6 +9105,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	destroy_workqueue(h->rescan_ctlr_wq);
 	destroy_workqueue(h->resubmit_wq);
 
+	hpsa_delete_sas_host(h);
+
 	/*
 	 * Call before disabling interrupts.
 	 * scsi_remove_host can trigger I/O operations especially
@@ -9128,8 +9141,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	h->lockup_detected = NULL;			/* init_one 2 */
 	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
 
-	hpsa_delete_sas_host(h);
-
 	kfree(h);					/* init_one 1 */
 }
 
@@ -9621,9 +9632,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
 	struct sas_phy *phy = hpsa_sas_phy->phy;
 
 	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
-	sas_phy_free(phy);
 	if (hpsa_sas_phy->added_to_port)
 		list_del(&hpsa_sas_phy->phy_list_entry);
+	sas_phy_delete(phy);
 	kfree(hpsa_sas_phy);
 }
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162d..e16f294 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -203,6 +203,7 @@ struct ctlr_info {
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long  		*cmd_pool_bits;
 	int			scan_finished;
+	u8			scan_waiting : 1;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
 
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf..5961705 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
 #define CFGTBL_BusType_Fibre2G  0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED		0x02
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
 #define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
@@ -166,6 +167,7 @@
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_FAILED					0x01
 #define HPSA_LV_NOT_AVAILABLE				0x0b
 #define HPSA_LV_UNDERGOING_ERASE			0x0F
 #define HPSA_LV_UNDERGOING_RPI				0x12
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4df3cdc..fc7adda 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7782,7 +7782,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLI++;
-		if (vport->port_state < LPFC_DISC_AUTH) {
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
@@ -8185,11 +8186,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
-			if (vport->port_type == LPFC_PHYSICAL_PORT
-				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_issue_init_vfi(vport);
-			else
+			if (mb->mbxStatus == MBX_NOT_FINISHED)
+				break;
+			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_issue_init_vfi(vport);
+				else
+					lpfc_initial_flogi(vport);
+			} else {
 				lpfc_initial_fdisc(vport);
+			}
 			break;
 		}
 	} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed22393..7d2ad63 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4784,7 +4784,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
 	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
-	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+	    phba->sli_rev != LPFC_SLI_REV4) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 55faa94..2a436df 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3232,7 +3232,7 @@ struct lpfc_mbx_get_port_name {
 #define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
 #define MB_CQE_STATUS_DMA_FAILED		0x5
 
-#define LPFC_MBX_WR_CONFIG_MAX_BDE		8
+#define LPFC_MBX_WR_CONFIG_MAX_BDE		1
 struct lpfc_mbx_wr_object {
 	struct mbox_header header;
 	union {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 289374c..468acab 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4770,6 +4770,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
 			scmd->result = DID_RESET << 16;
 			break;
+		} else if ((scmd->device->channel == RAID_CHANNEL) &&
+		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
+		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
+			scmd->result = DID_RESET << 16;
+			break;
 		}
 		scmd->result = DID_SOFT_ERROR << 16;
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 658e4d1..ce4ac76 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
 	ql_dbg(level, vha, id,
 	    "----- -----------------------------------------------\n");
-	for (cnt = 0; cnt < size; cnt++, buf++) {
-		if (cnt % 16 == 0)
-			ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
-		printk(" %02x", *buf);
-		if (cnt % 16 == 15)
-			printk("\n");
+	for (cnt = 0; cnt < size; cnt += 16) {
+		ql_dbg(level, vha, id, "%04x: ", cnt);
+		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+			       buf + cnt, min(16U, size - cnt), false);
 	}
-	if (cnt % 16 != 0)
-		printk("\n");
 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 91f5f55..59059ff 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_sess *sess = NULL;
-	uint32_t unpacked_lun, lun = 0;
 	uint16_t loop_id;
 	int res = 0;
 	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
 	unsigned long flags;
 
 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
 	    mcmd, loop_id);
 
-	lun = a->u.isp24.fcp_cmnd.lun;
-	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-	    iocb, QLA24XX_MGMT_SEND_NACK);
+	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cf04a36..2b0e615 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2996,11 +2996,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
 		return DID_ERROR << 16;
-	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
+	} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
 		sdev_printk(KERN_INFO, scp->device,
-			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
 			    my_name, "write same",
-			    num * sdebug_sector_size, ret);
+			    sdebug_sector_size, ret);
 
 	/* Copy first sector to remaining blocks */
 	for (i = 1 ; i < num ; i++)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 2464569..26e6b05 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -160,7 +160,7 @@ static struct {
 	{"DGC", "RAID", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, storage on LUN 0 */
 	{"DGC", "DISK", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, no storage on LUN 0 */
 	{"EMC",  "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
-	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
 	{"EMULEX", "MD21/S2     ESDI", NULL, BLIST_SINGLELUN},
 	{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
 	{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 043ab9e..57a3ee0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2045,11 +2045,13 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 		q->limits.cluster = 0;
 
 	/*
-	 * set a reasonable default alignment on word boundaries: the
-	 * host and device may alter it using
-	 * blk_queue_update_dma_alignment() later.
+	 * Set a reasonable default alignment:  The larger of 32-byte (dword),
+	 * which is a common minimum for HBAs, and the minimum DMA alignment,
+	 * which is set by the platform.
+	 *
+	 * Devices that require a bigger alignment can increase it later.
 	 */
-	blk_queue_dma_alignment(q, 0x03);
+	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 
 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 02823a7..4fb494a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -234,11 +234,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
+	bool v;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->manage_start_stop = v;
 
 	return count;
 }
@@ -256,6 +260,7 @@ static ssize_t
 allow_restart_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
+	bool v;
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
 
@@ -265,7 +270,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
 	if (sdp->type != TYPE_DISK)
 		return -EINVAL;
 
-	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->allow_restart = v;
 
 	return count;
 }
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a5f1093..e929f51 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -522,7 +522,7 @@ struct pmic_wrapper_type {
 	u32 int_en_all;
 	u32 spi_w;
 	u32 wdt_src;
-	int has_bridge:1;
+	unsigned int has_bridge:1;
 	int (*init_reg_clock)(struct pmic_wrapper *wrp);
 	int (*init_soc_specific)(struct pmic_wrapper *wrp);
 };
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index dc641ef..f34b714 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -87,6 +87,9 @@
 ifdef CONFIG_MSM_RPM_SMD
 	obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_master_stat.o
 endif
+ifdef CONFIG_QTI_RPMH_API
+	obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+endif
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
 obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
 obj-$(CONFIG_MSM_REMOTEQDSS) += remoteqdss.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 457dc5f..41a1a79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -671,13 +671,14 @@ static ssize_t dcc_store_func_type(struct device *dev,
 	if (sscanf(buf, "%s", str) != 1)
 		return -EINVAL;
 
+	mutex_lock(&drvdata->mutex);
 	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
 		dev_err(dev,
 			"Select link list to program using curr_list\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
-	mutex_lock(&drvdata->mutex);
 	if (drvdata->enable[drvdata->curr_list]) {
 		ret = -EBUSY;
 		goto out;
@@ -771,10 +772,21 @@ static DEVICE_ATTR(trigger, 0200, NULL, dcc_store_trigger);
 static ssize_t dcc_show_enable(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
+	int ret;
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%u\n",
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
 			 (unsigned int)drvdata->enable[drvdata->curr_list]);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
 }
 
 static ssize_t dcc_store_enable(struct device *dev,
@@ -812,10 +824,13 @@ static ssize_t dcc_show_config(struct device *dev,
 
 	buf[0] = '\0';
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST)
-		return -EINVAL;
-
 	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		count = -EINVAL;
+		goto err;
+	}
+
 	list_for_each_entry(entry,
 			    &drvdata->cfg_head[drvdata->curr_list], list) {
 		switch (entry->desc_type) {
@@ -852,8 +867,8 @@ static ssize_t dcc_show_config(struct device *dev,
 		count += len;
 	}
 
+err:
 	mutex_unlock(&drvdata->mutex);
-
 	return count;
 }
 
@@ -866,6 +881,12 @@ static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr,
 
 	mutex_lock(&drvdata->mutex);
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(drvdata->dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!len) {
 		dev_err(drvdata->dev, "DCC: Invalid length\n");
 		ret = -EINVAL;
@@ -959,11 +980,6 @@ static ssize_t dcc_store_config(struct device *dev,
 	if (nval <= 0 || nval > 3)
 		return -EINVAL;
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
-		dev_err(dev, "Select link list to program using curr_list\n");
-		return -EINVAL;
-	}
-
 	if (nval == 1) {
 		len = 1;
 		apb_bus = 0;
@@ -1028,6 +1044,12 @@ static ssize_t dcc_show_crc_error(struct device *dev,
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!drvdata->enable[drvdata->curr_list]) {
 		ret = -EINVAL;
 		goto err;
@@ -1049,6 +1071,13 @@ static ssize_t dcc_show_ready(struct device *dev,
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!drvdata->enable[drvdata->curr_list]) {
 		ret = -EINVAL;
 		goto err;
@@ -1159,6 +1188,12 @@ static ssize_t dcc_rd_mod_wr(struct device *dev,
 		goto err;
 	}
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (list_empty(&drvdata->cfg_head[drvdata->curr_list])) {
 		dev_err(drvdata->dev, "DCC: No read address programmed\n");
 		ret = -EPERM;
@@ -1266,6 +1301,12 @@ static ssize_t dcc_store_cti_trig(struct device *dev,
 
 	mutex_lock(&drvdata->mutex);
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (drvdata->enable[drvdata->curr_list]) {
 		ret = -EBUSY;
 		goto out;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b315a97..d8cc2c4 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2994,7 +2994,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 			if (!wait_for_completion_timeout(
 					&ctx->int_req_ack_complete,
 					ctx->rx_intent_req_timeout_jiffies)) {
-				GLINK_ERR_CH(ctx,
+				GLINK_ERR(
 					"%s: Intent request ack with size: %zu not granted for lcid\n",
 					__func__, size);
 				ret = -ETIMEDOUT;
@@ -3014,7 +3014,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
 			if (!wait_for_completion_timeout(
 					&ctx->int_req_complete,
 					ctx->rx_intent_req_timeout_jiffies)) {
-				GLINK_ERR_CH(ctx,
+				GLINK_ERR(
 					"%s: Intent request with size: %zu not granted for lcid\n",
 					__func__, size);
 				ret = -ETIMEDOUT;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 187c80d..9becb10 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2028,6 +2028,7 @@ static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
 	/* Need enough space to write the command and some data */
 	if (size <= sizeof(cmd)) {
 		einfo->tx_resume_needed = true;
+		send_tx_blocked_signal(einfo);
 		spin_unlock_irqrestore(&einfo->write_lock, flags);
 		srcu_read_unlock(&einfo->use_ref, rcu_id);
 		return -EAGAIN;
@@ -2296,6 +2297,7 @@ static int parse_qos_dt_params(struct device_node *node,
 		einfo->ramp_time_us[i] = arr32[i];
 
 	rc = 0;
+	kfree(arr32);
 	return rc;
 
 invalid_key:
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 649d0ff..95004a1 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -463,6 +463,7 @@ static struct icnss_priv {
 	struct ramdump_device *msa0_dump_dev;
 	bool bypass_s1_smmu;
 	bool force_err_fatal;
+	bool allow_recursive_recovery;
 	u8 cause_for_rejuvenation;
 	u8 requesting_sub_system;
 	u16 line_number;
@@ -2230,7 +2231,8 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
 	if (ret < 0) {
 		icnss_pr_err("Driver reinit failed: %d, state: 0x%lx\n",
 			     ret, priv->state);
-		ICNSS_ASSERT(false);
+		if (!priv->allow_recursive_recovery)
+			ICNSS_ASSERT(false);
 		goto out_power_off;
 	}
 
@@ -2405,7 +2407,8 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
 	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
 		icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
 			     event_data->crashed, priv->state);
-		ICNSS_ASSERT(0);
+		if (!priv->allow_recursive_recovery)
+			ICNSS_ASSERT(0);
 		goto out;
 	}
 
@@ -3725,6 +3728,15 @@ static int icnss_test_mode_fw_test(struct icnss_priv *priv,
 	return ret;
 }
 
+static void icnss_allow_recursive_recovery(struct device *dev)
+{
+	struct icnss_priv *priv = dev_get_drvdata(dev);
+
+	priv->allow_recursive_recovery = true;
+
+	icnss_pr_info("Recursive recovery allowed for WLAN\n");
+}
+
 static ssize_t icnss_fw_debug_write(struct file *fp,
 				    const char __user *user_buf,
 				    size_t count, loff_t *off)
@@ -3773,6 +3785,9 @@ static ssize_t icnss_fw_debug_write(struct file *fp,
 		case 3:
 			ret = icnss_trigger_recovery(&priv->pdev->dev);
 			break;
+		case 4:
+			icnss_allow_recursive_recovery(&priv->pdev->dev);
+			break;
 		default:
 			return -EINVAL;
 		}
diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c
index c65dfd9..87e1700 100644
--- a/drivers/soc/qcom/minidump_log.c
+++ b/drivers/soc/qcom/minidump_log.c
@@ -76,6 +76,9 @@ void dump_stack_minidump(u64 sp)
 	struct md_region ksp_entry, ktsk_entry;
 	u32 cpu = smp_processor_id();
 
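+	/* Idle task stacks are not registered in the minidump */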
+	if (is_idle_task(current))
+		return;
+
 	if (sp < KIMAGE_VADDR || sp > -256UL)
 		sp = current_stack_pointer;
 
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 25099bb..9a9b4df 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -571,8 +571,10 @@ static void glink_pkt_notify_state_worker(struct work_struct *work)
 	mutex_lock(&devp->ch_lock);
 	devp->ch_state = event;
 	if (event == GLINK_CONNECTED) {
-		if (!devp->handle)
-			devp->handle = handle;
+		if (!devp->handle) {
+			GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+			goto exit;
+		}
 		devp->in_reset = 0;
 		wake_up_interruptible(&devp->ch_opened_wait_queue);
 	} else if (event == GLINK_REMOTE_DISCONNECTED) {
@@ -584,6 +586,7 @@ static void glink_pkt_notify_state_worker(struct work_struct *work)
 			devp->handle = NULL;
 		wake_up_interruptible(&devp->ch_closed_wait_queue);
 	}
+exit:
 	mutex_unlock(&devp->ch_lock);
 	kfree(work_item);
 }
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index ce31d66..e0f912d 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -263,6 +263,12 @@ int pil_mss_assert_resets(struct q6v5_data *drv)
 
 	pil_mss_pdc_sync(drv, 1);
 	pil_mss_alt_reset(drv, 1);
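+	/* On targets with qcom,reset-clk, keep clocks off while reset is asserted */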
+	if (drv->reset_clk) {
+		pil_mss_disable_clks(drv);
+		if (drv->ahb_clk_vote)
+			clk_disable_unprepare(drv->ahb_clk);
+	}
+
 	ret = pil_mss_restart_reg(drv, true);
 
 	return ret;
@@ -277,6 +283,9 @@ int pil_mss_deassert_resets(struct q6v5_data *drv)
 		return ret;
 	/* Wait 6 32kHz sleep cycles for reset */
 	udelay(200);
+
+	if (drv->reset_clk)
+		pil_mss_enable_clks(drv);
 	pil_mss_alt_reset(drv, 0);
 	pil_mss_pdc_sync(drv, false);
 
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 728a68c..3fdacf2 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,7 +38,6 @@
 #define PROXY_TIMEOUT_MS	10000
 #define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
-#define QDSP6SS_NMI_STATUS	0x44
 
 #define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
 
@@ -72,17 +71,12 @@ static void restart_modem(struct modem_data *drv)
 static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
 {
 	struct modem_data *drv = subsys_to_drv(dev_id);
-	u32 nmi_status = readl_relaxed(drv->q6->reg_base + QDSP6SS_NMI_STATUS);
 
 	/* Ignore if we're the one that set the force stop GPIO */
 	if (drv->crash_shutdown)
 		return IRQ_HANDLED;
 
-	if (nmi_status & 0x04)
-		pr_err("%s: Fatal error on the modem due to TZ NMI\n",
-			__func__);
-	else
-		pr_err("%s: Fatal error on the modem\n", __func__);
+	pr_err("Fatal error on the modem.\n");
 	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
 	restart_modem(drv);
 	return IRQ_HANDLED;
@@ -278,6 +272,8 @@ static int pil_mss_loadable_init(struct modem_data *drv,
 
 	q6_desc->ops = &pil_msa_mss_ops;
 
+	q6->reset_clk = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-clk");
 	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
 							"qcom,pil-self-auth");
 	if (q6->self_auth) {
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
index 4961b1f..2690bb7 100644
--- a/drivers/soc/qcom/pil-q6v5.h
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,7 @@ struct q6v5_data {
 	int mss_pdc_offset;
 	bool ahb_clk_vote;
 	bool mx_spike_wa;
+	bool reset_clk;
 };
 
 int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
index 6ae9f08..e52fc72 100644
--- a/drivers/soc/qcom/rpm-smd-debug.c
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -90,23 +90,23 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
 		cmp += pos;
 		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
 			pr_err("Invalid number of arguments passed\n");
-			goto err;
+			goto err_request;
 		}
 
 		if (strlen(key_str) > 4) {
 			pr_err("Key value cannot be more than 4 charecters");
-			goto err;
+			goto err_request;
 		}
 		key = string_to_uint(key_str);
 		if (!key) {
 			pr_err("Key values entered incorrectly\n");
-			goto err;
+			goto err_request;
 		}
 
 		cmp += pos;
 		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
 			pr_err("Invalid number of arguments passed\n");
-			goto err;
+			goto err_request;
 		}
 
 		if (msm_rpm_add_kvp_data(req, key,
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
new file mode 100644
index 0000000..2c379a0
--- /dev/null
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/smem.h>
+
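+/* SMEM item IDs from which each master's sleep statistics are read */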
+enum master_smem_id {
+	MPSS = 605,
+	ADSP,
+	CDSP,
+	SLPI,
+	GPU,
+	DISPLAY,
+};
+
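+/* Host processor IDs used when looking up each master's SMEM record */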
+enum master_pid {
+	PID_APSS = 0,
+	PID_MPSS = 1,
+	PID_ADSP = 2,
+	PID_SLPI = 3,
+	PID_CDSP = 5,
+	PID_GPU = PID_APSS,
+	PID_DISPLAY = PID_APSS,
+};
+
+struct msm_rpmh_master_data {
+	char *master_name;
+	enum master_smem_id smem_id;
+	enum master_pid pid;
+};
+
+static const struct msm_rpmh_master_data rpmh_masters[] = {
+	{"MPSS", MPSS, PID_MPSS},
+	{"ADSP", ADSP, PID_ADSP},
+	{"CDSP", CDSP, PID_CDSP},
+	{"SLPI", SLPI, PID_SLPI},
+	{"GPU", GPU, PID_GPU},
+	{"DISPLAY", DISPLAY, PID_DISPLAY},
+};
+
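+/* Layout of the per-master sleep statistics record stored in SMEM */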
+struct msm_rpmh_master_stats {
+	uint32_t version_id;
+	uint32_t counts;
+	uint64_t last_entered_at;
+	uint64_t last_exited_at;
+	uint64_t accumulated_duration;
+};
+
+struct rpmh_master_stats_prv_data {
+	struct kobj_attribute ka;
+	struct kobject *kobj;
+};
+
+static DEFINE_MUTEX(rpmh_stats_mutex);
+
+static ssize_t msm_rpmh_master_stats_print_data(char *prvbuf, ssize_t length,
+				struct msm_rpmh_master_stats *record,
+				const char *name)
+{
+	return snprintf(prvbuf, length, "%s\n\tVersion:0x%x\n"
+			"\tSleep Count:0x%x\n"
+			"\tSleep Last Entered At:0x%llx\n"
+			"\tSleep Last Exited At:0x%llx\n"
+			"\tSleep Accumulated Duration:0x%llx\n\n",
+			name, record->version_id, record->counts,
+			record->last_entered_at, record->last_exited_at,
+			record->accumulated_duration);
+}
+
+static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	ssize_t length;
+	int i = 0;
+	unsigned int size = 0;
+	struct msm_rpmh_master_stats *record = NULL;
+
+	/*
+	 * Read SMEM data written by masters
+	 */
+
+	mutex_lock(&rpmh_stats_mutex);
+
+	for (i = 0, length = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
+		record = (struct msm_rpmh_master_stats *) smem_get_entry(
+					rpmh_masters[i].smem_id, &size,
+					rpmh_masters[i].pid, 0);
+		if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
+			length += msm_rpmh_master_stats_print_data(
+					buf + length, PAGE_SIZE - length,
+					record,
+					rpmh_masters[i].master_name);
+	}
+
+	mutex_unlock(&rpmh_stats_mutex);
+
+	return length;
+}
+
+static int msm_rpmh_master_stats_probe(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata = NULL;
+	struct kobject *rpmh_master_stats_kobj = NULL;
+	int ret = 0;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = kzalloc(sizeof(struct rpmh_master_stats_prv_data),
+							GFP_KERNEL);
+	if (!prvdata) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
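+	/* Expose the stats as a read-only /sys/power/rpmh_stats/master_stats file */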
+	rpmh_master_stats_kobj = kobject_create_and_add(
+					"rpmh_stats",
+					power_kobj);
+	if (!rpmh_master_stats_kobj) {
+		ret = -ENOMEM;
+		kfree(prvdata);
+		goto fail;
+	}
+
+	prvdata->kobj = rpmh_master_stats_kobj;
+
+	sysfs_attr_init(&prvdata->ka.attr);
+	prvdata->ka.attr.mode = 0444;
+	prvdata->ka.attr.name = "master_stats";
+	prvdata->ka.show = msm_rpmh_master_stats_show;
+	prvdata->ka.store = NULL;
+
+	ret = sysfs_create_file(prvdata->kobj, &prvdata->ka.attr);
+	if (ret) {
+		pr_err("sysfs_create_file failed\n");
+		kobject_put(prvdata->kobj);
+		kfree(prvdata);
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, prvdata);
+
+fail:
+	return ret;
+}
+
+static int msm_rpmh_master_stats_remove(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = (struct rpmh_master_stats_prv_data *)
+				platform_get_drvdata(pdev);
+
+	sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
+	kobject_put(prvdata->kobj);
+	kfree(prvdata);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id rpmh_master_table[] = {
+	{.compatible = "qcom,rpmh-master-stats"},
+	{},
+};
+
+static struct platform_driver msm_rpmh_master_stats_driver = {
+	.probe	= msm_rpmh_master_stats_probe,
+	.remove = msm_rpmh_master_stats_remove,
+	.driver = {
+		.name = "msm_rpmh_master_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpmh_master_table,
+	},
+};
+
+static int __init msm_rpmh_master_stats_init(void)
+{
+	return platform_driver_register(&msm_rpmh_master_stats_driver);
+}
+
+static void __exit msm_rpmh_master_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpmh_master_stats_driver);
+}
+
+module_init(msm_rpmh_master_stats_init);
+module_exit(msm_rpmh_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPMH Master Statistics driver");
+MODULE_ALIAS("platform:msm_rpmh_master_stat_log");
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 5289cd0..1c8bc51 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -25,12 +25,28 @@
 
 DEFINE_MUTEX(secure_buffer_mutex);
 
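+/* Chunk-list descriptors passed to the MEM_PROTECT_LOCK_ID2 secure calls */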
+struct cp2_mem_chunks {
+	u32 chunk_list;
+	u32 chunk_list_size;
+	u32 chunk_size;
+} __attribute__ ((__packed__));
+
+struct cp2_lock_req {
+	struct cp2_mem_chunks chunks;
+	u32 mem_usage;
+	u32 lock;
+} __attribute__ ((__packed__));
+
 struct mem_prot_info {
 	phys_addr_t addr;
 	u64 size;
 };
 
 #define MEM_PROT_ASSIGN_ID		0x16
+#define MEM_PROTECT_LOCK_ID2		0x0A
+#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
+#define V2_CHUNK_SIZE           SZ_1M
+#define FEATURE_ID_CP 12
 
 struct dest_vm_and_perm_info {
 	u32 vm;
@@ -42,6 +58,134 @@ struct dest_vm_and_perm_info {
 static void *qcom_secure_mem;
 #define QCOM_SECURE_MEM_SIZE (512*1024)
 
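+/*
+ * Ask the secure environment to lock or unlock the physically contiguous
+ * chunks described by the chunk list at physical address @chunks.
+ */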
+static int secure_buffer_change_chunk(u32 chunks,
+				u32 nchunks,
+				u32 chunk_size,
+				int lock)
+{
+	struct cp2_lock_req request;
+	u32 resp;
+	int ret;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = request.chunks.chunk_list = chunks;
+	desc.args[1] = request.chunks.chunk_list_size = nchunks;
+	desc.args[2] = request.chunks.chunk_size = chunk_size;
+	/* Usage is now always 0 */
+	desc.args[3] = request.mem_usage = 0;
+	desc.args[4] = request.lock = lock;
+	desc.args[5] = 0;
+	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+				SCM_VAL);
+
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+				&request, sizeof(request), &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
+		resp = desc.ret[0];
+	}
+
+	return ret;
+}
+
+static int secure_buffer_change_table(struct sg_table *table, int lock)
+{
+	int i, j;
+	int ret = -EINVAL;
+	u32 *chunk_list;
+	struct scatterlist *sg;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int nchunks;
+		int size = sg->length;
+		int chunk_list_len;
+		phys_addr_t chunk_list_phys;
+
+		/*
+		 * This should theoretically be a phys_addr_t but the protocol
+		 * indicates this should be a u32.
+		 */
+		u32 base;
+		u64 tmp = sg_dma_address(sg);
+
+		WARN((tmp >> 32) & 0xffffffff,
+			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
+			__func__, sg, tmp);
+		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
+			WARN(1,
+				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
+				__func__, i, size, V2_CHUNK_SIZE);
+			return -EINVAL;
+		}
+
+		base = (u32)tmp;
+
+		nchunks = size / V2_CHUNK_SIZE;
+		chunk_list_len = sizeof(u32)*nchunks;
+
+		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
+
+		if (!chunk_list)
+			return -ENOMEM;
+
+		chunk_list_phys = virt_to_phys(chunk_list);
+		for (j = 0; j < nchunks; j++)
+			chunk_list[j] = base + j * V2_CHUNK_SIZE;
+
+		/*
+		 * Flush the chunk list before sending the memory to the
+		 * secure environment to ensure the data is actually present
+		 * in RAM
+		 */
+		dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
+
+		ret = secure_buffer_change_chunk(chunk_list_phys,
+				nchunks, V2_CHUNK_SIZE, lock);
+
+		if (!ret) {
+			/*
+			 * Set or clear the private page flag to communicate the
+			 * status of the chunk to other entities
+			 */
+			if (lock)
+				SetPagePrivate(sg_page(sg));
+			else
+				ClearPagePrivate(sg_page(sg));
+		}
+
+		kfree(chunk_list);
+	}
+
+	return ret;
+}
+
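+/* Lock all chunks of @table into the secure environment */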
+int msm_secure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 1);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+}
+
+int msm_unsecure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 0);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+}
+
 static struct dest_vm_and_perm_info *
 populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
 		   size_t *size_in_bytes)
@@ -279,6 +423,19 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
 	}
 }
 
+#define MAKE_CP_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+bool msm_secure_v2_is_supported(void)
+{
+	/*
+	 * if the version is < 1.1.0 then dynamic buffer allocation is
+	 * not supported
+	 */
+	return (scm_get_feat_version(FEATURE_ID_CP) >=
+			MAKE_CP_VERSION(1, 1, 0));
+}
+
 static int __init alloc_secure_shared_memory(void)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 310a186..d2b8733 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -37,12 +37,12 @@ static int sleepstate_pm_notifier(struct notifier_block *nb,
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(true);
 		break;
 
 	case PM_POST_SUSPEND:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
-		usleep_range(10000, 10500); /* Tuned based on SMP2P latencies */
 		msm_ipc_router_set_ws_allowed(false);
 		break;
 	}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 9af39e1..82dea32 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -587,6 +587,10 @@ static struct msm_soc_info cpu_of_id[] = {
 	/* SDM450 ID */
 	[338] = {MSM_CPU_SDM450, "SDM450"},
 
+	/* SDM632 ID */
+	[349] = {MSM_CPU_SDM632, "SDM632"},
+	[350] = {MSM_CPU_SDA632, "SDA632"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1469,6 +1473,10 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 338;
 		strlcpy(dummy_socinfo.build_id, "sdm450 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdm632()) {
+		dummy_socinfo.id = 349;
+		strlcpy(dummy_socinfo.build_id, "sdm632 - ",
+			sizeof(dummy_socinfo.build_id));
 	}
 
 	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 876e176..0b037d4 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -840,6 +840,8 @@ static int spcom_close(struct spcom_channel *ch)
 	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
 	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
 	ch->pid = 0;
+	ch->actual_rx_size = 0;
+	ch->glink_rx_buf = NULL;
 
 	pr_debug("Channel closed [%s].\n", ch->name);
 
@@ -940,8 +942,8 @@ static int spcom_rx(struct spcom_channel *ch,
 
 	/* check for already pending data */
 	if (ch->actual_rx_size) {
-		pr_debug("already pending data size [%zu]\n",
-			 ch->actual_rx_size);
+		pr_debug("already pending data size [%zu] ch [%s]\n",
+			 ch->actual_rx_size, ch->name);
 		goto copy_buf;
 	}
 
@@ -949,24 +951,27 @@ static int spcom_rx(struct spcom_channel *ch,
 	reinit_completion(&ch->rx_done);
 
 	/* Wait for Rx response */
-	pr_debug("Wait for Rx done.\n");
+	pr_debug("Wait for Rx done, ch [%s].\n", ch->name);
 	if (timeout_msec)
 		timeleft = wait_for_completion_timeout(&ch->rx_done, jiffies);
 	else
 		wait_for_completion(&ch->rx_done);
 
 	if (timeleft == 0) {
-		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
+		pr_err("rx_done timeout [%d] msec expired, ch [%s]\n",
+			timeout_msec, ch->name);
 		mutex_unlock(&ch->lock);
 		return -ETIMEDOUT;
 	} else if (ch->rx_abort) {
 		mutex_unlock(&ch->lock);
-		pr_err("rx_abort, probably remote side reset (SSR).\n");
+		pr_err("rx_abort, probably remote side reset (SSR), ch [%s].\n",
+			ch->name);
 		return -ERESTART; /* probably SSR */
 	} else if (ch->actual_rx_size) {
-		pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
+		pr_debug("actual_rx_size is [%zu], ch [%s]\n",
+			ch->actual_rx_size, ch->name);
 	} else {
-		pr_err("actual_rx_size is zero.\n");
+		pr_err("actual_rx_size is zero, ch [%s].\n", ch->name);
 		goto exit_err;
 	}
 
@@ -980,7 +985,7 @@ static int spcom_rx(struct spcom_channel *ch,
 	size = min_t(int, ch->actual_rx_size, size);
 	memcpy(buf, ch->glink_rx_buf, size);
 
-	pr_debug("copy size [%d].\n", (int) size);
+	pr_debug("copy size [%d], ch [%s].\n", (int) size, ch->name);
 
 	/* free glink buffer after copy to spcom buffer */
 	glink_rx_done(ch->glink_handle, ch->glink_rx_buf, false);
@@ -993,7 +998,8 @@ static int spcom_rx(struct spcom_channel *ch,
 		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
 		goto exit_err;
 	} else {
-		pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
+		pr_debug("queue rx_buf, size [%zu], ch [%s]\n",
+			ch->rx_buf_size, ch->name);
 	}
 
 	mutex_unlock(&ch->lock);
@@ -1038,7 +1044,7 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
 	mutex_lock(&ch->lock); /* re-lock after waiting */
 	/* Check Rx Abort on SP reset */
 	if (ch->rx_abort) {
-		pr_err("rx aborted.\n");
+		pr_err("rx aborted, ch [%s].\n", ch->name);
 		goto exit_error;
 	}
 
@@ -1085,13 +1091,14 @@ static void spcom_rx_abort_pending_server(void)
 		if (!ch->is_server)
 			continue;
 
-		/* The server might not be connected to a client.
-		 * Don't check if connected, only if open.
+		/* The ch REMOTE_DISCONNECT notification happens before
+		 * the LINK_DOWN notification,
+		 * so the channel is already closed.
 		 */
-		if (!spcom_is_channel_open(ch) || (ch->rx_abort))
+		if (ch->rx_abort)
 			continue;
 
-		pr_debug("rx-abort server ch [%s].\n", ch->name);
+		pr_err("rx-abort server ch [%s].\n", ch->name);
 		ch->rx_abort = true;
 		complete_all(&ch->rx_done);
 	}
@@ -2814,7 +2821,7 @@ static int __init spcom_init(void)
 {
 	int ret;
 
-	pr_info("spcom driver version 1.2 23-Aug-2017.\n");
+	pr_info("spcom driver version 1.3 28-Dec-2017.\n");
 
 	ret = platform_driver_register(&spcom_driver);
 	if (ret)
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index d3819b6..92b6423 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -42,7 +42,6 @@
 
 #define ERR_READY	0
 #define PBL_DONE	1
-#define QDSP6SS_NMI_STATUS	0x44
 
 #define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
 #define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
@@ -117,7 +116,6 @@ struct pil_tz_data {
 	void __iomem *irq_mask;
 	void __iomem *err_status;
 	void __iomem *err_status_spare;
-	void __iomem *reg_base;
 	u32 bits_arr[2];
 };
 
@@ -931,19 +929,8 @@ static void subsys_crash_shutdown(const struct subsys_desc *subsys)
 static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
 {
 	struct pil_tz_data *d = subsys_to_data(dev_id);
-	u32 nmi_status = 0;
 
-	if (d->reg_base)
-		nmi_status = readl_relaxed(d->reg_base +
-						QDSP6SS_NMI_STATUS);
-
-	if (nmi_status & 0x04)
-		pr_err("%s: Fatal error on the %s due to TZ NMI\n",
-			__func__, d->subsys_desc.name);
-	else
-		pr_err("%s Fatal error on the %s\n",
-			__func__, d->subsys_desc.name);
-
+	pr_err("Fatal error on %s!\n", d->subsys_desc.name);
 	if (subsys_get_crash_status(d->subsys)) {
 		pr_err("%s: Ignoring error fatal, restart in progress\n",
 							d->subsys_desc.name);
@@ -1065,7 +1052,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 {
 	struct pil_tz_data *d;
 	struct resource *res;
-	u32 proxy_timeout;
+	struct device_node *crypto_node;
+	u32 proxy_timeout, crypto_id;
 	int len, rc;
 
 	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
@@ -1079,13 +1067,6 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
 						"qcom,keep-proxy-regs-on");
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base_reg");
-	d->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(d->reg_base)) {
-		dev_err(&pdev->dev, "Failed to ioremap base register\n");
-		d->reg_base = NULL;
-	}
-
 	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
 				      &d->desc.name);
 	if (rc)
@@ -1128,7 +1109,17 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
 									rc);
 			return rc;
 		}
-		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE_0);
+
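+		/* Prefer the crypto bus master's cell-id from DT over the default */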
+		crypto_id = MSM_BUS_MASTER_CRYPTO_CORE_0;
+		crypto_node = of_parse_phandle(pdev->dev.of_node,
+						"qcom,mas-crypto", 0);
+		if (!IS_ERR_OR_NULL(crypto_node)) {
+			of_property_read_u32(crypto_node, "cell-id",
+				&crypto_id);
+			of_node_put(crypto_node);
+		}
+
+		scm_pas_init((int)crypto_id);
 	}
 
 	rc = pil_desc_init(&d->desc);
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 3d978f7..480a33c 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -18,23 +18,35 @@
 #include <soc/qcom/rpmh.h>
 #include <soc/qcom/system_pm.h>
 
-#define ARCH_TIMER_HZ		(19200000UL)
+#include <clocksource/arm_arch_timer.h>
+
 #define PDC_TIME_VALID_SHIFT	31
 #define PDC_TIME_UPPER_MASK	0xFFFFFF
 
 static struct rpmh_client *rpmh_client;
 
-static int setup_wakeup(uint64_t sleep_val)
+static int setup_wakeup(uint32_t lo, uint32_t hi)
 {
 	struct tcs_cmd cmd[2] = { { 0 } };
 
-	cmd[0].data = (sleep_val >> 32) & PDC_TIME_UPPER_MASK;
+	cmd[0].data =  hi & PDC_TIME_UPPER_MASK;
 	cmd[0].data |= 1 << PDC_TIME_VALID_SHIFT;
-	cmd[1].data = sleep_val & 0xFFFFFFFF;
+	cmd[1].data = lo;
 
 	return rpmh_write_control(rpmh_client, cmd, ARRAY_SIZE(cmd));
 }
 
+int system_sleep_update_wakeup(void)
+{
+	uint32_t lo = ~0U, hi = ~0U;
+
+	/* Read the hardware to get the most accurate value */
+	arch_timer_mem_get_cval(&lo, &hi);
+
+	return setup_wakeup(lo, hi);
+}
+EXPORT_SYMBOL(system_sleep_update_wakeup);
+
 /**
  * system_sleep_allowed() - Returns if its okay to enter system low power modes
  */
@@ -47,35 +59,15 @@ EXPORT_SYMBOL(system_sleep_allowed);
 /**
  * system_sleep_enter() - Activties done when entering system low power modes
  *
- * @sleep_val: The sleep duration in us.
- *
- * Returns 0 for success or error values from writing the timer value in the
- * hardware block.
+ * Returns 0 for success or error values from writing the sleep/wake values to
+ * the hardware block.
  */
-int system_sleep_enter(uint64_t sleep_val)
+int system_sleep_enter(void)
 {
-	int ret;
-
 	if (IS_ERR_OR_NULL(rpmh_client))
 		return -EFAULT;
 
-	ret = rpmh_flush(rpmh_client);
-	if (ret)
-		return ret;
-
-	/*
-	 * Set up the wake up value offset from the current time.
-	 * Convert us to ns to allow div by 19.2 Mhz tick timer.
-	 */
-	if (sleep_val) {
-		sleep_val *= NSEC_PER_USEC;
-		do_div(sleep_val, NSEC_PER_SEC/ARCH_TIMER_HZ);
-		sleep_val += arch_counter_get_cntvct();
-	} else {
-		sleep_val = ~0ULL;
-	}
-
-	return setup_wakeup(sleep_val);
+	return rpmh_flush(rpmh_client);
 }
 EXPORT_SYMBOL(system_sleep_enter);
 
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index c1eafbd..da51fed 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev)
 
 static int spi_engine_remove(struct platform_device *pdev)
 {
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct spi_engine *spi_engine = spi_master_get_devdata(master);
 	int irq = platform_get_irq(pdev, 0);
 
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
 
 	free_irq(irq, master);
 
+	spi_master_put(master);
+
 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
 	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 186e7ae..58a9308 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1006,7 +1006,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 
 			for (i = 0 ; i < mas->num_tx_eot; i++) {
 				timeout =
-				wait_for_completion_interruptible_timeout(
+				wait_for_completion_timeout(
 					&mas->tx_cb,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
 				if (timeout <= 0) {
@@ -1018,7 +1018,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 			}
 			for (i = 0 ; i < mas->num_rx_eot; i++) {
 				timeout =
-				wait_for_completion_interruptible_timeout(
+				wait_for_completion_timeout(
 					&mas->rx_cb,
 					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
 				if (timeout <= 0) {
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1de3a77..cbf02eb 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -862,7 +862,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
 				break;
 			copy32 = copy_bswap32;
 		} else if (bits <= 16) {
-			if (l & 1)
+			if (l & 3)
 				break;
 			copy32 = copy_wswap32;
 		} else {
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b..e0b9fe1 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	while (remaining_words) {
 		int n_words, tx_words, rx_words;
 		u32 sr;
+		int stalled;
 
 		n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
 		/* Read out all the data from the Rx FIFO */
 		rx_words = n_words;
+		stalled = 10;
 		while (rx_words) {
+			if (rx_words == n_words && !(stalled--) &&
+			    !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+			    (sr & XSPI_SR_RX_EMPTY_MASK)) {
+				dev_err(&spi->dev,
+					"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+				xspi_init_hw(xspi);
+				return -EIO;
+			}
+
 			if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
 				xilinx_spi_rx(xspi);
 				rx_words--;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d6089aa..6199523 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -726,6 +726,24 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
 	return 0;
 }
 
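+/* Only allow an IRQ to be requested if this EE owns the interrupt's APID */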
+static int qpnpint_irq_request_resources(struct irq_data *d)
+{
+	struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+	u16 periph = HWIRQ_PER(d->hwirq);
+	u16 apid = HWIRQ_APID(d->hwirq);
+	u16 sid = HWIRQ_SID(d->hwirq);
+	u16 irq = HWIRQ_IRQ(d->hwirq);
+
+	if (pmic_arb->apid_data[apid].irq_owner != pmic_arb->ee) {
+		dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
+			sid, periph, irq, pmic_arb->ee,
+			pmic_arb->apid_data[apid].irq_owner);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static struct irq_chip pmic_arb_irqchip = {
 	.name		= "pmic_arb",
 	.irq_ack	= qpnpint_irq_ack,
@@ -733,6 +751,7 @@ static struct irq_chip pmic_arb_irqchip = {
 	.irq_unmask	= qpnpint_irq_unmask,
 	.irq_set_type	= qpnpint_irq_set_type,
 	.irq_get_irqchip_state	= qpnpint_get_irqchip_state,
+	.irq_request_resources = qpnpint_irq_request_resources,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND
 			| IRQCHIP_SKIP_SET_WAKE,
 };
@@ -779,13 +798,6 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
 		return rc;
 	}
 
-	if (pa->apid_data[apid].irq_owner != pa->ee) {
-		dev_err(&pa->spmic->dev, "failed to xlate sid = 0x%x, periph = 0x%x, irq = %u: ee=%u but owner=%u\n",
-			intspec[0], intspec[1], intspec[2], pa->ee,
-			pa->apid_data[apid].irq_owner);
-		return -ENODEV;
-	}
-
 	/* Keep track of {max,min}_apid for bounding search during interrupt */
 	if (apid > pa->max_apid)
 		pa->max_apid = apid;
@@ -1252,6 +1264,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 		goto err_put_ctrl;
 	}
 
+	pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
+					sizeof(*pa->ppid_to_apid), GFP_KERNEL);
+	if (!pa->ppid_to_apid) {
+		err = -ENOMEM;
+		goto err_put_ctrl;
+	}
+
 	hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
 
 	if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1287,15 +1306,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 			err = PTR_ERR(pa->wr_base);
 			goto err_put_ctrl;
 		}
-
-		pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
-						PMIC_ARB_MAX_PPID,
-						sizeof(*pa->ppid_to_apid),
-						GFP_KERNEL);
-		if (!pa->ppid_to_apid) {
-			err = -ENOMEM;
-			goto err_put_ctrl;
-		}
 	}
 
 	dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 309b9cc..f3c8d51 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
-			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
-			ion_system_secure_heap.o
+			ion_carveout_heap.o ion_chunk_heap.o ion_system_secure_heap.o
+ifdef CONFIG_ION_MSM
+obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
+endif
 obj-$(CONFIG_ION_TEST) += ion_test.o
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_ION) += compat_ion.o
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
new file mode 100644
index 0000000..b2eac28
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -0,0 +1,902 @@
+/*
+ * drivers/staging/android/ion/ion_cma_secure_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
+#include <soc/qcom/secure_buffer.h>
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED NULL
+
+struct ion_secure_cma_non_contig_info {
+	dma_addr_t phys;
+	int len;
+	struct list_head entry;
+};
+
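+/* Per-buffer bookkeeping for an allocation from the secure CMA heap */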
+struct ion_secure_cma_buffer_info {
+	dma_addr_t phys;
+	struct sg_table *table;
+	bool is_cached;
+	int len;
+	struct list_head non_contig_list;
+	unsigned long ncelems;
+};
+
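+/*
+ * One dma_alloc_attrs() region held in the pool; cnt tracks how many bytes
+ * of the chunk are currently handed out to buffers.
+ */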
+struct ion_cma_alloc_chunk {
+	void *cpu_addr;
+	struct list_head entry;
+	dma_addr_t handle;
+	unsigned long chunk_size;
+	atomic_t cnt;
+};
+
+struct ion_cma_secure_heap {
+	struct device *dev;
+	/*
+	 * Protects against races between threads allocating memory/adding to
+	 * pool at the same time (e.g. thread 1 adds to the pool, thread 2
+	 * allocates thread 1's memory before thread 1 knows it needs to
+	 * allocate more).
+	 * Admittedly this is fairly coarse grained right now, but contention
+	 * on this lock is unlikely. This can be revisited if that ever
+	 * changes in the future.
+	 */
+	struct mutex alloc_lock;
+	/*
+	 * protects the list of memory chunks in this pool
+	 */
+	struct mutex chunk_lock;
+	struct ion_heap heap;
+	/*
+	 * Bitmap for allocation. This contains the aggregate of all chunks.
+	 */
+	unsigned long *bitmap;
+	/*
+	 * List of all allocated chunks
+	 *
+	 * This is where things get 'clever'. Individual allocations from
+	 * dma_alloc_coherent must be allocated and freed in one chunk.
+	 * We don't just want to limit the allocations to those confined
+	 * within a single chunk (if clients allocate n small chunks we would
+	 * never be able to use the combined size). The bitmap allocator is
+	 * used to find the contiguous region and the parts of the chunks are
+	 * marked off as used. The chunks won't be freed in the shrinker until
+	 * the usage is actually zero.
+	 */
+	struct list_head chunks;
+	int npages;
+	ion_phys_addr_t base;
+	struct work_struct work;
+	unsigned long last_alloc;
+	struct shrinker shrinker;
+	atomic_t total_allocated;
+	atomic_t total_pool_size;
+	atomic_t total_leaked;
+	unsigned long heap_size;
+	unsigned long default_prefetch_size;
+};
+
+static void ion_secure_pool_pages(struct work_struct *work);
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replace by dma_common_get_sgtable
+ * as soon as it will avalaible.
+ */
+static int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				      dma_addr_t handle, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	sg_dma_address(sgt->sgl) = handle;
+	return 0;
+}
+
+static int ion_secure_cma_add_to_pool(
+					struct ion_cma_secure_heap *sheap,
+					unsigned long len,
+					bool prefetch)
+{
+	void *cpu_addr;
+	dma_addr_t handle;
+	unsigned long attrs = 0;
+	int ret = 0;
+	struct ion_cma_alloc_chunk *chunk;
+
+	trace_ion_secure_cma_add_to_pool_start(len,
+			atomic_read(&sheap->total_pool_size), prefetch);
+	mutex_lock(&sheap->chunk_lock);
+
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (!chunk) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_ZEROING;
+
+	cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
+				   attrs);
+
+	if (!cpu_addr) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	chunk->cpu_addr = cpu_addr;
+	chunk->handle = handle;
+	chunk->chunk_size = len;
+	atomic_set(&chunk->cnt, 0);
+	list_add(&chunk->entry, &sheap->chunks);
+	atomic_add(len, &sheap->total_pool_size);
+	 /* clear the bitmap to indicate this region can be allocated from */
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+		     len >> PAGE_SHIFT);
+	goto out;
+
+out_free:
+	kfree(chunk);
+out:
+	mutex_unlock(&sheap->chunk_lock);
+
+	trace_ion_secure_cma_add_to_pool_end(len,
+			atomic_read(&sheap->total_pool_size), prefetch);
+
+	return ret;
+}
+
+static void ion_secure_pool_pages(struct work_struct *work)
+{
+	struct ion_cma_secure_heap *sheap = container_of(work,
+			struct ion_cma_secure_heap, work);
+
+	ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
+}
+
+/*
+ * @s1: start of the first region
+ * @l1: length of the first region
+ * @s2: start of the second region
+ * @l2: length of the second region
+ *
+ * Returns the total number of bytes that intersect.
+ *
+ * s1 is the region we are trying to clear, so s2 may be subsumed by s1, but
+ * the maximum size to clear should only ever be l1.
+ *
+ */
+static unsigned int intersect(unsigned long s1, unsigned long l1,
+			      unsigned long s2, unsigned long l2)
+{
+	unsigned long base1 = s1;
+	unsigned long end1 = s1 + l1;
+	unsigned long base2 = s2;
+	unsigned long end2 = s2 + l2;
+
+	/* Case 0: The regions don't overlap at all */
+	if (!(base1 < end2 && base2 < end1))
+		return 0;
+
+	/* Case 1: region 2 is subsumed by region 1 */
+	if (base1 <= base2 && end2 <= end1)
+		return l2;
+
+	/* case 2: region 1 is subsumed by region 2 */
+	if (base2 <= base1 && end1 <= end2)
+		return l1;
+
+	/* case 3: region1 overlaps region2 on the bottom */
+	if (base2 < end1 && base2 > base1)
+		return end1 - base2;
+
+	/* case 4: region 2 overlaps region1 on the bottom */
+	if (base1 < end2 && base1 > base2)
+		return end2 - base1;
+
+	pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
+	       s1, l1, s2, l2);
+	WARN_ON(1);
+	/* return max intersection value, so that it will fail later */
+	return (unsigned int)(~0);
+}
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	unsigned long len = (unsigned long)data;
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	unsigned long diff;
+
+	if ((int)heap->type != ION_HEAP_TYPE_SECURE_DMA)
+		return -EINVAL;
+
+	if (len == 0)
+		len = sheap->default_prefetch_size;
+
+	/*
+	 * Only prefetch as much space as there is left in the pool so
+	 * check against the current free size of the heap.
+	 * This is slightly racy if someone else is allocating at the same
+	 * time. CMA has a restricted size for the heap so worst case
+	 * the prefetch doesn't work because the allocation fails.
+	 */
+	diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
+
+	if (len > diff)
+		len = diff;
+
+	sheap->last_alloc = len;
+	trace_ion_prefetching(sheap->last_alloc);
+	schedule_work(&sheap->work);
+
+	return 0;
+}
+
+static void bad_math_dump(unsigned long len, int total_overlap,
+			  struct ion_cma_secure_heap *sheap,
+			  bool alloc, dma_addr_t paddr)
+{
+	struct list_head *entry;
+
+	pr_err("Bad math! expected total was %lx actual was %x\n",
+	       len, total_overlap);
+	pr_err("attempted %s address was %pa len %lx\n",
+	       alloc ? "allocation" : "free", &paddr, len);
+	pr_err("chunks:\n");
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk =
+			container_of(entry,
+				     struct ion_cma_alloc_chunk, entry);
+		pr_info("---   pa %pa len %lx\n",
+			&chunk->handle, chunk->chunk_size);
+	}
+	WARN(1, "mismatch in the sizes of secure cma chunks\n");
+}
+
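+/*
+ * Reserve a free run of pages in the aggregate bitmap and charge the bytes
+ * that fall within each pooled chunk against that chunk's usage count.
+ */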
+static int ion_secure_cma_alloc_from_pool(
+					struct ion_cma_secure_heap *sheap,
+					dma_addr_t *phys,
+					unsigned long len)
+{
+	dma_addr_t paddr;
+	unsigned long page_no;
+	int ret = 0;
+	int total_overlap = 0;
+	struct list_head *entry;
+
+	mutex_lock(&sheap->chunk_lock);
+
+	page_no = bitmap_find_next_zero_area(sheap->bitmap,
+					     sheap->npages, 0,
+					     len >> PAGE_SHIFT, 0);
+	if (page_no >= sheap->npages) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
+	paddr = sheap->base + (page_no << PAGE_SHIFT);
+
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, paddr, len);
+
+		atomic_add(overlap, &chunk->cnt);
+		total_overlap += overlap;
+	}
+
+	if (total_overlap != len) {
+		bad_math_dump(len, total_overlap, sheap, 1, paddr);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	*phys = paddr;
+out:
+	mutex_unlock(&sheap->chunk_lock);
+	return ret;
+}
+
+static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
+				      struct ion_cma_alloc_chunk *chunk)
+{
+	unsigned long attrs = 0;
+
+	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+	/* This region is 'allocated' and not available to allocate from */
+	bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
+		   chunk->chunk_size >> PAGE_SHIFT);
+	dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
+		       chunk->handle, attrs);
+	atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
+	list_del(&chunk->entry);
+	kfree(chunk);
+}
+
+static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
+					 int max_nr)
+{
+	struct list_head *entry, *_n;
+	unsigned long drained_size = 0, skipped_size = 0;
+
+	trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+
+		if (max_nr < 0)
+			break;
+
+		if (atomic_read(&chunk->cnt) == 0) {
+			max_nr -= chunk->chunk_size;
+			drained_size += chunk->chunk_size;
+			ion_secure_cma_free_chunk(sheap, chunk);
+		} else {
+			skipped_size += chunk->chunk_size;
+		}
+	}
+
+	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+}
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	mutex_lock(&sheap->chunk_lock);
+	__ion_secure_cma_shrink_pool(sheap, INT_MAX);
+	mutex_unlock(&sheap->chunk_lock);
+
+	return 0;
+}
+
+static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
+					     struct shrink_control *sc)
+{
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	int nr_to_scan = sc->nr_to_scan;
+
+	/*
+	 * Allocation path may invoke the shrinker. Proceeding any further
+	 * would cause a deadlock in several places so don't shrink if that
+	 * happens.
+	 */
+	if (!mutex_trylock(&sheap->chunk_lock))
+		return -EAGAIN;
+
+	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+
+	mutex_unlock(&sheap->chunk_lock);
+
+	return atomic_read(&sheap->total_pool_size);
+}
+
+static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
+						   struct shrink_control *sc)
+{
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	return atomic_read(&sheap->total_pool_size);
+}
+
+static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
+					  dma_addr_t handle,
+					  unsigned long len)
+{
+	struct list_head *entry, *_n;
+	int total_overlap = 0;
+
+	mutex_lock(&sheap->chunk_lock);
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+		     len >> PAGE_SHIFT);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, handle, len);
+
+		/*
+		 * Don't actually free this from the pool list yet, let either
+		 * an explicit drain call or the shrinkers take care of the
+		 * pool.
+		 */
+		atomic_sub_return(overlap, &chunk->cnt);
+		if (atomic_read(&chunk->cnt) < 0) {
+			WARN(1, "Invalid chunk size of %d\n",
+			     atomic_read(&chunk->cnt));
+			goto out;
+		}
+
+		total_overlap += overlap;
+	}
+
+	if (atomic_read(&sheap->total_pool_size) < 0) {
+		WARN(1, "total pool size of %d is unexpected\n",
+		     atomic_read(&sheap->total_pool_size));
+		goto out;
+	}
+
+	if (total_overlap != len)
+		bad_math_dump(len, total_overlap, sheap, 0, handle);
+out:
+	mutex_unlock(&sheap->chunk_lock);
+}
+
+/* ION CMA heap operations functions */
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
+			    struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ION_CMA_ALLOCATE_FAILED;
+
+	mutex_lock(&sheap->alloc_lock);
+	ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+
+	if (ret) {
+retry:
+		ret = ion_secure_cma_add_to_pool(sheap, len, false);
+		if (ret) {
+			mutex_unlock(&sheap->alloc_lock);
+			dev_err(sheap->dev, "Fail to allocate buffer\n");
+			goto err;
+		}
+		ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+		if (ret) {
+			/*
+			 * Lost the race with the shrinker, try again
+			 */
+			goto retry;
+		}
+	}
+	mutex_unlock(&sheap->alloc_lock);
+
+	atomic_add(len, &sheap->total_allocated);
+	info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+
+	info->len = len;
+	ion_secure_cma_get_sgtable(sheap->dev,
+				   info->table, info->phys, len);
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap,
+					     struct ion_secure_cma_buffer_info
+					     *info)
+{
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) {
+		ion_secure_cma_free_from_pool(sheap, nc_info->phys,
+					      nc_info->len);
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+}
+
+static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap,
+				  struct ion_secure_cma_buffer_info *info,
+				  bool release_memory)
+{
+	if (release_memory) {
+		if (info->ncelems)
+			__ion_secure_cma_free_non_contig(sheap, info);
+		else
+			ion_secure_cma_free_from_pool(sheap, info->phys,
+						      info->len);
+	}
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate_non_contig(
+			struct ion_heap *heap, struct ion_buffer *buffer,
+			unsigned long len, unsigned long align,
+			unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+	unsigned long alloc_size = len;
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+	unsigned long ncelems = 0;
+	struct scatterlist *sg;
+	unsigned long total_allocated = 0;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ION_CMA_ALLOCATE_FAILED;
+
+	INIT_LIST_HEAD(&info->non_contig_list);
+	info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+	mutex_lock(&sheap->alloc_lock);
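+	/*
+	 * Build the buffer from multiple pool chunks: try the full remaining
+	 * size first and halve the request (keeping it 1 MB aligned) whenever
+	 * the pool cannot be grown, until the whole length is covered.
+	 */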
+	while (total_allocated < len) {
+		if (alloc_size < SZ_1M) {
+			pr_err("Cannot allocate less than 1MB\n");
+			goto err2;
+		}
+		nc_info = kzalloc(sizeof(*nc_info), GFP_KERNEL);
+		if (!nc_info)
+			goto err2;
+
+		ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys,
+						     alloc_size);
+		if (ret) {
+retry:
+			ret = ion_secure_cma_add_to_pool(sheap, alloc_size,
+							 false);
+			if (ret) {
+				alloc_size = alloc_size / 2;
+				if (!IS_ALIGNED(alloc_size, SZ_1M))
+					alloc_size = round_down(alloc_size,
+								SZ_1M);
+				kfree(nc_info);
+				continue;
+			}
+			ret = ion_secure_cma_alloc_from_pool(sheap,
+							     &nc_info->phys,
+							     alloc_size);
+			if (ret) {
+				/*
+				 * Lost the race with the shrinker, try again
+				 */
+				goto retry;
+			}
+		}
+		nc_info->len = alloc_size;
+		list_add_tail(&nc_info->entry, &info->non_contig_list);
+		ncelems++;
+		total_allocated += alloc_size;
+		alloc_size = min(alloc_size, len - total_allocated);
+	}
+	mutex_unlock(&sheap->alloc_lock);
+	atomic_add(total_allocated, &sheap->total_allocated);
+
+	nc_info = list_first_entry_or_null(&info->non_contig_list,
+					   struct
+					   ion_secure_cma_non_contig_info,
+					   entry);
+	if (!nc_info) {
+		pr_err("%s: Unable to find first entry of non contig list\n",
+		       __func__);
+		goto err1;
+	}
+	info->phys = nc_info->phys;
+	info->len = total_allocated;
+	info->ncelems = ncelems;
+
+	ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL);
+	if (unlikely(ret))
+		goto err1;
+
+	sg = info->table->sgl;
+	list_for_each_entry(nc_info, &info->non_contig_list, entry) {
+		sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0);
+		sg_dma_address(sg) = nc_info->phys;
+		sg = sg_next(sg);
+	}
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err2:
+	mutex_unlock(&sheap->alloc_lock);
+err1:
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
+				 entry) {
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+	kfree(info->table);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+				   struct ion_buffer *buffer,
+				   unsigned long len, unsigned long align,
+				   unsigned long flags)
+{
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+	struct ion_secure_cma_buffer_info *buf = NULL;
+	unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
+
+	if (!secure_allocation &&
+	    !ion_heap_allow_secure_allocation(heap->type)) {
+		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
+		       __func__, heap->name, flags);
+		return -ENOMEM;
+	}
+
+	if (ION_IS_CACHED(flags)) {
+		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+		       __func__, heap->name);
+		return -ENOMEM;
+	}
+
+	if (!IS_ALIGNED(len, SZ_1M)) {
+		pr_err("%s: length of allocation from %s must be a multiple of 1MB\n",
+		       __func__, heap->name);
+		return -ENOMEM;
+	}
+	trace_ion_secure_cma_allocate_start(heap->name, len, align, flags);
+	if (!allow_non_contig)
+		buf = __ion_secure_cma_allocate(heap, buffer, len, align,
+						flags);
+	else
+		buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
+							   align, flags);
+	trace_ion_secure_cma_allocate_end(heap->name, len, align, flags);
+	if (buf) {
+		int ret;
+
+		if (!msm_secure_v2_is_supported()) {
+			pr_err("%s: securing buffers from clients is not supported on this platform\n",
+			       __func__);
+			ret = 1;
+		} else {
+			trace_ion_cp_secure_buffer_start(heap->name, len, align,
+							 flags);
+			ret = msm_secure_table(buf->table);
+			trace_ion_cp_secure_buffer_end(heap->name, len, align,
+						       flags);
+		}
+		if (ret) {
+			struct ion_cma_secure_heap *sheap =
+				container_of(buffer->heap,
+					     struct ion_cma_secure_heap, heap);
+
+			pr_err("%s: failed to secure buffer\n", __func__);
+			__ion_secure_cma_free(sheap, buf, true);
+		}
+		return ret;
+	} else {
+		return -ENOMEM;
+	}
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(buffer->heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+	int ret = 0;
+
+	dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
+	if (msm_secure_v2_is_supported())
+		ret = msm_unsecure_table(info->table);
+	atomic_sub(buffer->size, &sheap->total_allocated);
+	if (atomic_read(&sheap->total_allocated) < 0) {
+		WARN(1, "total_allocated underflow for this pool\n");
+		return;
+	}
+
+	/* release memory */
+	if (ret) {
+		WARN(1, "Unsecure failed, can't free the memory. Leaking it!\n");
+		atomic_add(buffer->size, &sheap->total_leaked);
+	}
+
+	__ion_secure_cma_free(sheap, info, ret ? false : true);
+}
+
+static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			       ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n",
+		buffer, &info->phys);
+
+	*addr = info->phys;
+	*len = buffer->size;
+
+	return 0;
+}
+
+static struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
+						    struct ion_buffer *buffer)
+{
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+static void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
+					  struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_mmap(struct ion_heap *mapper,
+			       struct ion_buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	pr_info("%s: mmapping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+				       struct ion_buffer *buffer)
+{
+	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return ERR_PTR(-EINVAL);
+}
+
+static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+				      const struct list_head *mem_map)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	if (mem_map) {
+		struct mem_map_data *data;
+
+		seq_puts(s, "\nMemory Map\n");
+		seq_printf(s, "%16s %14s %14s %14s\n",
+			   "client", "start address", "end address",
+			   "size");
+
+		list_for_each_entry(data, mem_map, node) {
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	seq_printf(s, "Total allocated: 0x%x\n",
+		   atomic_read(&sheap->total_allocated));
+	seq_printf(s, "Total pool size: 0x%x\n",
+		   atomic_read(&sheap->total_pool_size));
+	seq_printf(s, "Total memory leaked due to unlock failures: 0x%x\n",
+		   atomic_read(&sheap->total_leaked));
+
+	return 0;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_secure_cma_heap_map_dma,
+	.unmap_dma = ion_secure_cma_heap_unmap_dma,
+	.phys = ion_secure_cma_phys,
+	.map_user = ion_secure_cma_mmap,
+	.map_kernel = ion_secure_cma_map_kernel,
+	.unmap_kernel = ion_secure_cma_unmap_kernel,
+	.print_debug = ion_secure_cma_print_debug,
+};
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_secure_heap *sheap;
+	int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
+
+	sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
+	if (!sheap)
+		return ERR_PTR(-ENOMEM);
+
+	sheap->dev = data->priv;
+	mutex_init(&sheap->chunk_lock);
+	mutex_init(&sheap->alloc_lock);
+	sheap->heap.ops = &ion_secure_cma_ops;
+	sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA;
+	sheap->npages = data->size >> PAGE_SHIFT;
+	sheap->base = data->base;
+	sheap->heap_size = data->size;
+	sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
+	INIT_LIST_HEAD(&sheap->chunks);
+	INIT_WORK(&sheap->work, ion_secure_pool_pages);
+	sheap->shrinker.seeks = DEFAULT_SEEKS;
+	sheap->shrinker.batch = 0;
+	sheap->shrinker.scan_objects = ion_secure_cma_shrinker;
+	sheap->shrinker.count_objects = ion_secure_cma_shrinker_count;
+	sheap->default_prefetch_size = sheap->heap_size;
+	register_shrinker(&sheap->shrinker);
+
+	if (!sheap->bitmap) {
+		kfree(sheap);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (data->extra_data) {
+		struct ion_cma_pdata *extra = data->extra_data;
+
+		sheap->default_prefetch_size = extra->default_prefetch_size;
+	}
+
+	/*
+	 * Initially mark the whole range as unavailable; bits are cleared as
+	 * CMA chunks are added to the pool and become allocatable.
+	 */
+	bitmap_fill(sheap->bitmap, sheap->npages);
+
+	return &sheap->heap;
+}
+
+void ion_secure_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	kfree(sheap);
+}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index bb119cc..775c666 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,6 +26,7 @@
 #include <linux/rbtree.h>
 #include <linux/seq_file.h>
 
+#include "msm_ion_priv.h"
 #include <linux/sched.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 9d53391..3771726 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -404,6 +404,28 @@ static int msm_init_extra_data(struct device_node *node,
 			ret = -ENOMEM;
 		break;
 	}
+	case ION_HEAP_TYPE_SECURE_DMA:
+	{
+		unsigned int val;
+		struct ion_cma_pdata *extra = NULL;
+
+		ret = of_property_read_u32(node,
+					   "qcom,default-prefetch-size", &val);
+		if (!ret) {
+			heap->extra_data = kzalloc(sizeof(*extra),
+						   GFP_KERNEL);
+
+			if (!heap->extra_data) {
+				ret = -ENOMEM;
+			} else {
+				extra = heap->extra_data;
+				extra->default_prefetch_size = val;
+			}
+		} else {
+			ret = 0;
+		}
+		break;
+	}
 	default:
 		heap->extra_data = 0;
 		break;
@@ -423,6 +445,7 @@ static struct heap_types_info {
 	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
 	MAKE_HEAP_TYPE_MAPPING(CHUNK),
 	MAKE_HEAP_TYPE_MAPPING(DMA),
+	MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
 	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
 	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
 };
@@ -609,6 +632,16 @@ int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
 	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
 }
 
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
 int ion_heap_allow_heap_secure(enum ion_heap_type type)
 {
 	return false;
@@ -796,6 +829,13 @@ long msm_ion_custom_ioctl(struct ion_client *client,
 		int ret;
 
 		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+			ION_HEAP_TYPE_SECURE_DMA,
+			(void *)data.prefetch_data.len,
+			ion_secure_cma_prefetch);
+		if (ret)
+			return ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
 				     ION_HEAP_TYPE_SYSTEM_SECURE,
 				     (void *)&data.prefetch_data,
 				     ion_system_secure_heap_prefetch);
@@ -806,6 +846,13 @@ long msm_ion_custom_ioctl(struct ion_client *client,
 	case ION_IOC_DRAIN:
 	{
 		int ret;
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SECURE_DMA,
+				     (void *)data.prefetch_data.len,
+				     ion_secure_cma_drain_pool);
+
+		if (ret)
+			return ret;
 
 		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
 				     ION_HEAP_TYPE_SYSTEM_SECURE,
@@ -959,6 +1006,11 @@ static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
 	struct ion_heap *heap = NULL;
 
 	switch ((int)heap_data->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		heap = ion_secure_cma_heap_create(heap_data);
+		break;
+#endif
 	case ION_HEAP_TYPE_SYSTEM_SECURE:
 		heap = ion_system_secure_heap_create(heap_data);
 		break;
@@ -988,6 +1040,11 @@ static void msm_ion_heap_destroy(struct ion_heap *heap)
 		return;
 
 	switch ((int)heap->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		ion_secure_cma_heap_destroy(heap);
+		break;
+#endif
 	case ION_HEAP_TYPE_SYSTEM_SECURE:
 		ion_system_secure_heap_destroy(heap);
 		break;
diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h
new file mode 100644
index 0000000..bbf2e8b
--- /dev/null
+++ b/drivers/staging/android/ion/msm_ion_priv.h
@@ -0,0 +1,112 @@
+/*
+ * drivers/staging/android/ion/msm_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap);
+void ion_iommu_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap);
+void ion_cp_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
+void ion_system_secure_heap_destroy(struct ion_heap *heap);
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
+void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+			  unsigned int cmd,
+			  unsigned long arg);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *heap);
+void ion_secure_cma_heap_destroy(struct ion_heap *heap);
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
+
+#else
+static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	return -ENODEV;
+}
+
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *pheap);
+void ion_removed_heap_destroy(struct ion_heap *heap);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+			  unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+int get_secure_vmid(unsigned long flags);
+
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base:	The starting address used for the sg dma address
+ * @chunk_size:		The size of each entry in the sg table
+ * @total_size:		The total size of the sg table (i.e. the sum of the
+ *			entries). This will be rounded up to the nearest
+ *			multiple of `chunk_size'
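+ *
+ * Example (hypothetical values): splitting a 1 MB region based at
+ * physical address 0x80000000 into 64 KB chunks yields a table with
+ * 16 entries:
+ *
+ *	sgt = ion_create_chunked_sg_table(0x80000000, SZ_64K, SZ_1M);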
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					     size_t chunk_size,
+					     size_t total_size);
+
+void show_ion_usage(struct ion_device *dev);
+#endif /* _MSM_ION_PRIV_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 4747949..41f1a19 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -431,8 +431,10 @@ static void mark_lmk_victim(struct task_struct *tsk)
 {
 	struct mm_struct *mm = tsk->mm;
 
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 }
 
 static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 4f9dd73..d510fda 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -92,6 +92,11 @@ enum cp_mem_usage {
 #define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
 #define ION_FLAG_CP_SPSS_HLOS_SHARED	ION_BIT(30)
 
+/**
+ * Flag to allow non-contiguous allocation of memory from the secure
+ * heap.
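+ * The buffer may then be built from several smaller CMA chunks and is
+ * described by a multi-entry sg table.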
+ */
+#define ION_FLAG_ALLOW_NON_CONTIG       ION_BIT(28)
 
 /**
  * Flag to use when allocating to indicate that a heap is secure.
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 8dffd8a..9f01427 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -924,6 +924,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel)
 		return;
 
 	led_classdev_unregister(cdev);
+	kfree(cdev->name);
+	cdev->name = NULL;
 	channel->led = NULL;
 }
 
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 29dc249..3c2c233 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1034,8 +1034,10 @@ static int gb_loopback_fn(void *data)
 				error = gb_loopback_async_sink(gb, size);
 			}
 
-			if (error)
+			if (error) {
 				gb->error++;
+				gb->iteration_count++;
+			}
 		} else {
 			/* We are effectively single threaded here */
 			if (type == GB_LOOPBACK_TYPE_PING)
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a07..50a5b0c2 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -274,7 +274,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
 error_ret:
 	mutex_unlock(&chip->state_lock);
 
-	return 0;
+	return ret;
 }
 
 static int ad7150_read_event_value(struct iio_dev *indio_dev,
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 4366918..27333d9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -401,15 +401,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		result = VM_FAULT_LOCKED;
 		break;
 	case -ENODATA:
+	case -EAGAIN:
 	case -EFAULT:
 		result = VM_FAULT_NOPAGE;
 		break;
 	case -ENOMEM:
 		result = VM_FAULT_OOM;
 		break;
-	case -EAGAIN:
-		result = VM_FAULT_RETRY;
-		break;
 	default:
 		result = VM_FAULT_SIGBUS;
 		break;
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 057c9b5..75e6d5e 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -288,10 +288,10 @@ static void cec_data_cancel(struct cec_data *data)
 
 	/* Mark it as an error */
 	data->msg.tx_ts = ktime_get_ns();
-	data->msg.tx_status = CEC_TX_STATUS_ERROR |
-			      CEC_TX_STATUS_MAX_RETRIES;
+	data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+			       CEC_TX_STATUS_MAX_RETRIES;
+	data->msg.tx_error_cnt++;
 	data->attempts = 0;
-	data->msg.tx_error_cnt = 1;
 	/* Queue transmitted message for monitoring purposes */
 	cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -608,8 +608,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
 	}
 	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
 	if (msg->len == 1) {
-		if (cec_msg_initiator(msg) != 0xf ||
-		    cec_msg_destination(msg) == 0xf) {
+		if (cec_msg_destination(msg) == 0xf) {
 			dprintk(1, "cec_transmit_msg: invalid poll message\n");
 			return -EINVAL;
 		}
@@ -634,7 +633,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
 		dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
 		return -EINVAL;
 	}
-	if (cec_msg_initiator(msg) != 0xf &&
+	if (msg->len > 1 && adap->is_configured &&
 	    !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
 		dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
 			cec_msg_initiator(msg));
@@ -883,7 +882,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
 
 	/* Send poll message */
 	msg.len = 1;
-	msg.msg[0] = 0xf0 | log_addr;
+	msg.msg[0] = (log_addr << 4) | log_addr;
 	err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
 	/*
@@ -1062,6 +1061,8 @@ static int cec_config_thread_func(void *arg)
 		for (i = 1; i < las->num_log_addrs; i++)
 			las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	}
+	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	adap->is_configured = true;
 	adap->is_configuring = false;
 	cec_post_state_event(adap);
@@ -1079,8 +1080,6 @@ static int cec_config_thread_func(void *arg)
 			cec_report_features(adap, i);
 		cec_report_phys_addr(adap, i);
 	}
-	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
 	mutex_lock(&adap->lock);
 	adap->kthread_config = NULL;
 	mutex_unlock(&adap->lock);
@@ -1557,9 +1556,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
 	}
 
 	case CEC_MSG_GIVE_FEATURES:
-		if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-			return cec_report_features(adap, la_idx);
-		return 0;
+		if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+			return cec_feature_abort(adap, msg);
+		return cec_report_features(adap, la_idx);
 
 	default:
 		/*
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788..6051a7b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -342,7 +342,7 @@ u8 rtw_createbss_cmd(struct adapter  *padapter)
 	else
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
 
-	pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+	pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
 	if (!pcmd) {
 		res = _FAIL;
 		goto exit;
@@ -522,7 +522,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
 
 	if (enqueue) {
 		/* need enqueue, prepare cmd_obj and enqueue */
-		cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+		cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
 		if (!cmdobj) {
 			res = _FAIL;
 			kfree(param);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd0..0b60d1e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -107,10 +107,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
 
 void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
 {
-	rtw_free_mlme_priv_ie_data(pmlmepriv);
-
-	if (pmlmepriv)
+	if (pmlmepriv) {
+		rtw_free_mlme_priv_ie_data(pmlmepriv);
 		vfree(pmlmepriv->free_bss_buf);
+	}
 }
 
 struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eeac..ab96629 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1698,10 +1698,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
 	MACbShutdown(priv);
 
 	pci_disable_device(pcid);
-	pci_set_power_state(pcid, pci_choose_state(pcid, state));
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index f3c9d18..72e926d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -841,6 +841,7 @@ static int iscsit_add_reject_from_cmd(
 	unsigned char *buf)
 {
 	struct iscsi_conn *conn;
+	const bool do_put = cmd->se_cmd.se_tfo != NULL;
 
 	if (!cmd->conn) {
 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -871,7 +872,7 @@ static int iscsit_add_reject_from_cmd(
 	 * Perform the kref_put now if se_cmd has already been setup by
 	 * scsit_setup_scsi_cmd()
 	 */
-	if (cmd->se_cmd.se_tfo != NULL) {
+	if (do_put) {
 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
 		target_put_sess_cmd(&cmd->se_cmd);
 	}
@@ -2104,12 +2105,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
 			out_of_order_cmdsn = 1;
-		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			target_put_sess_cmd(&cmd->se_cmd);
 			return 0;
-		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
 			return -1;
+		}
 	}
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 9cbbc9c..8a4bc15 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1144,7 +1144,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 
 	ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
 	if (ret < 0)
-		return NULL;
+		goto free_out;
 
 	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
 	if (ret != 0)
@@ -1156,6 +1156,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
 	return &tpg->tpg_se_tpg;
 out:
 	core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
 	kfree(tpg);
 	return NULL;
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe..ee5b29a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1010,7 +1010,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
 	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
 			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1073,17 +1073,8 @@ static int core_alua_do_transition_tg_pt(
 	/*
 	 * Flush any pending transitions
 	 */
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
-	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
-	    ALUA_ACCESS_STATE_TRANSITION) {
-		/* Just in case */
-		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
-		wait_for_completion(&wait);
-		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-		return 0;
-	}
+	if (!explicit)
+		flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Save the old primary ALUA access state, and set the current state
@@ -1114,17 +1105,9 @@ static int core_alua_do_transition_tg_pt(
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
-		unsigned long transition_tmo;
-
-		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work,
-				   transition_tmo);
-	} else {
+	schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	if (explicit) {
 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
 		wait_for_completion(&wait);
 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
 	}
@@ -1692,8 +1675,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-			  core_alua_do_transition_tg_pt_work);
+	INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+		  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1801,7 +1784,7 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+	flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
 
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 29f807b..97928b4 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -466,6 +466,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 	struct inode *inode = file->f_mapping->host;
 	int ret;
 
+	if (!nolb)
+		return 0;
+
 	if (cmd->se_dev->dev_attrib.pi_prot_type) {
 		ret = fd_do_prot_unmap(cmd, lba, nolb);
 		if (ret)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 47463c9..df20921 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -56,8 +56,10 @@ void core_pr_dump_initiator_port(
 	char *buf,
 	u32 size)
 {
-	if (!pr_reg->isid_present_at_reg)
+	if (!pr_reg->isid_present_at_reg) {
 		buf[0] = '\0';
+		return;
+	}
 
 	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
 }
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bacfa8f..4c0782c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1976,6 +1976,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
 		list_del(&cmd->se_delayed_node);
 		spin_unlock(&dev->delayed_cmd_lock);
 
+		cmd->transport_state |= CMD_T_SENT;
+
 		__target_execute_cmd(cmd, true);
 
 		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2013,6 +2015,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
 			 dev->dev_cur_ordered_id);
 	}
+	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
 restart:
 	target_restart_delayed_cmds(dev);
 }
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index f642966..c5285ed 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -35,8 +35,9 @@
 #define TEMP0_RST_MSK			(0x1C)
 #define TEMP0_VALUE			(0x28)
 
-#define HISI_TEMP_BASE			(-60)
+#define HISI_TEMP_BASE			(-60000)
 #define HISI_TEMP_RESET			(100000)
+#define HISI_TEMP_STEP			(784)
 
 #define HISI_MAX_SENSORS		4
 
@@ -61,19 +62,38 @@ struct hisi_thermal_data {
 	void __iomem *regs;
 };
 
-/* in millicelsius */
-static inline int _step_to_temp(int step)
+/*
+ * The temperature computation on the tsensor is as follows:
+ *	Unit: millidegree Celsius
+ *	Step: 200/255 (0.7843)
+ *	Temperature base: -60°C
+ *
+ * The register is programmed in temperature steps; every step is 784
+ * millidegrees Celsius, starting from a base of -60000 m°C.
+ *
+ * The temperature from the steps:
+ *
+ *	Temp = TempBase + (steps x 784)
+ *
+ * and the steps from the temperature:
+ *
+ *	steps = (Temp - TempBase) / 784
+ *
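+ * For example, a raw reading of 100 steps corresponds to
+ * -60000 + (100 * 784) = 18400 millidegrees Celsius (18.4 °C).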
+ */
+static inline int hisi_thermal_step_to_temp(int step)
 {
-	/*
-	 * Every step equals (1 * 200) / 255 celsius, and finally
-	 * need convert to millicelsius.
-	 */
-	return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
+	return HISI_TEMP_BASE + (step * HISI_TEMP_STEP);
 }
 
-static inline long _temp_to_step(long temp)
+static inline long hisi_thermal_temp_to_step(long temp)
 {
-	return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
+	return (temp - HISI_TEMP_BASE) / HISI_TEMP_STEP;
+}
+
+static inline long hisi_thermal_round_temp(int temp)
+{
+	return hisi_thermal_step_to_temp(
+		hisi_thermal_temp_to_step(temp));
 }
 
 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
@@ -99,7 +119,7 @@ static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
 	usleep_range(3000, 5000);
 
 	val = readl(data->regs + TEMP0_VALUE);
-	val = _step_to_temp(val);
+	val = hisi_thermal_step_to_temp(val);
 
 	mutex_unlock(&data->thermal_lock);
 
@@ -126,10 +146,11 @@ static void hisi_thermal_enable_bind_irq_sensor
 	writel((sensor->id << 12), data->regs + TEMP0_CFG);
 
 	/* enable for interrupt */
-	writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
+	writel(hisi_thermal_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
 	       data->regs + TEMP0_TH);
 
-	writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
+	writel(hisi_thermal_temp_to_step(HISI_TEMP_RESET),
+	       data->regs + TEMP0_RST_TH);
 
 	/* enable module */
 	writel(0x1, data->regs + TEMP0_RST_MSK);
@@ -230,7 +251,7 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
 	sensor = &data->sensors[data->irq_bind_sensor];
 
 	dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
-		 sensor->thres_temp / 1000);
+		 sensor->thres_temp);
 	mutex_unlock(&data->thermal_lock);
 
 	for (i = 0; i < HISI_MAX_SENSORS; i++) {
@@ -269,7 +290,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
 
 	for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
 		if (trip[i].type == THERMAL_TRIP_PASSIVE) {
-			sensor->thres_temp = trip[i].temperature;
+			sensor->thres_temp = hisi_thermal_round_temp(trip[i].temperature);
 			break;
 		}
 	}
@@ -317,15 +338,6 @@ static int hisi_thermal_probe(struct platform_device *pdev)
 	if (data->irq < 0)
 		return data->irq;
 
-	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
-					hisi_thermal_alarm_irq,
-					hisi_thermal_alarm_irq_thread,
-					0, "hisi_thermal", data);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
-		return ret;
-	}
-
 	platform_set_drvdata(pdev, data);
 
 	data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
@@ -345,8 +357,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
 	}
 
 	hisi_thermal_enable_bind_irq_sensor(data);
-	irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
-			      &data->irq_enabled);
+	data->irq_enabled = true;
 
 	for (i = 0; i < HISI_MAX_SENSORS; ++i) {
 		ret = hisi_thermal_register_sensor(pdev, data,
@@ -358,6 +369,17 @@ static int hisi_thermal_probe(struct platform_device *pdev)
 			hisi_thermal_toggle_sensor(&data->sensors[i], true);
 	}
 
+	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+					hisi_thermal_alarm_irq,
+					hisi_thermal_alarm_irq_thread,
+					0, "hisi_thermal", data);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+		return ret;
+	}
+
+	enable_irq(data->irq);
+
 	return 0;
 }
 
@@ -397,8 +419,11 @@ static int hisi_thermal_suspend(struct device *dev)
 static int hisi_thermal_resume(struct device *dev)
 {
 	struct hisi_thermal_data *data = dev_get_drvdata(dev);
+	int ret;
 
-	clk_prepare_enable(data->clk);
+	ret = clk_prepare_enable(data->clk);
+	if (ret)
+		return ret;
 
 	data->irq_enabled = true;
 	hisi_thermal_enable_bind_irq_sensor(data);
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
index 8cb7dc3..9b4fae8 100644
--- a/drivers/thermal/qcom/qti_virtual_sensor.c
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,21 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = {
 				"cpu1-gold-usr"},
 		.logic = VIRT_MAXIMUM,
 	},
+	{
+		.virt_zone_name = "deca-cpu-max-step",
+		.num_sensors = 10,
+		.sensor_names = {"apc0-cpu0-usr",
+				"apc0-cpu1-usr",
+				"apc0-cpu2-usr",
+				"apc0-cpu3-usr",
+				"apc0-l2-usr",
+				"apc1-cpu0-usr",
+				"apc1-cpu1-usr",
+				"apc1-cpu2-usr",
+				"apc1-cpu3-usr",
+				"apc1-l2-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
 };
 
 int qti_virtual_sensor_register(struct device *dev)
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 152b2a2..bec3dea 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -256,22 +256,7 @@ static int32_t qpnp_adc_tm_enable(struct qpnp_adc_tm_chip *chip)
 
 static int32_t qpnp_adc_tm_disable(struct qpnp_adc_tm_chip *chip)
 {
-	u8 data = 0;
-	int rc = 0;
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_BTM_CONV_REQ, data, 1);
-	if (rc < 0) {
-		pr_err("adc-tm enable failed\n");
-		return rc;
-	}
-
-	rc = qpnp_adc_tm_write_reg(chip, QPNP_EN_CTL1, data, 1);
-	if (rc < 0) {
-		pr_err("adc-tm disable failed\n");
-		return rc;
-	}
-
-	return rc;
+	return 0;
 }
 
 static int qpnp_adc_tm_is_valid(struct qpnp_adc_tm_chip *chip)
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 1fff359..4bbb47a 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -31,8 +31,7 @@
  * If the temperature is higher than a trip point,
  *    a. if the trend is THERMAL_TREND_RAISING, use higher cooling
  *       state for this trip point
- *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- *       state for this trip point
+ *    b. if the trend is THERMAL_TREND_DROPPING, do nothing
  *    c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
  *       for this trip point
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
@@ -102,10 +101,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
 			if (!throttle)
 				next_target = THERMAL_NO_TARGET;
 		} else {
-			if (!throttle)
+			if (!throttle) {
 				next_target = cur_state - 1;
-			if (next_target > instance->upper)
-				next_target = instance->upper;
+				if (next_target > instance->upper)
+					next_target = instance->upper;
+			}
 		}
 		break;
 	case THERMAL_TREND_DROP_FULL:
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index f8c3107..2ffebb7 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -121,7 +121,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
 
 	if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
 			(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
-		rs485->flags &= SER_RS485_ENABLED;
+		rs485->flags &= ~SER_RS485_ENABLED;
 	else
 		config |= RS485_URA;
 
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 22d32d2..b80ea87 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5568,6 +5568,9 @@ static struct pci_device_id serial_pci_tbl[] = {
 	{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
 	{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
 
+	/* Amazon PCI serial device */
+	{ PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
+
 	/*
 	 * These entries match devices with class COMMUNICATION_SERIAL,
 	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1ef31e3..f6e4373 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2526,8 +2526,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
 	serial_dl_write(up, quot);
 
 	/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
-	if (up->port.type == PORT_XR17V35X)
+	if (up->port.type == PORT_XR17V35X) {
+		/* Preserve bits not related to baudrate; DLD[7:4]. */
+		quot_frac |= serial_port_in(port, 0x2) & 0xf0;
 		serial_port_out(port, 0x2, quot_frac);
+	}
 }
 
 static unsigned int serial8250_get_baud_rate(struct uart_port *port,
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 0ce23c3..d79b95764 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, The Linux foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1727,7 +1727,8 @@ static int msm_geni_serial_startup(struct uart_port *uport)
 static int get_clk_cfg(unsigned long clk_freq, unsigned long *ser_clk)
 {
 	unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
-		32000000, 48000000, 64000000, 80000000, 96000000, 100000000};
+		32000000, 48000000, 64000000, 80000000, 96000000, 100000000,
+		102400000, 112000000, 120000000, 128000000};
 	int i;
 	int match = -1;
 
@@ -2135,7 +2136,7 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 exit_geni_serial_earlyconsetup:
 	return ret;
 }
-OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-uart",
+OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-console",
 		msm_geni_serial_earlycon_setup);
 
 static int console_register(struct uart_driver *drv)
@@ -2657,17 +2658,12 @@ static const struct dev_pm_ops msm_geni_serial_pm_ops = {
 	.resume_noirq = msm_geni_serial_sys_resume_noirq,
 };
 
-static const struct of_device_id msm_geni_serial_match_table[] = {
-	{ .compatible = "qcom,msm-geni-uart"},
-	{},
-};
-
 static struct platform_driver msm_geni_serial_platform_driver = {
 	.remove = msm_geni_serial_remove,
 	.probe = msm_geni_serial_probe,
 	.driver = {
 		.name = "msm_geni_serial",
-		.of_match_table = msm_geni_serial_match_table,
+		.of_match_table = msm_geni_device_tbl,
 		.pm = &msm_geni_serial_pm_ops,
 	},
 };
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 94c3718..547bd21 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -244,8 +244,10 @@ static void sysrq_handle_showallcpus(int key)
 	 * architecture has no support for it:
 	 */
 	if (!trigger_all_cpu_backtrace()) {
-		struct pt_regs *regs = get_irq_regs();
+		struct pt_regs *regs = NULL;
 
+		if (in_irq())
+			regs = get_irq_regs();
 		if (regs) {
 			pr_info("CPU%d:\n", smp_processor_id());
 			show_regs(regs);
@@ -264,7 +266,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
 
 static void sysrq_handle_showregs(int key)
 {
-	struct pt_regs *regs = get_irq_regs();
+	struct pt_regs *regs = NULL;
+
+	if (in_irq())
+		regs = get_irq_regs();
 	if (regs)
 		show_regs(regs);
 	perf_event_print_debug();
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6..b0500a0 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
 
 struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
 {
+	struct tty_ldisc *ld;
+
 	ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
-	if (!tty->ldisc)
+	ld = tty->ldisc;
+	if (!ld)
 		ldsem_up_read(&tty->ldisc_sem);
-	return tty->ldisc;
+	return ld;
 }
 EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
 
@@ -489,41 +492,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 }
 
 /**
- *	tty_ldisc_restore	-	helper for tty ldisc change
- *	@tty: tty to recover
- *	@old: previous ldisc
- *
- *	Restore the previous line discipline or N_TTY when a line discipline
- *	change fails due to an open error
- */
-
-static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
-{
-	struct tty_ldisc *new_ldisc;
-	int r;
-
-	/* There is an outstanding reference here so this is safe */
-	old = tty_ldisc_get(tty, old->ops->num);
-	WARN_ON(IS_ERR(old));
-	tty->ldisc = old;
-	tty_set_termios_ldisc(tty, old->ops->num);
-	if (tty_ldisc_open(tty, old) < 0) {
-		tty_ldisc_put(old);
-		/* This driver is always present */
-		new_ldisc = tty_ldisc_get(tty, N_TTY);
-		if (IS_ERR(new_ldisc))
-			panic("n_tty: get");
-		tty->ldisc = new_ldisc;
-		tty_set_termios_ldisc(tty, N_TTY);
-		r = tty_ldisc_open(tty, new_ldisc);
-		if (r < 0)
-			panic("Couldn't open N_TTY ldisc for "
-			      "%s --- error %d.",
-			      tty_name(tty), r);
-	}
-}
-
-/**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
  *	@ldisc: the line discipline
@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-	int retval;
-	struct tty_ldisc *old_ldisc, *new_ldisc;
-
-	new_ldisc = tty_ldisc_get(tty, disc);
-	if (IS_ERR(new_ldisc))
-		return PTR_ERR(new_ldisc);
+	int retval, old_disc;
 
 	tty_lock(tty);
 	retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 	}
 
 	/* Check the no-op case */
-	if (tty->ldisc->ops->num == disc)
+	old_disc = tty->ldisc->ops->num;
+	if (old_disc == disc)
 		goto out;
 
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 		goto out;
 	}
 
-	old_ldisc = tty->ldisc;
-
-	/* Shutdown the old discipline. */
-	tty_ldisc_close(tty, old_ldisc);
-
-	/* Now set up the new line discipline. */
-	tty->ldisc = new_ldisc;
-	tty_set_termios_ldisc(tty, disc);
-
-	retval = tty_ldisc_open(tty, new_ldisc);
+	retval = tty_ldisc_reinit(tty, disc);
 	if (retval < 0) {
 		/* Back to the old one or N_TTY if we can't */
-		tty_ldisc_put(new_ldisc);
-		tty_ldisc_restore(tty, old_ldisc);
+		if (tty_ldisc_reinit(tty, old_disc) < 0) {
+			pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
+			if (tty_ldisc_reinit(tty, N_TTY) < 0) {
+				/* At this point we have tty->ldisc == NULL. */
+				pr_err("tty: reinitializing N_TTY failed\n");
+			}
+		}
 	}
 
-	if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
+	if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
+	    tty->ops->set_ldisc) {
 		down_read(&tty->termios_rwsem);
 		tty->ops->set_ldisc(tty);
 		up_read(&tty->termios_rwsem);
 	}
 
-	/* At this point we hold a reference to the new ldisc and a
-	   reference to the old ldisc, or we hold two references to
-	   the old ldisc (if it was restored as part of error cleanup
-	   above). In either case, releasing a single reference from
-	   the old ldisc is correct. */
-	new_ldisc = old_ldisc;
 out:
 	tty_ldisc_unlock(tty);
 
@@ -598,7 +553,6 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
 	   already running */
 	tty_buffer_restart_work(tty->port);
 err:
-	tty_ldisc_put(new_ldisc);	/* drop the extra reference */
 	tty_unlock(tty);
 	return retval;
 }
@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	int retval;
 
 	ld = tty_ldisc_get(tty, disc);
-	if (IS_ERR(ld)) {
-		BUG_ON(disc == N_TTY);
+	if (IS_ERR(ld))
 		return PTR_ERR(ld);
-	}
 
 	if (tty->ldisc) {
 		tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 	tty_set_termios_ldisc(tty, disc);
 	retval = tty_ldisc_open(tty, tty->ldisc);
 	if (retval) {
-		if (!WARN_ON(disc == N_TTY)) {
-			tty_ldisc_put(tty->ldisc);
-			tty->ldisc = NULL;
-		}
+		tty_ldisc_put(tty->ldisc);
+		tty->ldisc = NULL;
 	}
 	return retval;
 }
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 701d9f7..23b7823 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -550,6 +550,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	unsigned iad_num = 0;
 
 	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+	nintf = nintf_orig = config->desc.bNumInterfaces;
+	config->desc.bNumInterfaces = 0;	// Adjusted later
+
 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
 	    config->desc.bLength > size) {
@@ -563,7 +566,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	buffer += config->desc.bLength;
 	size -= config->desc.bLength;
 
-	nintf = nintf_orig = config->desc.bNumInterfaces;
 	if (nintf > USB_MAXINTERFACES) {
 		dev_warn(ddev, "config %d has too many interfaces: %d, "
 		    "using maximum allowed: %d\n",
@@ -900,14 +902,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
 	}
 }
 
+static const __u8 bos_desc_len[256] = {
+	[USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
+	[USB_CAP_TYPE_EXT]          = USB_DT_USB_EXT_CAP_SIZE,
+	[USB_SS_CAP_TYPE]           = USB_DT_USB_SS_CAP_SIZE,
+	[USB_SSP_CAP_TYPE]          = USB_DT_USB_SSP_CAP_SIZE(1),
+	[CONTAINER_ID_TYPE]         = USB_DT_USB_SS_CONTN_ID_SIZE,
+	[USB_PTM_CAP_TYPE]          = USB_DT_USB_PTM_ID_SIZE,
+};
+
 /* Get BOS descriptor set */
 int usb_get_bos_descriptor(struct usb_device *dev)
 {
 	struct device *ddev = &dev->dev;
 	struct usb_bos_descriptor *bos;
 	struct usb_dev_cap_header *cap;
+	struct usb_ssp_cap_descriptor *ssp_cap;
 	unsigned char *buffer;
-	int length, total_len, num, i;
+	int length, total_len, num, i, ssac;
+	__u8 cap_type;
 	int ret;
 
 	bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@@ -960,7 +973,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			dev->bos->desc->bNumDeviceCaps = i;
 			break;
 		}
+		cap_type = cap->bDevCapabilityType;
 		length = cap->bLength;
+		if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
+			dev->bos->desc->bNumDeviceCaps = i;
+			break;
+		}
+
 		total_len -= length;
 
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -968,7 +987,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			continue;
 		}
 
-		switch (cap->bDevCapabilityType) {
+		switch (cap_type) {
 		case USB_CAP_TYPE_WIRELESS_USB:
 			/* Wireless USB cap descriptor is handled by wusb */
 			break;
@@ -981,8 +1000,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 				(struct usb_ss_cap_descriptor *)buffer;
 			break;
 		case USB_SSP_CAP_TYPE:
-			dev->bos->ssp_cap =
-				(struct usb_ssp_cap_descriptor *)buffer;
+			ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
+			ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
+				USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+			if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
+				dev->bos->ssp_cap = ssp_cap;
 			break;
 		case CONTAINER_ID_TYPE:
 			dev->bos->ss_id =
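
The bos_desc_len[] table above gives each known capability type a minimum length; any capability that reports a shorter bLength truncates the walk and clamps bNumDeviceCaps to the descriptors parsed so far. Below is a minimal userspace sketch of that same table-driven check; the type codes and minimum lengths are illustrative placeholders, not the real USB_DT_* constants.

#include <stdio.h>
#include <stdint.h>

/* Illustrative minimum lengths, indexed by capability type byte.
 * Types 0x02 and 0x03 here are placeholders, not real USB cap codes. */
static const uint8_t min_len[256] = {
	[0x02] = 7,
	[0x03] = 10,
};

/* Walk capability headers: { bLength, bDescriptorType, bDevCapabilityType, ... } */
static int count_valid_caps(const uint8_t *buf, int total)
{
	int n = 0;

	while (total >= 3) {
		uint8_t len = buf[0];
		uint8_t cap_type = buf[2];

		if (len < 3 || len > total)
			break;		/* malformed header */
		if (min_len[cap_type] && len < min_len[cap_type])
			break;		/* known type but too short: stop, as the kernel does */
		buf += len;
		total -= len;
		n++;
	}
	return n;
}

int main(void)
{
	const uint8_t caps[] = {
		7, 0x10, 0x02, 0, 0, 0, 0,	/* 7-byte capability of type 0x02: accepted */
		4, 0x10, 0x03, 0,		/* type 0x03 needs 10 bytes: walk stops here */
	};

	printf("accepted %d capabilities\n", count_valid_caps(caps, sizeof(caps)));
	return 0;
}
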
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index fa61935..893ebae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -134,42 +134,38 @@ enum snoop_when {
 #define USB_DEVICE_DEV		MKDEV(USB_DEVICE_MAJOR, 0)
 
 /* Limit on the total amount of memory we can allocate for transfers */
-static unsigned usbfs_memory_mb = 16;
+static u32 usbfs_memory_mb = 16;
 module_param(usbfs_memory_mb, uint, 0644);
 MODULE_PARM_DESC(usbfs_memory_mb,
 		"maximum MB allowed for usbfs buffers (0 = no limit)");
 
 /* Hard limit, necessary to avoid arithmetic overflow */
-#define USBFS_XFER_MAX		(UINT_MAX / 2 - 1000000)
+#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
 
-static atomic_t usbfs_memory_usage;	/* Total memory currently allocated */
+static atomic64_t usbfs_memory_usage;	/* Total memory currently allocated */
 
 /* Check whether it's okay to allocate more memory for a transfer */
-static int usbfs_increase_memory_usage(unsigned amount)
+static int usbfs_increase_memory_usage(u64 amount)
 {
-	unsigned lim;
+	u64 lim;
 
-	/*
-	 * Convert usbfs_memory_mb to bytes, avoiding overflows.
-	 * 0 means use the hard limit (effectively unlimited).
-	 */
 	lim = ACCESS_ONCE(usbfs_memory_mb);
-	if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
-		lim = USBFS_XFER_MAX;
-	else
-		lim <<= 20;
+	lim <<= 20;
 
-	atomic_add(amount, &usbfs_memory_usage);
-	if (atomic_read(&usbfs_memory_usage) <= lim)
-		return 0;
-	atomic_sub(amount, &usbfs_memory_usage);
-	return -ENOMEM;
+	atomic64_add(amount, &usbfs_memory_usage);
+
+	if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
+		atomic64_sub(amount, &usbfs_memory_usage);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /* Memory for a transfer is being deallocated */
-static void usbfs_decrease_memory_usage(unsigned amount)
+static void usbfs_decrease_memory_usage(u64 amount)
 {
-	atomic_sub(amount, &usbfs_memory_usage);
+	atomic64_sub(amount, &usbfs_memory_usage);
 }
 
 static int connected(struct usb_dev_state *ps)
@@ -1191,7 +1187,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
 	if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
 		return -EINVAL;
 	len1 = bulk.len;
-	if (len1 >= USBFS_XFER_MAX)
+	if (len1 >= (INT_MAX - sizeof(struct urb)))
 		return -EINVAL;
 	ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
 	if (ret)
@@ -1458,13 +1454,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 	int number_of_packets = 0;
 	unsigned int stream_id = 0;
 	void *buf;
-
-	if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
-				USBDEVFS_URB_SHORT_NOT_OK |
+	unsigned long mask =	USBDEVFS_URB_SHORT_NOT_OK |
 				USBDEVFS_URB_BULK_CONTINUATION |
 				USBDEVFS_URB_NO_FSBR |
 				USBDEVFS_URB_ZERO_PACKET |
-				USBDEVFS_URB_NO_INTERRUPT))
+				USBDEVFS_URB_NO_INTERRUPT;
+	/* USBDEVFS_URB_ISO_ASAP is a special case */
+	if (uurb->type == USBDEVFS_URB_TYPE_ISO)
+		mask |= USBDEVFS_URB_ISO_ASAP;
+
+	if (uurb->flags & ~mask)
+		return -EINVAL;
+
+	if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
 		return -EINVAL;
 	if (uurb->buffer_length > 0 && !uurb->buffer)
 		return -EINVAL;
@@ -1584,10 +1586,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
 		return -EINVAL;
 	}
 
-	if (uurb->buffer_length >= USBFS_XFER_MAX) {
-		ret = -EINVAL;
-		goto error;
-	}
 	if (uurb->buffer_length > 0 &&
 			!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
 				uurb->buffer, uurb->buffer_length)) {
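
The devio.c change above moves the usbfs accounting from a 32-bit atomic_t to atomic64_t and simplifies the limit check: the amount is added first and rolled back if the total exceeds the configured limit, and a limit of 0 means "no limit". A minimal userspace sketch of that add-then-check-then-roll-back pattern, using C11 atomics purely for illustration (the kernel uses atomic64_add/read/sub):

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>
#include <errno.h>

static _Atomic int64_t usage;		/* bytes currently accounted */
static uint64_t limit_mb = 16;		/* 0 means "no limit" */

/* add first, roll back on failure: same shape as usbfs_increase_memory_usage() */
static int increase_usage(uint64_t amount)
{
	uint64_t lim = limit_mb << 20;	/* MB -> bytes; 64-bit, so the shift cannot overflow */

	atomic_fetch_add(&usage, amount);
	if (lim > 0 && (uint64_t)atomic_load(&usage) > lim) {
		atomic_fetch_sub(&usage, amount);
		return -ENOMEM;
	}
	return 0;
}

static void decrease_usage(uint64_t amount)
{
	atomic_fetch_sub(&usage, amount);
}

int main(void)
{
	printf("8 MB:  %d\n", increase_usage(8ull << 20));	/* 0: within the 16 MB limit */
	printf("16 MB: %d\n", increase_usage(16ull << 20));	/* -ENOMEM: 8 + 16 > 16 MB */
	decrease_usage(8ull << 20);
	return 0;
}
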
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50a6f2f..a9117ee 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4947,6 +4947,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
 		usb_put_dev(udev);
 		if ((status == -ENOTCONN) || (status == -ENOTSUPP))
 			break;
+
+		/* When halfway through our retry count, power-cycle the port */
+		if (i == (SET_CONFIG_TRIES / 2) - 1) {
+			dev_info(&port_dev->dev, "attempt power cycle\n");
+			usb_hub_set_port_power(hdev, hub, port1, false);
+			msleep(2 * hub_power_on_good_delay(hub));
+			usb_hub_set_port_power(hdev, hub, port1, true);
+			msleep(hub_power_on_good_delay(hub));
+		}
 	}
 	if (hub->hdev->parent ||
 			!hcd->driver->port_handed_over ||
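
The hub.c hunk power-cycles the port once the enumeration retry loop in hub_port_connect() is halfway through its attempts. A trivial sketch of where that lands in the sequence; SET_CONFIG_TRIES below is a hypothetical stand-in for the kernel constant, and the dev_info/usb_hub_set_port_power calls are reduced to printf:

#include <stdio.h>

/* hypothetical stand-in for the kernel's SET_CONFIG_TRIES retry count */
#define SET_CONFIG_TRIES 4

int main(void)
{
	int i;

	for (i = 0; i < SET_CONFIG_TRIES; i++) {
		printf("attempt %d: enumerate", i);
		if (i == (SET_CONFIG_TRIES / 2) - 1)
			printf(" -> failed, power-cycle the port before the next try");
		printf("\n");
	}
	return 0;
}
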
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 37c418e..5001028 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 719fcbf..f049857 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -889,6 +889,19 @@ int dwc3_core_init(struct dwc3 *dwc)
 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
 	}
 
+	/*
+	 * Workaround for STAR 9001198391 which affects dwc3 core
+	 * version 3.20a only. Default HP timer value is incorrectly
+	 * set to 3us. Reprogram HP timer value to support USB 3.1
+	 * HP timer ECN.
+	 */
+	if (!dwc3_is_usb31(dwc) && dwc->revision == DWC3_REVISION_320A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+		reg &= ~DWC3_GUCTL2_HP_TIMER_MASK;
+		reg |= DWC3_GUCTL2_HP_TIMER(11);
+		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+	}
+
 	return 0;
 
 err2:
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index a8400dd..e1dc7c8 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -323,6 +323,8 @@
 
 /* Global User Control Register 2 */
 #define DWC3_GUCTL2_RST_ACTBITLATER		(1 << 14)
+#define DWC3_GUCTL2_HP_TIMER(n)			((n) << 21)
+#define DWC3_GUCTL2_HP_TIMER_MASK		DWC3_GUCTL2_HP_TIMER(0x1f)
 
 /* Device Configuration Register */
 #define DWC3_DCFG_DEVADDR(addr)	((addr) << 3)
@@ -1036,6 +1038,7 @@ struct dwc3 {
 #define DWC3_REVISION_280A	0x5533280a
 #define DWC3_REVISION_300A	0x5533300a
 #define DWC3_REVISION_310A	0x5533310a
+#define DWC3_REVISION_320A	0x5533320a
 
 /*
  * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
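
The two new core.h macros describe a 5-bit HP timer field at bits 25:21 of GUCTL2, and the core.c hunk above uses them in a read-modify-write for the 3.20a revision. A small userspace sketch of that bit manipulation; the starting register value is made up for illustration, only the field layout comes from the macros:

#include <stdio.h>
#include <stdint.h>

/* same field encoding as the new core.h macros: 5-bit field at bits 25:21 */
#define DWC3_GUCTL2_HP_TIMER(n)		((uint32_t)(n) << 21)
#define DWC3_GUCTL2_HP_TIMER_MASK	DWC3_GUCTL2_HP_TIMER(0x1f)

int main(void)
{
	/* pretend GUCTL2 readback: HP timer field currently 3, bit 14 also set */
	uint32_t reg = DWC3_GUCTL2_HP_TIMER(3) | (1 << 14);

	reg &= ~DWC3_GUCTL2_HP_TIMER_MASK;	/* clear the old HP timer value */
	reg |= DWC3_GUCTL2_HP_TIMER(11);	/* program the value used by the workaround */

	printf("GUCTL2 = 0x%08x, HP timer field = %u\n",
	       reg, (reg & DWC3_GUCTL2_HP_TIMER_MASK) >> 21);
	return 0;
}
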
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index de7fefc..a49b24e 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -121,7 +121,7 @@ MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
 #define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
 #define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
 #define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
-#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
+#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x144) + (n*4))
 
 #define	GSI_IF_STS	(QSCRATCH_REG_OFFSET + 0x1A4)
 #define	GSI_WR_CTRL_STATE_MASK	BIT(15)
@@ -271,6 +271,7 @@ struct dwc3_msm {
 	struct pm_qos_request pm_qos_req_dma;
 	struct delayed_work perf_vote_work;
 	struct delayed_work sdp_check;
+	bool usb_compliance_mode;
 	struct mutex suspend_resume_mutex;
 };
 
@@ -933,9 +934,10 @@ static int gsi_startxfer_for_ep(struct usb_ep *ep)
 * for GSI channel creation.
 *
 * @usb_ep - pointer to usb_ep instance.
-* @dbl_addr - Doorbell address obtained from IPA driver
+* @request - USB GSI request carrying the doorbell address obtained from the IPA driver
 */
-static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
+static void gsi_store_ringbase_dbl_info(struct usb_ep *ep,
+			struct usb_gsi_request *request)
 {
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3	*dwc = dep->dwc;
@@ -944,11 +946,27 @@ static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
 
 	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
 			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
-	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
 
-	dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
+	if (request->mapped_db_reg_phs_addr_lsb)
+		dma_unmap_resource(dwc->sysdev,
+			request->mapped_db_reg_phs_addr_lsb,
+			PAGE_SIZE, DMA_BIDIRECTIONAL, 0);
+
+	request->mapped_db_reg_phs_addr_lsb = dma_map_resource(dwc->sysdev,
+			(phys_addr_t)request->db_reg_phs_addr_lsb, PAGE_SIZE,
+			DMA_BIDIRECTIONAL, 0);
+	if (dma_mapping_error(dwc->sysdev, request->mapped_db_reg_phs_addr_lsb))
+		dev_err(mdwc->dev, "mapping error for db_reg_phs_addr_lsb\n");
+
+	dev_dbg(mdwc->dev, "ep:%s dbl_addr_lsb:%x mapped_dbl_addr_lsb:%llx\n",
+		ep->name, request->db_reg_phs_addr_lsb,
+		(unsigned long long)request->mapped_db_reg_phs_addr_lsb);
+
+	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n),
+			(u32)request->mapped_db_reg_phs_addr_lsb);
+	dev_dbg(mdwc->dev, "Ring Base Addr %d: %x (LSB)\n", n,
 			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
-	dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
+	dev_dbg(mdwc->dev, "GSI DB Addr %d: %x (LSB)\n", n,
 			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
 }
 
@@ -964,9 +982,6 @@ static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
 	void __iomem *gsi_dbl_address_lsb;
 	void __iomem *gsi_dbl_address_msb;
 	dma_addr_t offset;
-	u64 dbl_addr = *((u64 *)request->buf_base_addr);
-	u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
-	u32 dbl_hi_addr = (dbl_addr >> 32);
 	struct dwc3_ep *dep = to_dwc3_ep(ep);
 	struct dwc3	*dwc = dep->dwc;
 	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
@@ -974,18 +989,19 @@ static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
 					: (request->num_bufs + 2);
 
 	gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
-					dbl_lo_addr, sizeof(u32));
+				request->db_reg_phs_addr_lsb, sizeof(u32));
 	if (!gsi_dbl_address_lsb)
 		dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
 
 	gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
-					dbl_hi_addr, sizeof(u32));
+			request->db_reg_phs_addr_msb, sizeof(u32));
 	if (!gsi_dbl_address_msb)
 		dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
 
 	offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
 	dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
-		&offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
+		&offset, gsi_dbl_address_lsb, request->db_reg_phs_addr_lsb,
+		ep->name);
 
 	writel_relaxed(offset, gsi_dbl_address_lsb);
 	writel_relaxed(0, gsi_dbl_address_msb);
@@ -1381,7 +1397,8 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
 		break;
 	case GSI_EP_OP_STORE_DBL_INFO:
 		dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
-		gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
+		request = (struct usb_gsi_request *)op_data;
+		gsi_store_ringbase_dbl_info(ep, request);
 		break;
 	case GSI_EP_OP_ENABLE_GSI:
 		dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
@@ -2851,6 +2868,13 @@ static void check_for_sdp_connection(struct work_struct *w)
 	if (!mdwc->vbus_active)
 		return;
 
+	/* USB 3.1 compliance equipment is usually reported as a floating
+	 * charger because the HS dp/dm lines are never connected. Do not
+	 * tear down the USB stack if the compliance parameter is set.
+	 */
+	if (mdwc->usb_compliance_mode)
+		return;
+
 	/* floating D+/D- lines detected */
 	if (dwc->gadget.state < USB_STATE_DEFAULT &&
 		dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
@@ -3132,6 +3156,31 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(speed);
 
+static ssize_t usb_compliance_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%c\n",
+			mdwc->usb_compliance_mode ? 'Y' : 'N');
+}
+
+static ssize_t usb_compliance_mode_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	ret = strtobool(buf, &mdwc->usb_compliance_mode);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+static DEVICE_ATTR_RW(usb_compliance_mode);
+
+
 static int dwc3_msm_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node, *dwc3_node;
@@ -3482,6 +3531,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 
 	device_create_file(&pdev->dev, &dev_attr_mode);
 	device_create_file(&pdev->dev, &dev_attr_speed);
+	device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode);
 
 	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
 	if (!dwc->is_drd && host_mode) {
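
The new usb_compliance_mode attribute is a plain boolean sysfs file parsed with strtobool(), so writing 'Y' or '1' enables it and 'N' or '0' clears it. A small userspace example of flipping it on; the sysfs path is hypothetical and depends on how the dwc3-msm platform device is named on the target:

#include <stdio.h>

int main(void)
{
	/* hypothetical path: substitute the real dwc3-msm device node */
	const char *attr =
		"/sys/devices/platform/soc/a600000.ssusb/usb_compliance_mode";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror(attr);
		return 1;
	}
	fputs("Y\n", f);	/* strtobool() accepts Y/y/1 and N/n/0 */
	fclose(f);
	return 0;
}
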
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0ffe351..8f0ca3f 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -413,7 +413,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
 		dev_err(dwc->dev, "%s command timeout for %s\n",
 			dwc3_gadget_ep_cmd_string(cmd), dep->name);
-		if (!(cmd & DWC3_DEPCMD_ENDTRANSFER)) {
+		if (cmd != DWC3_DEPCMD_ENDTRANSFER) {
 			dwc->ep_cmd_timeout_cnt++;
 			dwc3_notify_event(dwc,
 				DWC3_CONTROLLER_RESTART_USB_SESSION);
@@ -3968,15 +3968,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
-	int ret;
-
 	if (!dwc->gadget_driver)
 		return 0;
 
-	ret = dwc3_gadget_run_stop(dwc, false, false);
-	if (ret < 0)
-		return ret;
-
+	dwc3_gadget_run_stop(dwc, false, false);
 	dwc3_disconnect_gadget(dwc);
 	__dwc3_gadget_stop(dwc);
 
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index ceacf3d..598a67d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -10,5 +10,3 @@
 libcomposite-y			+= composite.o functions.o configfs.o u_f.o
 
 obj-$(CONFIG_USB_GADGET)	+= udc/ function/ legacy/
-
-obj-$(CONFIG_USB_CI13XXX_MSM)   += ci13xxx_msm.o
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
deleted file mode 100644
index 78b7d3a..0000000
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ /dev/null
@@ -1,556 +0,0 @@
-/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/usb/ulpi.h>
-#include <linux/gpio.h>
-#include <linux/pinctrl/consumer.h>
-
-#include "ci13xxx_udc.c"
-
-#define MSM_USB_BASE	(udc->regs)
-
-#define CI13XXX_MSM_MAX_LOG2_ITC	7
-
-struct ci13xxx_udc_context {
-	int irq;
-	void __iomem *regs;
-	int wake_gpio;
-	int wake_irq;
-	bool wake_irq_state;
-	struct pinctrl *ci13xxx_pinctrl;
-	struct timer_list irq_enable_timer;
-	bool irq_disabled;
-};
-
-static struct ci13xxx_udc_context _udc_ctxt;
-#define IRQ_ENABLE_DELAY	(jiffies + msecs_to_jiffies(1000))
-
-static irqreturn_t msm_udc_irq(int irq, void *data)
-{
-	return udc_irq();
-}
-
-static void ci13xxx_msm_suspend(void)
-{
-	struct device *dev = _udc->gadget.dev.parent;
-
-	dev_dbg(dev, "ci13xxx_msm_suspend\n");
-
-	if (_udc_ctxt.wake_irq && !_udc_ctxt.wake_irq_state) {
-		enable_irq_wake(_udc_ctxt.wake_irq);
-		enable_irq(_udc_ctxt.wake_irq);
-		_udc_ctxt.wake_irq_state = true;
-	}
-}
-
-static void ci13xxx_msm_resume(void)
-{
-	struct device *dev = _udc->gadget.dev.parent;
-
-	dev_dbg(dev, "ci13xxx_msm_resume\n");
-
-	if (_udc_ctxt.wake_irq && _udc_ctxt.wake_irq_state) {
-		disable_irq_wake(_udc_ctxt.wake_irq);
-		disable_irq_nosync(_udc_ctxt.wake_irq);
-		_udc_ctxt.wake_irq_state = false;
-	}
-}
-
-static void ci13xxx_msm_disconnect(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-
-	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		u32 temp;
-
-		usb_phy_io_write(phy,
-				ULPI_MISC_A_VBUSVLDEXT |
-				ULPI_MISC_A_VBUSVLDEXTSEL,
-				ULPI_CLR(ULPI_MISC_A));
-
-		/* Notify LINK of VBUS LOW */
-		temp = readl_relaxed(USB_USBCMD);
-		temp &= ~USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
-		/*
-		 * Add memory barrier as it is must to complete
-		 * above USB PHY and Link register writes before
-		 * moving ahead with USB peripheral mode enumeration,
-		 * otherwise USB peripheral mode may not work.
-		 */
-		mb();
-	}
-}
-
-/* Link power management will reduce power consumption by
- * short time HW suspend/resume.
- */
-static void ci13xxx_msm_set_l1(struct ci13xxx *udc)
-{
-	int temp;
-	struct device *dev = udc->gadget.dev.parent;
-
-	dev_dbg(dev, "Enable link power management\n");
-
-	/* Enable remote wakeup and L1 for IN EPs */
-	writel_relaxed(0xffff0000, USB_L1_EP_CTRL);
-
-	temp = readl_relaxed(USB_L1_CONFIG);
-	temp |= L1_CONFIG_LPM_EN | L1_CONFIG_REMOTE_WAKEUP |
-		L1_CONFIG_GATE_SYS_CLK | L1_CONFIG_PHY_LPM |
-		L1_CONFIG_PLL;
-	writel_relaxed(temp, USB_L1_CONFIG);
-}
-
-static void ci13xxx_msm_connect(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-
-	if (phy && (phy->flags & ENABLE_DP_MANUAL_PULLUP)) {
-		int	temp;
-
-		usb_phy_io_write(phy,
-			ULPI_MISC_A_VBUSVLDEXT |
-			ULPI_MISC_A_VBUSVLDEXTSEL,
-			ULPI_SET(ULPI_MISC_A));
-
-		temp = readl_relaxed(USB_GENCONFIG_2);
-		temp |= GENCONFIG_2_SESS_VLD_CTRL_EN;
-		writel_relaxed(temp, USB_GENCONFIG_2);
-
-		temp = readl_relaxed(USB_USBCMD);
-		temp |= USBCMD_SESS_VLD_CTRL;
-		writel_relaxed(temp, USB_USBCMD);
-
-		/*
-		 * Add memory barrier as it is must to complete
-		 * above USB PHY and Link register writes before
-		 * moving ahead with USB peripheral mode enumeration,
-		 * otherwise USB peripheral mode may not work.
-		 */
-		mb();
-	}
-}
-
-static void ci13xxx_msm_reset(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct usb_phy *phy = udc->transceiver;
-	struct device *dev = udc->gadget.dev.parent;
-	int	temp;
-
-	writel_relaxed(0, USB_AHBBURST);
-	writel_relaxed(0x08, USB_AHBMODE);
-
-	/* workaround for rx buffer collision issue */
-	temp = readl_relaxed(USB_GENCONFIG);
-	temp &= ~GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE;
-	temp &= ~GENCONFIG_ULPI_SERIAL_EN;
-	writel_relaxed(temp, USB_GENCONFIG);
-
-	if (udc->gadget.l1_supported)
-		ci13xxx_msm_set_l1(udc);
-
-	if (phy && (phy->flags & ENABLE_SECONDARY_PHY)) {
-		int	temp;
-
-		dev_dbg(dev, "using secondary hsphy\n");
-		temp = readl_relaxed(USB_PHY_CTRL2);
-		temp |= (1<<16);
-		writel_relaxed(temp, USB_PHY_CTRL2);
-
-		/*
-		 * Add memory barrier to make sure above LINK writes are
-		 * complete before moving ahead with USB peripheral mode
-		 * enumeration.
-		 */
-		mb();
-	}
-}
-
-static void ci13xxx_msm_mark_err_event(void)
-{
-	struct ci13xxx *udc = _udc;
-	struct msm_otg *otg;
-
-	if (udc == NULL)
-		return;
-
-	if (udc->transceiver == NULL)
-		return;
-
-	otg = container_of(udc->transceiver, struct msm_otg, phy);
-
-	/* This will trigger hardware reset before next connection */
-	otg->err_event_seen = true;
-}
-
-static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned int event)
-{
-	struct device *dev = udc->gadget.dev.parent;
-
-	switch (event) {
-	case CI13XXX_CONTROLLER_RESET_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
-		ci13xxx_msm_reset();
-		break;
-	case CI13XXX_CONTROLLER_DISCONNECT_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_DISCONNECT_EVENT received\n");
-		ci13xxx_msm_disconnect();
-		ci13xxx_msm_resume();
-		break;
-	case CI13XXX_CONTROLLER_CONNECT_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_CONNECT_EVENT received\n");
-		ci13xxx_msm_connect();
-		break;
-	case CI13XXX_CONTROLLER_SUSPEND_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_SUSPEND_EVENT received\n");
-		ci13xxx_msm_suspend();
-		break;
-	case CI13XXX_CONTROLLER_RESUME_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_RESUME_EVENT received\n");
-		ci13xxx_msm_resume();
-		break;
-	case CI13XXX_CONTROLLER_ERROR_EVENT:
-		dev_info(dev, "CI13XXX_CONTROLLER_ERROR_EVENT received\n");
-		ci13xxx_msm_mark_err_event();
-		break;
-	case CI13XXX_CONTROLLER_UDC_STARTED_EVENT:
-		dev_info(dev,
-			 "CI13XXX_CONTROLLER_UDC_STARTED_EVENT received\n");
-		break;
-	default:
-		dev_dbg(dev, "unknown ci13xxx_udc event\n");
-		break;
-	}
-}
-
-static bool ci13xxx_msm_in_lpm(struct ci13xxx *udc)
-{
-	struct msm_otg *otg;
-
-	if (udc == NULL)
-		return false;
-
-	if (udc->transceiver == NULL)
-		return false;
-
-	otg = container_of(udc->transceiver, struct msm_otg, phy);
-
-	return (atomic_read(&otg->in_lpm) != 0);
-}
-
-
-static irqreturn_t ci13xxx_msm_resume_irq(int irq, void *data)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (udc->transceiver && udc->vbus_active && udc->suspended)
-		usb_phy_set_suspend(udc->transceiver, 0);
-	else if (!udc->suspended)
-		ci13xxx_msm_resume();
-
-	return IRQ_HANDLED;
-}
-
-static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
-	.name			= "ci13xxx_msm",
-	.flags			= CI13XXX_REGS_SHARED |
-				  CI13XXX_REQUIRE_TRANSCEIVER |
-				  CI13XXX_PULLUP_ON_VBUS |
-				  CI13XXX_ZERO_ITC |
-				  CI13XXX_DISABLE_STREAMING,
-	.nz_itc			= 0,
-	.notify_event		= ci13xxx_msm_notify_event,
-	.in_lpm                 = ci13xxx_msm_in_lpm,
-};
-
-static int ci13xxx_msm_install_wake_gpio(struct platform_device *pdev,
-				struct resource *res)
-{
-	int wake_irq;
-	int ret;
-	struct pinctrl_state *set_state;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_install_wake_gpio\n");
-
-	_udc_ctxt.wake_gpio = res->start;
-	if (_udc_ctxt.ci13xxx_pinctrl) {
-		set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-				"ci13xxx_active");
-		if (IS_ERR(set_state)) {
-			pr_err("cannot get ci13xxx pinctrl active state\n");
-			return PTR_ERR(set_state);
-		}
-		pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl, set_state);
-	}
-	gpio_request(_udc_ctxt.wake_gpio, "USB_RESUME");
-	gpio_direction_input(_udc_ctxt.wake_gpio);
-	wake_irq = gpio_to_irq(_udc_ctxt.wake_gpio);
-	if (wake_irq < 0) {
-		dev_err(&pdev->dev, "could not register USB_RESUME GPIO.\n");
-		return -ENXIO;
-	}
-
-	dev_dbg(&pdev->dev, "_udc_ctxt.gpio_irq = %d and irq = %d\n",
-			_udc_ctxt.wake_gpio, wake_irq);
-	ret = request_irq(wake_irq, ci13xxx_msm_resume_irq,
-		IRQF_TRIGGER_RISING | IRQF_ONESHOT, "usb resume", NULL);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "could not register USB_RESUME IRQ.\n");
-		goto gpio_free;
-	}
-	disable_irq(wake_irq);
-	_udc_ctxt.wake_irq = wake_irq;
-
-	return 0;
-
-gpio_free:
-	gpio_free(_udc_ctxt.wake_gpio);
-	if (_udc_ctxt.ci13xxx_pinctrl) {
-		set_state = pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-				"ci13xxx_sleep");
-		if (IS_ERR(set_state))
-			pr_err("cannot get ci13xxx pinctrl sleep state\n");
-		else
-			pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
-					set_state);
-	}
-	_udc_ctxt.wake_gpio = 0;
-	return ret;
-}
-
-static void ci13xxx_msm_uninstall_wake_gpio(struct platform_device *pdev)
-{
-	struct pinctrl_state *set_state;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_uninstall_wake_gpio\n");
-
-	if (_udc_ctxt.wake_gpio) {
-		gpio_free(_udc_ctxt.wake_gpio);
-		if (_udc_ctxt.ci13xxx_pinctrl) {
-			set_state =
-				pinctrl_lookup_state(_udc_ctxt.ci13xxx_pinctrl,
-						"ci13xxx_sleep");
-			if (IS_ERR(set_state))
-				pr_err("cannot get ci13xxx pinctrl sleep state\n");
-			else
-				pinctrl_select_state(_udc_ctxt.ci13xxx_pinctrl,
-						set_state);
-		}
-		_udc_ctxt.wake_gpio = 0;
-	}
-}
-
-static void enable_usb_irq_timer_func(unsigned long data);
-static int ci13xxx_msm_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int ret;
-	struct ci13xxx_platform_data *pdata = pdev->dev.platform_data;
-	bool is_l1_supported = false;
-
-	dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
-
-	if (pdata) {
-		/* Acceptable values for nz_itc are: 0,1,2,4,8,16,32,64 */
-		if (pdata->log2_itc > CI13XXX_MSM_MAX_LOG2_ITC ||
-			pdata->log2_itc <= 0)
-			ci13xxx_msm_udc_driver.nz_itc = 0;
-		else
-			ci13xxx_msm_udc_driver.nz_itc =
-				1 << (pdata->log2_itc-1);
-
-		is_l1_supported = pdata->l1_supported;
-		/* Set ahb2ahb bypass flag if it is requested. */
-		if (pdata->enable_ahb2ahb_bypass)
-			ci13xxx_msm_udc_driver.flags |=
-				CI13XXX_ENABLE_AHB2AHB_BYPASS;
-
-		/* Clear disable streaming flag if is requested. */
-		if (pdata->enable_streaming)
-			ci13xxx_msm_udc_driver.flags &=
-						~CI13XXX_DISABLE_STREAMING;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "failed to get platform resource mem\n");
-		return -ENXIO;
-	}
-
-	_udc_ctxt.regs = ioremap(res->start, resource_size(res));
-	if (!_udc_ctxt.regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, _udc_ctxt.regs);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "udc_probe failed\n");
-		goto iounmap;
-	}
-
-	_udc->gadget.l1_supported = is_l1_supported;
-
-	_udc_ctxt.irq = platform_get_irq(pdev, 0);
-	if (_udc_ctxt.irq < 0) {
-		dev_err(&pdev->dev, "IRQ not found\n");
-		ret = -ENXIO;
-		goto udc_remove;
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "USB_RESUME");
-	/* Get pinctrl if target uses pinctrl */
-	_udc_ctxt.ci13xxx_pinctrl = devm_pinctrl_get(&pdev->dev);
-	if (IS_ERR(_udc_ctxt.ci13xxx_pinctrl)) {
-		if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) {
-			dev_err(&pdev->dev, "Error encountered while getting pinctrl");
-			ret = PTR_ERR(_udc_ctxt.ci13xxx_pinctrl);
-			goto udc_remove;
-		}
-		dev_dbg(&pdev->dev, "Target does not use pinctrl\n");
-		_udc_ctxt.ci13xxx_pinctrl = NULL;
-	}
-	if (res) {
-		ret = ci13xxx_msm_install_wake_gpio(pdev, res);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "gpio irq install failed\n");
-			goto udc_remove;
-		}
-	}
-
-	ret = request_irq(_udc_ctxt.irq, msm_udc_irq, IRQF_SHARED, pdev->name,
-					  pdev);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "request_irq failed\n");
-		goto gpio_uninstall;
-	}
-
-	setup_timer(&_udc_ctxt.irq_enable_timer, enable_usb_irq_timer_func,
-							(unsigned long)NULL);
-
-	pm_runtime_no_callbacks(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-
-	return 0;
-
-gpio_uninstall:
-	ci13xxx_msm_uninstall_wake_gpio(pdev);
-udc_remove:
-	udc_remove();
-iounmap:
-	iounmap(_udc_ctxt.regs);
-
-	return ret;
-}
-
-int ci13xxx_msm_remove(struct platform_device *pdev)
-{
-	pm_runtime_disable(&pdev->dev);
-	free_irq(_udc_ctxt.irq, pdev);
-	ci13xxx_msm_uninstall_wake_gpio(pdev);
-	udc_remove();
-	iounmap(_udc_ctxt.regs);
-	return 0;
-}
-
-void ci13xxx_msm_shutdown(struct platform_device *pdev)
-{
-	ci13xxx_pullup(&_udc->gadget, 0);
-}
-
-void msm_hw_soft_reset(void)
-{
-	struct ci13xxx *udc = _udc;
-
-	hw_device_reset(udc);
-}
-
-void msm_hw_bam_disable(bool bam_disable)
-{
-	u32 val;
-	struct ci13xxx *udc = _udc;
-
-	if (bam_disable)
-		val = readl_relaxed(USB_GENCONFIG) | GENCONFIG_BAM_DISABLE;
-	else
-		val = readl_relaxed(USB_GENCONFIG) & ~GENCONFIG_BAM_DISABLE;
-
-	writel_relaxed(val, USB_GENCONFIG);
-}
-
-void msm_usb_irq_disable(bool disable)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	if (_udc_ctxt.irq_disabled == disable) {
-		pr_debug("Interrupt state already disable = %d\n", disable);
-		if (disable)
-			mod_timer(&_udc_ctxt.irq_enable_timer,
-					IRQ_ENABLE_DELAY);
-		spin_unlock_irqrestore(udc->lock, flags);
-		return;
-	}
-
-	if (disable) {
-		disable_irq_nosync(_udc_ctxt.irq);
-		/* start timer here */
-		pr_debug("%s: Disabling interrupts\n", __func__);
-		mod_timer(&_udc_ctxt.irq_enable_timer, IRQ_ENABLE_DELAY);
-		_udc_ctxt.irq_disabled = true;
-
-	} else {
-		pr_debug("%s: Enabling interrupts\n", __func__);
-		del_timer(&_udc_ctxt.irq_enable_timer);
-		enable_irq(_udc_ctxt.irq);
-		_udc_ctxt.irq_disabled = false;
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-}
-
-static void enable_usb_irq_timer_func(unsigned long data)
-{
-	pr_debug("enabling interrupt from timer\n");
-	msm_usb_irq_disable(false);
-}
-
-static struct platform_driver ci13xxx_msm_driver = {
-	.probe = ci13xxx_msm_probe,
-	.driver = {
-		.name = "msm_hsusb",
-	},
-	.remove = ci13xxx_msm_remove,
-	.shutdown = ci13xxx_msm_shutdown,
-};
-MODULE_ALIAS("platform:msm_hsusb");
-
-static int __init ci13xxx_msm_init(void)
-{
-	return platform_driver_register(&ci13xxx_msm_driver);
-}
-module_init(ci13xxx_msm_init);
-
-static void __exit ci13xxx_msm_exit(void)
-{
-	platform_driver_unregister(&ci13xxx_msm_driver);
-}
-module_exit(ci13xxx_msm_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
deleted file mode 100644
index 28aaa1f..0000000
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ /dev/null
@@ -1,3983 +0,0 @@
-/*
- * ci13xxx_udc.c - MIPS USB IP core family device controller
- *
- * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
- *
- * Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * Description: MIPS USB IP core family device controller
- *              Currently it only supports IP part number CI13412
- *
- * This driver is composed of several blocks:
- * - HW:     hardware interface
- * - DBG:    debug facilities (optional)
- * - UTIL:   utilities
- * - ISR:    interrupts handling
- * - ENDPT:  endpoint operations (Gadget API)
- * - GADGET: gadget operations (Gadget API)
- * - BUS:    bus glue code, bus abstraction layer
- *
- * Compile Options
- * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
- * - STALL_IN:  non-empty bulk-in pipes cannot be halted
- *              if defined mass storage compliance succeeds but with warnings
- *              => case 4: Hi >  Dn
- *              => case 5: Hi >  Di
- *              => case 8: Hi <> Do
- *              if undefined usbtest 13 fails
- * - TRACE:     enable function tracing (depends on DEBUG)
- *
- * Main Features
- * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
- * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
- * - Normal & LPM support
- *
- * USBTEST Report
- * - OK: 0-12, 13 (STALL_IN defined) & 14
- * - Not Supported: 15 & 16 (ISO)
- *
- * TODO List
- * - OTG
- * - Isochronous & Interrupt Traffic
- * - Handle requests which spawns into several TDs
- * - GET_STATUS(device) - always reports 0
- * - Gadget API (majority of optional features)
- */
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/ratelimit.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/msm_hsusb.h>
-
-#include "ci13xxx_udc.h"
-
-/******************************************************************************
- * DEFINE
- *****************************************************************************/
-
-#define USB_MAX_TIMEOUT		25 /* 25msec timeout */
-#define EP_PRIME_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
-#define MAX_PRIME_CHECK_RETRY	3 /*Wait for 3sec for EP prime failure */
-#define EXTRA_ALLOCATION_SIZE	256
-
-/* ctrl register bank access */
-static DEFINE_SPINLOCK(udc_lock);
-
-/* control endpoint description */
-static const struct usb_endpoint_descriptor
-ctrl_endpt_out_desc = {
-	.bLength         = USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType = USB_DT_ENDPOINT,
-
-	.bEndpointAddress = USB_DIR_OUT,
-	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
-	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
-};
-
-static const struct usb_endpoint_descriptor
-ctrl_endpt_in_desc = {
-	.bLength         = USB_DT_ENDPOINT_SIZE,
-	.bDescriptorType = USB_DT_ENDPOINT,
-
-	.bEndpointAddress = USB_DIR_IN,
-	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
-	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
-};
-
-/* UDC descriptor */
-static struct ci13xxx *_udc;
-
-/* Interrupt statistics */
-#define ISR_MASK   0x1F
-static struct {
-	u32 test;
-	u32 ui;
-	u32 uei;
-	u32 pci;
-	u32 uri;
-	u32 sli;
-	u32 none;
-	struct {
-		u32 cnt;
-		u32 buf[ISR_MASK+1];
-		u32 idx;
-	} hndl;
-} isr_statistics;
-
-/**
- * ffs_nr: find first (least significant) bit set
- * @x: the word to search
- *
- * This function returns bit number (instead of position)
- */
-static int ffs_nr(u32 x)
-{
-	int n = ffs(x);
-
-	return n ? n-1 : 32;
-}
-
-/******************************************************************************
- * HW block
- *****************************************************************************/
-/* register bank descriptor */
-static struct {
-	unsigned int  lpm;    /* is LPM? */
-	void __iomem *abs;    /* bus map offset */
-	void __iomem *cap;    /* bus map offset + CAP offset + CAP data */
-	size_t        size;   /* bank size */
-} hw_bank;
-
-/* MSM specific */
-#define ABS_AHBBURST        (0x0090UL)
-#define ABS_AHBMODE         (0x0098UL)
-/* UDC register map */
-#define ABS_CAPLENGTH       (0x100UL)
-#define ABS_HCCPARAMS       (0x108UL)
-#define ABS_DCCPARAMS       (0x124UL)
-#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
-/* offset to CAPLENTGH (addr + data) */
-#define CAP_USBCMD          (0x000UL)
-#define CAP_USBSTS          (0x004UL)
-#define CAP_USBINTR         (0x008UL)
-#define CAP_DEVICEADDR      (0x014UL)
-#define CAP_ENDPTLISTADDR   (0x018UL)
-#define CAP_PORTSC          (0x044UL)
-#define CAP_DEVLC           (0x084UL)
-#define CAP_ENDPTPIPEID     (0x0BCUL)
-#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
-#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
-#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
-#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
-#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
-#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
-#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
-#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)
-
-#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(200)
-
-/* maximum number of enpoints: valid only after hw_device_reset() */
-static unsigned int hw_ep_max;
-static void dbg_usb_op_fail(u8 addr, const char *name,
-				const struct ci13xxx_ep *mep);
-/**
- * hw_ep_bit: calculates the bit number
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns bit number
- */
-static inline int hw_ep_bit(int num, int dir)
-{
-	return num + (dir ? 16 : 0);
-}
-
-static int ep_to_bit(int n)
-{
-	int fill = 16 - hw_ep_max / 2;
-
-	if (n >= hw_ep_max / 2)
-		n += fill;
-
-	return n;
-}
-
-/**
- * hw_aread: reads from register bitfield
- * @addr: address relative to bus map
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_aread(u32 addr, u32 mask)
-{
-	return ioread32(addr + hw_bank.abs) & mask;
-}
-
-/**
- * hw_awrite: writes to register bitfield
- * @addr: address relative to bus map
- * @mask: bitfield mask
- * @data: new data
- */
-static void hw_awrite(u32 addr, u32 mask, u32 data)
-{
-	iowrite32(hw_aread(addr, ~mask) | (data & mask),
-		  addr + hw_bank.abs);
-}
-
-/**
- * hw_cread: reads from register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_cread(u32 addr, u32 mask)
-{
-	return ioread32(addr + hw_bank.cap) & mask;
-}
-
-/**
- * hw_cwrite: writes to register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- * @data: new data
- */
-static void hw_cwrite(u32 addr, u32 mask, u32 data)
-{
-	iowrite32(hw_cread(addr, ~mask) | (data & mask),
-		  addr + hw_bank.cap);
-}
-
-/**
- * hw_ctest_and_clear: tests & clears register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- *
- * This function returns register bitfield data
- */
-static u32 hw_ctest_and_clear(u32 addr, u32 mask)
-{
-	u32 reg = hw_cread(addr, mask);
-
-	iowrite32(reg, addr + hw_bank.cap);
-	return reg;
-}
-
-/**
- * hw_ctest_and_write: tests & writes register bitfield
- * @addr: address relative to CAP offset plus content
- * @mask: bitfield mask
- * @data: new data
- *
- * This function returns register bitfield data
- */
-static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
-{
-	u32 reg = hw_cread(addr, ~0);
-
-	iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
-	return (reg & mask) >> ffs_nr(mask);
-}
-
-static int hw_device_init(void __iomem *base)
-{
-	u32 reg;
-
-	/* bank is a module variable */
-	hw_bank.abs = base;
-
-	hw_bank.cap = hw_bank.abs;
-	hw_bank.cap += ABS_CAPLENGTH;
-	hw_bank.cap += ioread8(hw_bank.cap);
-
-	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
-	hw_bank.lpm  = reg;
-	hw_bank.size = hw_bank.cap - hw_bank.abs;
-	hw_bank.size += CAP_LAST;
-	hw_bank.size /= sizeof(u32);
-
-	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
-
-	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
-		return -ENODEV;
-
-	/* setup lock mode ? */
-
-	/* ENDPTSETUPSTAT is '0' by default */
-
-	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */
-
-	return 0;
-}
-/**
- * hw_device_reset: resets chip (execute without interruption)
- * @base: register base address
- *
- * This function returns an error code
- */
-static int hw_device_reset(struct ci13xxx *udc)
-{
-	int delay_count = 25; /* 250 usec */
-
-	/* should flush & stop before reset */
-	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
-	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
-
-	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
-	while (delay_count--  && hw_cread(CAP_USBCMD, USBCMD_RST))
-		udelay(10);
-	if (delay_count < 0)
-		pr_err("USB controller reset failed\n");
-
-	if (udc->udc_driver->notify_event)
-		udc->udc_driver->notify_event(udc,
-			CI13XXX_CONTROLLER_RESET_EVENT);
-
-	/* USBMODE should be configured step by step */
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
-	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
-
-	/*
-	 * ITC (Interrupt Threshold Control) field is to set the maximum
-	 * rate at which the device controller will issue interrupts.
-	 * The maximum interrupt interval measured in micro frames.
-	 * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
-	 * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
-	 * can be set to lesser value to gain performance.
-	 */
-	if (udc->udc_driver->nz_itc)
-		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK,
-			USBCMD_ITC(udc->udc_driver->nz_itc));
-	else if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
-		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
-
-	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
-		pr_err("cannot enter in device mode");
-		pr_err("lpm = %i", hw_bank.lpm);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-/**
- * hw_device_state: enables/disables interrupts & starts/stops device (execute
- *                  without interruption)
- * @dma: 0 => disable, !0 => enable and set dma engine
- *
- * This function returns an error code
- */
-static int hw_device_state(u32 dma)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (dma) {
-		if (!(udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)) {
-			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, 0);
-			pr_debug("%s(): streaming mode is enabled. USBMODE:%x\n",
-				 __func__, hw_cread(CAP_USBMODE, ~0));
-
-		} else {
-			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
-			pr_debug("%s(): streaming mode is disabled. USBMODE:%x\n",
-				__func__, hw_cread(CAP_USBMODE, ~0));
-		}
-
-		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
-
-
-		/* Set BIT(31) to enable AHB2AHB Bypass functionality */
-		if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
-			hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, AHB2AHB_BYPASS);
-			pr_debug("%s(): ByPass Mode is enabled. AHBMODE:%x\n",
-					__func__, hw_aread(ABS_AHBMODE, ~0));
-		}
-
-		/* interrupt, error, port change, reset, sleep/suspend */
-		hw_cwrite(CAP_USBINTR, ~0,
-			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
-		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
-	} else {
-		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
-		hw_cwrite(CAP_USBINTR, ~0, 0);
-		/* Clear BIT(31) to disable AHB2AHB Bypass functionality */
-		if (udc->udc_driver->flags & CI13XXX_ENABLE_AHB2AHB_BYPASS) {
-			hw_awrite(ABS_AHBMODE, AHB2AHB_BYPASS, 0);
-			pr_debug("%s(): ByPass Mode is disabled. AHBMODE:%x\n",
-					__func__, hw_aread(ABS_AHBMODE, ~0));
-		}
-	}
-	return 0;
-}
-
-static void debug_ept_flush_info(int ep_num, int dir)
-{
-	struct ci13xxx *udc = _udc;
-	struct ci13xxx_ep *mep;
-
-	if (dir)
-		mep = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mep = &udc->ci13xxx_ep[ep_num];
-
-	pr_err_ratelimited("USB Registers\n");
-	pr_err_ratelimited("USBCMD:%x\n", hw_cread(CAP_USBCMD, ~0));
-	pr_err_ratelimited("USBSTS:%x\n", hw_cread(CAP_USBSTS, ~0));
-	pr_err_ratelimited("ENDPTLISTADDR:%x\n",
-			hw_cread(CAP_ENDPTLISTADDR, ~0));
-	pr_err_ratelimited("PORTSC:%x\n", hw_cread(CAP_PORTSC, ~0));
-	pr_err_ratelimited("USBMODE:%x\n", hw_cread(CAP_USBMODE, ~0));
-	pr_err_ratelimited("ENDPTSTAT:%x\n", hw_cread(CAP_ENDPTSTAT, ~0));
-
-	dbg_usb_op_fail(0xFF, "FLUSHF", mep);
-}
-/**
- * hw_ep_flush: flush endpoint fifo (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns an error code
- */
-static int hw_ep_flush(int num, int dir)
-{
-	ktime_t start, diff;
-	int n = hw_ep_bit(num, dir);
-	struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
-
-	/* Flush ep0 even when queue is empty */
-	if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
-		return 0;
-
-	start = ktime_get();
-	do {
-		/* flush any pending transfer */
-		hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
-		while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) {
-			cpu_relax();
-			diff = ktime_sub(ktime_get(), start);
-			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
-				printk_ratelimited(KERN_ERR
-					"%s: Failed to flush ep#%d %s\n",
-					__func__, num,
-					dir ? "IN" : "OUT");
-				debug_ept_flush_info(num, dir);
-				_udc->skip_flush = true;
-				/* Notify to trigger h/w reset recovery later */
-				if (_udc->udc_driver->notify_event)
-					_udc->udc_driver->notify_event(_udc,
-						CI13XXX_CONTROLLER_ERROR_EVENT);
-				return 0;
-			}
-		}
-	} while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
-
-	return 0;
-}
-
-/**
- * hw_ep_disable: disables endpoint (execute without interruption)
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns an error code
- */
-static int hw_ep_disable(int num, int dir)
-{
-	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
-		  dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
-	return 0;
-}
-
-/**
- * hw_ep_enable: enables endpoint (execute without interruption)
- * @num:  endpoint number
- * @dir:  endpoint direction
- * @type: endpoint type
- *
- * This function returns an error code
- */
-static int hw_ep_enable(int num, int dir, int type)
-{
-	u32 mask, data;
-
-	if (dir) {
-		mask  = ENDPTCTRL_TXT;  /* type    */
-		data  = type << ffs_nr(mask);
-
-		mask |= ENDPTCTRL_TXS;  /* unstall */
-		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
-		data |= ENDPTCTRL_TXR;
-		mask |= ENDPTCTRL_TXE;  /* enable  */
-		data |= ENDPTCTRL_TXE;
-	} else {
-		mask  = ENDPTCTRL_RXT;  /* type    */
-		data  = type << ffs_nr(mask);
-
-		mask |= ENDPTCTRL_RXS;  /* unstall */
-		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
-		data |= ENDPTCTRL_RXR;
-		mask |= ENDPTCTRL_RXE;  /* enable  */
-		data |= ENDPTCTRL_RXE;
-	}
-	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
-
-	/* make sure endpoint is enabled before returning */
-	mb();
-
-	return 0;
-}
-
-/**
- * hw_ep_get_halt: return endpoint halt status
- * @num: endpoint number
- * @dir: endpoint direction
- *
- * This function returns 1 if endpoint halted
- */
-static int hw_ep_get_halt(int num, int dir)
-{
-	u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
-
-	return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
-}
-
-/**
- * hw_test_and_clear_setup_status: test & clear setup status (execute without
- *                                 interruption)
- * @n: endpoint number
- *
- * This function returns setup status
- */
-static int hw_test_and_clear_setup_status(int n)
-{
-	n = ep_to_bit(n);
-	return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
-}
-
-/**
- * hw_ep_prime: primes endpoint (execute without interruption)
- * @num:     endpoint number
- * @dir:     endpoint direction
- * @is_ctrl: true if control endpoint
- *
- * This function returns an error code
- */
-static int hw_ep_prime(int num, int dir, int is_ctrl)
-{
-	int n = hw_ep_bit(num, dir);
-
-	if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-		return -EAGAIN;
-
-	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
-
-	if (is_ctrl && dir == RX  && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-		return -EAGAIN;
-
-	/* status shoult be tested according with manual but it doesn't work */
-	return 0;
-}
-
-/**
- * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
- *                 without interruption)
- * @num:   endpoint number
- * @dir:   endpoint direction
- * @value: true => stall, false => unstall
- *
- * This function returns an error code
- */
-static int hw_ep_set_halt(int num, int dir, int value)
-{
-	u32 addr, mask_xs, mask_xr;
-
-	if (value != 0 && value != 1)
-		return -EINVAL;
-
-	do {
-		if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
-			return 0;
-
-		addr = CAP_ENDPTCTRL + num * sizeof(u32);
-		mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
-		mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
-
-		/* data toggle - reserved for EP0 but it's in ESS */
-		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
-
-	} while (value != hw_ep_get_halt(num, dir));
-
-	return 0;
-}
-
-/**
- * hw_intr_clear: disables interrupt & clears interrupt status (execute without
- *                interruption)
- * @n: interrupt bit
- *
- * This function returns an error code
- */
-static int hw_intr_clear(int n)
-{
-	if (n >= REG_BITS)
-		return -EINVAL;
-
-	hw_cwrite(CAP_USBINTR, BIT(n), 0);
-	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));
-	return 0;
-}
-
-/**
- * hw_intr_force: enables interrupt & forces interrupt status (execute without
- *                interruption)
- * @n: interrupt bit
- *
- * This function returns an error code
- */
-static int hw_intr_force(int n)
-{
-	if (n >= REG_BITS)
-		return -EINVAL;
-
-	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
-	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
-	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
-	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
-	return 0;
-}
-
-/**
- * hw_is_port_high_speed: test if port is high speed
- *
- * This function returns true if high speed port
- */
-static int hw_port_is_high_speed(void)
-{
-	return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
-		hw_cread(CAP_PORTSC, PORTSC_HSP);
-}
-
-/**
- * hw_port_test_get: reads port test mode value
- *
- * This function returns port test mode value
- */
-static u8 hw_port_test_get(void)
-{
-	return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
-}
-
-/**
- * hw_port_test_set: writes port test mode (execute without interruption)
- * @mode: new value
- *
- * This function returns an error code
- */
-static int hw_port_test_set(u8 mode)
-{
-	const u8 TEST_MODE_MAX = 7;
-
-	if (mode > TEST_MODE_MAX)
-		return -EINVAL;
-
-	hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
-	return 0;
-}
-
-/**
- * hw_read_intr_enable: returns interrupt enable register
- *
- * This function returns register data
- */
-static u32 hw_read_intr_enable(void)
-{
-	return hw_cread(CAP_USBINTR, ~0);
-}
-
-/**
- * hw_read_intr_status: returns interrupt status register
- *
- * This function returns register data
- */
-static u32 hw_read_intr_status(void)
-{
-	return hw_cread(CAP_USBSTS, ~0);
-}
-
-/**
- * hw_register_read: reads all device registers (execute without interruption)
- * @buf:  destination buffer
- * @size: buffer size
- *
- * This function returns number of registers read
- */
-static size_t hw_register_read(u32 *buf, size_t size)
-{
-	unsigned int i;
-
-	if (size > hw_bank.size)
-		size = hw_bank.size;
-
-	for (i = 0; i < size; i++)
-		buf[i] = hw_aread(i * sizeof(u32), ~0);
-
-	return size;
-}
-
-/**
- * hw_register_write: writes to register
- * @addr: register address
- * @data: register value
- *
- * This function returns an error code
- */
-static int hw_register_write(u16 addr, u32 data)
-{
-	/* align */
-	addr /= sizeof(u32);
-
-	if (addr >= hw_bank.size)
-		return -EINVAL;
-
-	/* align */
-	addr *= sizeof(u32);
-
-	hw_awrite(addr, ~0, data);
-	return 0;
-}
-
-/**
- * hw_test_and_clear_complete: test & clear complete status (execute without
- *                             interruption)
- * @n: endpoint number
- *
- * This function returns complete status
- */
-static int hw_test_and_clear_complete(int n)
-{
-	n = ep_to_bit(n);
-	return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
-}
-
-/**
- * hw_test_and_clear_intr_active: test & clear active interrupts (execute
- *                                without interruption)
- *
- * This function returns active interrutps
- */
-static u32 hw_test_and_clear_intr_active(void)
-{
-	u32 reg = hw_read_intr_status() & hw_read_intr_enable();
-
-	hw_cwrite(CAP_USBSTS, ~0, reg);
-	return reg;
-}
-
-/**
- * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
- *                                interruption)
- *
- * This function returns guard value
- */
-static int hw_test_and_clear_setup_guard(void)
-{
-	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
-}
-
-/**
- * hw_test_and_set_setup_guard: test & set setup guard (execute without
- *                              interruption)
- *
- * This function returns guard value
- */
-static int hw_test_and_set_setup_guard(void)
-{
-	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
-}
-
-/**
- * hw_usb_set_address: configures USB address (execute without interruption)
- * @value: new USB address
- *
- * This function returns an error code
- */
-static int hw_usb_set_address(u8 value)
-{
-	/* advance */
-	hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
-		  value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
-	return 0;
-}
-
-/**
- * hw_usb_reset: restart device after a bus reset (execute without
- *               interruption)
- *
- * This function returns an error code
- */
-static int hw_usb_reset(void)
-{
-	int delay_count = 10; /* 100 usec delay */
-
-	hw_usb_set_address(0);
-
-	/* ESS flushes only at end?!? */
-	hw_cwrite(CAP_ENDPTFLUSH,    ~0, ~0);   /* flush all EPs */
-
-	/* clear complete status */
-	hw_cwrite(CAP_ENDPTCOMPLETE,  0,  0);   /* writes its content */
-
-	/* wait until all bits cleared */
-	while (delay_count-- && hw_cread(CAP_ENDPTPRIME, ~0))
-		udelay(10);
-	if (delay_count < 0)
-		pr_err("ENDPTPRIME is not cleared during bus reset\n");
-
-	/* reset all endpoints ? */
-
-	/*
-	 * reset internal status and wait for further instructions
-	 * no need to verify the port reset status (ESS does it)
-	 */
-
-	return 0;
-}
-
-/******************************************************************************
- * DBG block
- *****************************************************************************/
-/**
- * show_device: prints information about device capabilities and status
- *
- * Check "device.h" for details
- */
-static ssize_t show_device(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct usb_gadget *gadget = &udc->gadget;
-	int n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
-		       gadget->speed);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed         = %d\n",
-		       gadget->max_speed);
-	/* TODO: Scheduled for removal in 3.8. */
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
-		       gadget_is_dualspeed(gadget));
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
-		       gadget->is_otg);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
-		       gadget->is_a_peripheral);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
-		       gadget->b_hnp_enable);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
-		       gadget->a_hnp_support);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
-		       gadget->a_alt_hnp_support);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
-		       (gadget->name ? gadget->name : ""));
-
-	return n;
-}
-static DEVICE_ATTR(device, 0400, show_device, NULL);
-
-/**
- * show_driver: prints information about attached gadget (if any)
- *
- * Check "device.h" for details
- */
-static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct usb_gadget_driver *driver = udc->driver;
-	int n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	if (driver == NULL)
-		return scnprintf(buf, PAGE_SIZE,
-				 "There is no gadget attached!\n");
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "function  = %s\n",
-		       (driver->function ? driver->function : ""));
-	n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
-		       driver->max_speed);
-
-	return n;
-}
-static DEVICE_ATTR(driver, 0400, show_driver, NULL);
-
-/* Maximum event message length */
-#define DBG_DATA_MSG   64UL
-
-/* Maximum event messages */
-#define DBG_DATA_MAX   128UL
-
-/* Event buffer descriptor */
-static struct {
-	char		(buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
-	unsigned int	idx;   /* index */
-	unsigned int	tty;   /* print to console? */
-	rwlock_t	lck;   /* lock */
-} dbg_data = {
-	.idx = 0,
-	.tty = 0,
-	.lck = __RW_LOCK_UNLOCKED(lck)
-};
-
-/**
- * dbg_dec: decrements debug event index
- * @idx: buffer index
- */
-static void dbg_dec(unsigned int *idx)
-{
-	*idx = (*idx - 1) & (DBG_DATA_MAX-1);
-}
-
-/**
- * dbg_inc: increments debug event index
- * @idx: buffer index
- */
-static void dbg_inc(unsigned int *idx)
-{
-	*idx = (*idx + 1) & (DBG_DATA_MAX-1);
-}
-
-
-static unsigned int ep_addr_txdbg_mask;
-module_param(ep_addr_txdbg_mask, uint, 0644);
-static unsigned int ep_addr_rxdbg_mask;
-module_param(ep_addr_rxdbg_mask, uint, 0644);
-
-static int allow_dbg_print(u8 addr)
-{
-	int dir, num;
-
-	/* allow bus wide events */
-	if (addr == 0xff)
-		return 1;
-
-	dir = addr & USB_ENDPOINT_DIR_MASK ? TX : RX;
-	num = addr & ~USB_ENDPOINT_DIR_MASK;
-	num = 1 << num;
-
-	if ((dir == TX) && (num & ep_addr_txdbg_mask))
-		return 1;
-	if ((dir == RX) && (num & ep_addr_rxdbg_mask))
-		return 1;
-
-	return 0;
-}
-
-#define TIME_BUF_LEN  20
-/*get_timestamp - returns time of day in us */
-static char *get_timestamp(char *tbuf)
-{
-	unsigned long long t;
-	unsigned long nanosec_rem;
-
-	t = cpu_clock(smp_processor_id());
-	nanosec_rem = do_div(t, 1000000000)/1000;
-	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
-		nanosec_rem);
-	return tbuf;
-}
-
-/**
- * dbg_print:  prints the common part of the event
- * @addr:   endpoint address
- * @name:   event name
- * @status: status
- * @extra:  extra information
- */
-static void dbg_print(u8 addr, const char *name, int status, const char *extra)
-{
-	unsigned long flags;
-	char tbuf[TIME_BUF_LEN];
-
-	if (!allow_dbg_print(addr))
-		return;
-
-	write_lock_irqsave(&dbg_data.lck, flags);
-
-	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
-		  "%s\t? %02X %-7.7s %4i ?\t%s\n",
-		  get_timestamp(tbuf), addr, name, status, extra);
-
-	dbg_inc(&dbg_data.idx);
-
-	write_unlock_irqrestore(&dbg_data.lck, flags);
-
-	if (dbg_data.tty != 0)
-		pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n",
-			  get_timestamp(tbuf), addr, name, status, extra);
-}
-
-/**
- * dbg_done: prints a DONE event
- * @addr:   endpoint address
- * @token:  transfer descriptor token
- * @status: status
- */
-static void dbg_done(u8 addr, const u32 token, int status)
-{
-	char msg[DBG_DATA_MSG];
-
-	scnprintf(msg, sizeof(msg), "%d %02X",
-		  (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
-		  (int)(token & TD_STATUS)      >> ffs_nr(TD_STATUS));
-	dbg_print(addr, "DONE", status, msg);
-}
-
-/**
- * dbg_event: prints a generic event
- * @addr:   endpoint address
- * @name:   event name
- * @status: status
- */
-static void dbg_event(u8 addr, const char *name, int status)
-{
-	if (name != NULL)
-		dbg_print(addr, name, status, "");
-}
-
-/**
- * dbg_queue: prints a QUEUE event
- * @addr:   endpoint address
- * @req:    USB request
- * @status: status
- */
-static void dbg_queue(u8 addr, const struct usb_request *req, int status)
-{
-	char msg[DBG_DATA_MSG];
-
-	if (req != NULL) {
-		scnprintf(msg, sizeof(msg),
-			  "%d %d", !req->no_interrupt, req->length);
-		dbg_print(addr, "QUEUE", status, msg);
-	}
-}
-
-/**
- * dbg_setup: prints a SETUP event
- * @addr: endpoint address
- * @req:  setup request
- */
-static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
-{
-	char msg[DBG_DATA_MSG];
-
-	if (req != NULL) {
-		scnprintf(msg, sizeof(msg),
-			  "%02X %02X %04X %04X %d", req->bRequestType,
-			  req->bRequest, le16_to_cpu(req->wValue),
-			  le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
-		dbg_print(addr, "SETUP", 0, msg);
-	}
-}
-
-/**
- * dbg_usb_op_fail: prints USB Operation FAIL event
- * @addr: endpoint address
- * @name: failing operation name
- * @mep:  endpoint structure
- */
-static void dbg_usb_op_fail(u8 addr, const char *name,
-				const struct ci13xxx_ep *mep)
-{
-	char msg[DBG_DATA_MSG];
-	struct ci13xxx_req *req;
-	struct list_head *ptr = NULL;
-
-	if (mep != NULL) {
-		scnprintf(msg, sizeof(msg),
-			"%s Fail EP%d%s QH:%08X",
-			name, mep->num,
-			mep->dir ? "IN" : "OUT", mep->qh.ptr->cap);
-		dbg_print(addr, name, 0, msg);
-		scnprintf(msg, sizeof(msg),
-				"cap:%08X %08X %08X\n",
-				mep->qh.ptr->curr, mep->qh.ptr->td.next,
-				mep->qh.ptr->td.token);
-		dbg_print(addr, "QHEAD", 0, msg);
-
-		list_for_each(ptr, &mep->qh.queue) {
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-			scnprintf(msg, sizeof(msg),
-					"%pKa:%08X:%08X\n",
-					&req->dma, req->ptr->next,
-					req->ptr->token);
-			dbg_print(addr, "REQ", 0, msg);
-			scnprintf(msg, sizeof(msg), "%08X:%d\n",
-					req->ptr->page[0],
-					req->req.status);
-			dbg_print(addr, "REQPAGE", 0, msg);
-		}
-	}
-}
-
-/**
- * show_events: displays the event buffer
- *
- * Check "device.h" for details
- */
-static ssize_t show_events(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	unsigned long flags;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	read_lock_irqsave(&dbg_data.lck, flags);
-
-	i = dbg_data.idx;
-	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
-		n += strlen(dbg_data.buf[i]);
-		if (n >= PAGE_SIZE) {
-			n -= strlen(dbg_data.buf[i]);
-			break;
-		}
-	}
-	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
-		j += scnprintf(buf + j, PAGE_SIZE - j,
-			       "%s", dbg_data.buf[i]);
-
-	read_unlock_irqrestore(&dbg_data.lck, flags);
-
-	return n;
-}
-
-/**
- * store_events: configures whether events are also printed to the console
- *
- * Check "device.h" for details
- */
-static ssize_t store_events(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	unsigned int tty;
-
-	dbg_trace("[%s] %pK, %zu\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (kstrtouint(buf, 10, &tty) || tty > 1) {
-		dev_err(dev, "<1|0>: enable|disable console log\n");
-		goto done;
-	}
-
-	dbg_data.tty = tty;
-	dev_info(dev, "tty = %u", dbg_data.tty);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(events, 0600, show_events, store_events);
-
-/**
- * show_inters: interrupt status, enable status and history
- *
- * Check "device.h" for details
- */
-static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	u32 intr;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	n += scnprintf(buf + n, PAGE_SIZE - n,
-		       "status = %08x\n", hw_read_intr_status());
-	n += scnprintf(buf + n, PAGE_SIZE - n,
-		       "enable = %08x\n", hw_read_intr_enable());
-
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
-		       isr_statistics.test);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
-		       isr_statistics.ui);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
-		       isr_statistics.uei);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
-		       isr_statistics.pci);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
-		       isr_statistics.uri);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
-		       isr_statistics.sli);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
-		       isr_statistics.none);
-	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
-		       isr_statistics.hndl.cnt);
-
-	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
-		i   &= ISR_MASK;
-		intr = isr_statistics.hndl.buf[i];
-
-		if (USBi_UI  & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
-		intr &= ~USBi_UI;
-		if (USBi_UEI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
-		intr &= ~USBi_UEI;
-		if (USBi_PCI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
-		intr &= ~USBi_PCI;
-		if (USBi_URI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
-		intr &= ~USBi_URI;
-		if (USBi_SLI & intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
-		intr &= ~USBi_SLI;
-		if (intr)
-			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
-		if (isr_statistics.hndl.buf[i])
-			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-
-/**
- * store_inters: enables & forces or disables an individual interrupt
- *                   (to be used for test purposes only)
- *
- * Check "device.h" for details
- */
-static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int en, bit;
-
-	dbg_trace("[%s] %pK, %zu\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
-		dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (en) {
-		if (hw_intr_force(bit))
-			dev_err(dev, "invalid bit number\n");
-		else
-			isr_statistics.test++;
-	} else {
-		if (hw_intr_clear(bit))
-			dev_err(dev, "invalid bit number\n");
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(inters, 0600, show_inters, store_inters);
-
-/**
- * show_port_test: reads port test mode
- *
- * Check "device.h" for details
- */
-static ssize_t show_port_test(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int mode;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	mode = hw_port_test_get();
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
-}
-
-/**
- * store_port_test: writes port test mode
- *
- * Check "device.h" for details
- */
-static ssize_t store_port_test(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int mode;
-
-	dbg_trace("[%s] %pK, %zu\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (kstrtouint(buf, 10, &mode)) {
-		dev_err(dev, "<mode>: set port test mode");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (hw_port_test_set(mode))
-		dev_err(dev, "invalid mode\n");
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(port_test, 0600, show_port_test, store_port_test);
-
-/**
- * show_qheads: DMA contents of all queue heads
- *
- * Check "device.h" for details
- */
-static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
-			   char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	unsigned int i, j, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max/2; i++) {
-		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
-		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
-
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "EP=%02i: RX=%08X TX=%08X\n",
-			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
-		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-				       " %04X:    %08X    %08X\n", j,
-				       *((u32 *)mEpRx->qh.ptr + j),
-				       *((u32 *)mEpTx->qh.ptr + j));
-		}
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-static DEVICE_ATTR(qheads, 0400, show_qheads, NULL);
-
-/**
- * show_registers: dumps all registers
- *
- * Check "device.h" for details
- */
-#define DUMP_ENTRIES	512
-static ssize_t show_registers(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	u32 *dump;
-	unsigned int i, k, n = 0;
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
-	if (!dump)
-		return 0;
-
-	spin_lock_irqsave(udc->lock, flags);
-	k = hw_register_read(dump, DUMP_ENTRIES);
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	for (i = 0; i < k; i++) {
-		n += scnprintf(buf + n, PAGE_SIZE - n,
-			       "reg[0x%04X] = 0x%08X\n",
-			       i * (unsigned int)sizeof(u32), dump[i]);
-	}
-	kfree(dump);
-
-	return n;
-}
-
-/**
- * store_registers: writes value to register address
- *
- * Check "device.h" for details
- */
-static ssize_t store_registers(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long addr, data, flags;
-
-	dbg_trace("[%s] %pK, %zu\n", __func__, buf, count);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		goto done;
-	}
-
-	if (sscanf(buf, "%li %li", &addr, &data) != 2) {
-		dev_err(dev, "<addr> <data>: write data to register address");
-		goto done;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (hw_register_write(addr, data))
-		dev_err(dev, "invalid address range\n");
-	spin_unlock_irqrestore(udc->lock, flags);
-
- done:
-	return count;
-}
-static DEVICE_ATTR(registers, 0600, show_registers, store_registers);
-
-/**
- * show_requests: DMA contents of all requests currently queued (all endpts)
- *
- * Check "device.h" for details
- */
-static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	unsigned long flags;
-	struct list_head   *ptr = NULL;
-	struct ci13xxx_req *req = NULL;
-	unsigned int i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
-
-	dbg_trace("[%s] %pK\n", __func__, buf);
-	if (attr == NULL || buf == NULL) {
-		dev_err(dev, "[%s] EINVAL\n", __func__);
-		return 0;
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max; i++)
-		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
-		{
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-
-			n += scnprintf(buf + n, PAGE_SIZE - n,
-					"EP=%02i: TD=%08X %s\n",
-					i % (hw_ep_max/2), (u32)req->dma,
-					((i < hw_ep_max/2) ? "RX" : "TX"));
-
-			for (j = 0; j < qSize; j++)
-				n += scnprintf(buf + n, PAGE_SIZE - n,
-						" %04X:    %08X\n", j,
-						*((u32 *)req->ptr + j));
-		}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return n;
-}
-static DEVICE_ATTR(requests, 0400, show_requests, NULL);
-
-/* EP# and Direction */
-static ssize_t prime_ept(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct ci13xxx_ep *mEp;
-	unsigned int ep_num, dir;
-	int n;
-	struct ci13xxx_req *mReq = NULL;
-
-	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
-		dev_err(dev, "<ep_num> <dir>: prime the ep");
-		goto done;
-	}
-
-	if (dir)
-		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mEp = &udc->ci13xxx_ep[ep_num];
-
-	n = hw_ep_bit(mEp->num, mEp->dir);
-	mReq =  list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
-	mEp->qh.ptr->td.next   = mReq->dma;
-	mEp->qh.ptr->td.token &= ~TD_STATUS;
-
-	/* Makes sure that above write goes through */
-	wmb();
-
-	hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
-	while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
-		cpu_relax();
-
-	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
-			hw_cread(CAP_ENDPTPRIME, ~0),
-			hw_cread(CAP_ENDPTSTAT, ~0),
-			mEp->num, mEp->dir ? "IN" : "OUT");
-done:
-	return count;
-
-}
-static DEVICE_ATTR(prime, 0200, NULL, prime_ept);
-
-/* EP# and Direction */
-static ssize_t print_dtds(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-	struct ci13xxx_ep *mEp;
-	unsigned int ep_num, dir;
-	int n;
-	struct list_head   *ptr = NULL;
-	struct ci13xxx_req *req = NULL;
-
-	if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
-		dev_err(dev, "<ep_num> <dir>: to print dtds");
-		goto done;
-	}
-
-	if (dir)
-		mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
-	else
-		mEp = &udc->ci13xxx_ep[ep_num];
-
-	n = hw_ep_bit(mEp->num, mEp->dir);
-	pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s dTD_update_fail_count: %lu mEp->dTD_update_fail_count: %lu mEp->dTD_active_re_q_count: %lu mEp->prime_fail_count: %lu\n",
-			__func__,
-			hw_cread(CAP_ENDPTPRIME, ~0),
-			hw_cread(CAP_ENDPTSTAT, ~0),
-			mEp->num, mEp->dir ? "IN" : "OUT",
-			udc->dTD_update_fail_count,
-			mEp->dTD_update_fail_count,
-			mEp->dTD_active_re_q_count,
-			mEp->prime_fail_count);
-
-	pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
-			mEp->qh.ptr->cap, mEp->qh.ptr->curr,
-			mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);
-
-	list_for_each(ptr, &mEp->qh.queue) {
-		req = list_entry(ptr, struct ci13xxx_req, queue);
-
-		pr_info("\treq:%pKa next:%08x token:%08x page0:%08x status:%d\n",
-				&req->dma, req->ptr->next, req->ptr->token,
-				req->ptr->page[0], req->req.status);
-	}
-done:
-	return count;
-
-}
-static DEVICE_ATTR(dtds, 0200, NULL, print_dtds);
-
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int ret = 0;
-
-	trace();
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->gadget.remote_wakeup) {
-		ret = -EOPNOTSUPP;
-		dbg_trace("remote wakeup feature is not enabled\n");
-		goto out;
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&_gadget->dev);
-
-	udc->udc_driver->notify_event(udc,
-		CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT);
-
-	if (udc->transceiver)
-		usb_phy_set_suspend(udc->transceiver, 0);
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
-		ret = -EINVAL;
-		dbg_trace("port is not suspended\n");
-		pm_runtime_put(&_gadget->dev);
-		goto out;
-	}
-	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
-
-	pm_runtime_mark_last_busy(&_gadget->dev);
-	pm_runtime_put_autosuspend(&_gadget->dev);
-out:
-	spin_unlock_irqrestore(udc->lock, flags);
-	return ret;
-}
-
-static void usb_do_remote_wakeup(struct work_struct *w)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-	bool do_wake;
-
-	/*
-	 * This work cannot be canceled from the interrupt handler. Check
-	 * if wakeup conditions are still met.
-	 */
-	spin_lock_irqsave(udc->lock, flags);
-	do_wake = udc->suspended && udc->gadget.remote_wakeup;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (do_wake)
-		ci13xxx_wakeup(&udc->gadget);
-}
-
-static ssize_t usb_remote_wakeup(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
-
-	ci13xxx_wakeup(&udc->gadget);
-
-	return count;
-}
-static DEVICE_ATTR(wakeup, 0200, NULL, usb_remote_wakeup);
-
-/**
- * dbg_create_files: initializes the attribute interface
- * @dev: device
- *
- * This function returns an error code
- */
-static int __maybe_unused dbg_create_files(struct device *dev)
-{
-	int retval = 0;
-
-	if (dev == NULL)
-		return -EINVAL;
-	retval = device_create_file(dev, &dev_attr_device);
-	if (retval)
-		goto done;
-	retval = device_create_file(dev, &dev_attr_driver);
-	if (retval)
-		goto rm_device;
-	retval = device_create_file(dev, &dev_attr_events);
-	if (retval)
-		goto rm_driver;
-	retval = device_create_file(dev, &dev_attr_inters);
-	if (retval)
-		goto rm_events;
-	retval = device_create_file(dev, &dev_attr_port_test);
-	if (retval)
-		goto rm_inters;
-	retval = device_create_file(dev, &dev_attr_qheads);
-	if (retval)
-		goto rm_port_test;
-	retval = device_create_file(dev, &dev_attr_registers);
-	if (retval)
-		goto rm_qheads;
-	retval = device_create_file(dev, &dev_attr_requests);
-	if (retval)
-		goto rm_registers;
-	retval = device_create_file(dev, &dev_attr_wakeup);
-	if (retval)
-		goto rm_requests;
-	retval = device_create_file(dev, &dev_attr_prime);
-	if (retval)
-		goto rm_wakeup;
-	retval = device_create_file(dev, &dev_attr_dtds);
-	if (retval)
-		goto rm_prime;
-
-	return 0;
-
-	/* unwind in reverse order; remove only what was actually created */
-rm_prime:
-	device_remove_file(dev, &dev_attr_prime);
-rm_wakeup:
-	device_remove_file(dev, &dev_attr_wakeup);
-rm_requests:
-	device_remove_file(dev, &dev_attr_requests);
- rm_registers:
-	device_remove_file(dev, &dev_attr_registers);
- rm_qheads:
-	device_remove_file(dev, &dev_attr_qheads);
- rm_port_test:
-	device_remove_file(dev, &dev_attr_port_test);
- rm_inters:
-	device_remove_file(dev, &dev_attr_inters);
- rm_events:
-	device_remove_file(dev, &dev_attr_events);
- rm_driver:
-	device_remove_file(dev, &dev_attr_driver);
- rm_device:
-	device_remove_file(dev, &dev_attr_device);
- done:
-	return retval;
-}
-
-/**
- * dbg_remove_files: destroys the attribute interface
- * @dev: device
- *
- * This function returns an error code
- */
-static int __maybe_unused dbg_remove_files(struct device *dev)
-{
-	if (dev == NULL)
-		return -EINVAL;
-	device_remove_file(dev, &dev_attr_dtds);
-	device_remove_file(dev, &dev_attr_prime);
-	device_remove_file(dev, &dev_attr_wakeup);
-	device_remove_file(dev, &dev_attr_requests);
-	device_remove_file(dev, &dev_attr_registers);
-	device_remove_file(dev, &dev_attr_qheads);
-	device_remove_file(dev, &dev_attr_port_test);
-	device_remove_file(dev, &dev_attr_inters);
-	device_remove_file(dev, &dev_attr_events);
-	device_remove_file(dev, &dev_attr_driver);
-	device_remove_file(dev, &dev_attr_device);
-	return 0;
-}
-
-/******************************************************************************
- * UTIL block
- *****************************************************************************/
-/**
- * _usb_addr: calculates endpoint address from direction & number
- * @ep:  endpoint
- */
-static inline u8 _usb_addr(struct ci13xxx_ep *ep)
-{
-	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
-}
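The gadget-side endpoint address is just the endpoint number with bit 7 set
for the IN (TX) direction, e.g. 0x82 for EP2 IN and 0x02 for EP2 OUT:

	#include <stdio.h>

	#define DIR_IN 0x80u	/* USB_ENDPOINT_DIR_MASK */

	int main(void)
	{
		unsigned int num = 2;

		printf("0x%02x 0x%02x\n", DIR_IN | num, num);	/* "0x82 0x02" */
		return 0;
	}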
-
-static void ep_prime_timer_func(unsigned long data)
-{
-	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
-	struct ci13xxx_req *req;
-	struct list_head *ptr = NULL;
-	int n = hw_ep_bit(mep->num, mep->dir);
-	unsigned long flags;
-
-
-	spin_lock_irqsave(mep->lock, flags);
-
-	if (_udc && (!_udc->vbus_active || _udc->suspended)) {
-		pr_debug("ep%d%s prime timer when vbus_active=%d,suspend=%d\n",
-			mep->num, mep->dir ? "IN" : "OUT",
-			_udc->vbus_active, _udc->suspended);
-		goto out;
-	}
-
-	if (!hw_cread(CAP_ENDPTPRIME, BIT(n)))
-		goto out;
-
-	if (list_empty(&mep->qh.queue))
-		goto out;
-
-	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);
-
-	/* clean speculative fetches on req->ptr->token */
-	mb();
-	if (!(TD_STATUS_ACTIVE & req->ptr->token))
-		goto out;
-
-	mep->prime_timer_count++;
-	if (mep->prime_timer_count == MAX_PRIME_CHECK_RETRY) {
-		mep->prime_timer_count = 0;
-		pr_info("ep%d dir:%s QH:cap:%08x cur:%08x next:%08x tkn:%08x\n",
-				mep->num, mep->dir ? "IN" : "OUT",
-				mep->qh.ptr->cap, mep->qh.ptr->curr,
-				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
-		list_for_each(ptr, &mep->qh.queue) {
-			req = list_entry(ptr, struct ci13xxx_req, queue);
-			pr_info("\treq:%pKa:%08xtkn:%08xpage0:%08xsts:%d\n",
-					&req->dma, req->ptr->next,
-					req->ptr->token, req->ptr->page[0],
-					req->req.status);
-		}
-		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
-		mep->prime_fail_count++;
-	} else {
-		mod_timer(&mep->prime_timer, EP_PRIME_CHECK_DELAY);
-	}
-
-	spin_unlock_irqrestore(mep->lock, flags);
-	return;
-
-out:
-	mep->prime_timer_count = 0;
-	spin_unlock_irqrestore(mep->lock, flags);
-
-}
-
-/**
- * _hardware_enqueue: configures a request at hardware level
- * @mEp:  endpoint
- * @mReq: request
- *
- * This function returns an error code
- */
-static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
-{
-	unsigned int i;
-	int ret = 0;
-	unsigned int length = mReq->req.length;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK, %pK", mEp, mReq);
-
-	/* don't queue twice */
-	if (mReq->req.status == -EALREADY)
-		return -EALREADY;
-
-	mReq->req.status = -EALREADY;
-	if (length && mReq->req.dma == DMA_ERROR_CODE) {
-		mReq->req.dma = dma_map_single(mEp->device, mReq->req.buf,
-					length, mEp->dir ? DMA_TO_DEVICE :
-					DMA_FROM_DEVICE);
-		if (mReq->req.dma == 0)
-			return -ENOMEM;
-
-		mReq->map = 1;
-	}
-
-	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
-		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
-					   &mReq->zdma);
-		if (mReq->zptr == NULL) {
-			if (mReq->map) {
-				dma_unmap_single(mEp->device, mReq->req.dma,
-					length, mEp->dir ? DMA_TO_DEVICE :
-					DMA_FROM_DEVICE);
-				mReq->req.dma = DMA_ERROR_CODE;
-				mReq->map     = 0;
-			}
-			return -ENOMEM;
-		}
-		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
-		mReq->zptr->next    = TD_TERMINATE;
-		mReq->zptr->token   = TD_STATUS_ACTIVE;
-		if (!mReq->req.no_interrupt)
-			mReq->zptr->token   |= TD_IOC;
-	}
-
-	/*
-	 * TD configuration
-	 * TODO - handle requests which spawns into several TDs
-	 */
-	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
-	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
-	mReq->ptr->token   &= TD_TOTAL_BYTES;
-	mReq->ptr->token   |= TD_STATUS_ACTIVE;
-	if (mReq->zptr) {
-		mReq->ptr->next    = mReq->zdma;
-	} else {
-		mReq->ptr->next    = TD_TERMINATE;
-		if (!mReq->req.no_interrupt)
-			mReq->ptr->token  |= TD_IOC;
-	}
-
-	/* MSM Specific: updating the request as required for
-	 * SPS mode. Enable MSM DMA engine according
-	 * to the UDC private data in the request.
-	 */
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			mReq->ptr->token = TD_STATUS_ACTIVE;
-			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
-				mReq->ptr->next = TD_TERMINATE;
-			else
-				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
-			if (!mReq->req.no_interrupt)
-				mReq->ptr->token |= MSM_ETD_IOC;
-		}
-		mReq->req.dma = 0;
-	}
-
-	mReq->ptr->page[0]  = mReq->req.dma;
-	for (i = 1; i < 5; i++)
-		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
-							~TD_RESERVED_MASK;
-	/* Makes sure that above write goes through */
-	wmb();
-
-	/* Remote Wakeup */
-	if (udc->suspended) {
-		if (!udc->gadget.remote_wakeup) {
-			mReq->req.status = -EAGAIN;
-
-			dev_dbg(mEp->device, "%s: queue failed (suspend).",
-					__func__);
-			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
-					__func__, mEp->num);
-
-			return -EAGAIN;
-		}
-
-		usb_phy_set_suspend(udc->transceiver, 0);
-		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
-	}
-
-	if (!list_empty(&mEp->qh.queue)) {
-		struct ci13xxx_req *mReqPrev;
-		int n = hw_ep_bit(mEp->num, mEp->dir);
-		int tmp_stat;
-		ktime_t start, diff;
-
-		mReqPrev = list_entry(mEp->qh.queue.prev,
-				struct ci13xxx_req, queue);
-		if (mReqPrev->zptr)
-			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
-		else
-			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
-		/* Makes sure that above write goes through */
-		wmb();
-		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
-			goto done;
-		start = ktime_get();
-		do {
-			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
-			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
-			diff = ktime_sub(ktime_get(), start);
-			/* poll for max. 100ms */
-			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
-				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
-					break;
-				printk_ratelimited(KERN_ERR
-				"%s:queue failed ep#%d %s\n",
-				 __func__, mEp->num, mEp->dir ? "IN" : "OUT");
-				return -EAGAIN;
-			}
-		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
-		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
-		if (tmp_stat)
-			goto done;
-	}
-
-	/* Hardware may leave a few TDs unprocessed; check and reprime with 1st */
-	if (!list_empty(&mEp->qh.queue)) {
-		struct ci13xxx_req *mReq_active, *mReq_next;
-		u32 i = 0;
-
-		/* Nothing to be done if hardware already finished this TD */
-		if ((TD_STATUS_ACTIVE & mReq->ptr->token) == 0)
-			goto done;
-
-		/* Iterate forward to find first TD with ACTIVE bit set */
-		mReq_active = mReq;
-		list_for_each_entry(mReq_next, &mEp->qh.queue, queue) {
-			i++;
-			mEp->dTD_active_re_q_count++;
-			if (TD_STATUS_ACTIVE & mReq_next->ptr->token) {
-				mReq_active = mReq_next;
-				dbg_event(_usb_addr(mEp), "ReQUE",
-					  mReq_next->ptr->token);
-				pr_debug("!!ReQ(%u-%u-%x)-%u!!\n", mEp->num,
-					 mEp->dir, mReq_next->ptr->token, i);
-				break;
-			}
-		}
-
-		/*  QH configuration */
-		mEp->qh.ptr->td.next = mReq_active->dma;
-		mEp->qh.ptr->td.token &= ~TD_STATUS;
-		goto prime;
-	}
-
-	/*  QH configuration */
-	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
-
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			mEp->qh.ptr->td.next   |= MSM_ETD_TYPE;
-			i = hw_cread(CAP_ENDPTPIPEID +
-						 mEp->num * sizeof(u32), ~0);
-			/* Read current value of this EPs pipe id */
-			i = (mEp->dir == TX) ?
-				((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) :
-					(i & MSM_PIPE_ID_MASK);
-			/*
-			 * If requested pipe id is different from current,
-			 * then write it
-			 */
-			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
-				if (mEp->dir == TX)
-					hw_cwrite(
-						CAP_ENDPTPIPEID +
-							mEp->num * sizeof(u32),
-						MSM_PIPE_ID_MASK <<
-							MSM_TX_PIPE_ID_OFS,
-						(mReq->req.udc_priv &
-						 MSM_PIPE_ID_MASK)
-							<< MSM_TX_PIPE_ID_OFS);
-				else
-					hw_cwrite(
-						CAP_ENDPTPIPEID +
-							mEp->num * sizeof(u32),
-						MSM_PIPE_ID_MASK,
-						mReq->req.udc_priv &
-							MSM_PIPE_ID_MASK);
-			}
-		}
-	}
-
-	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
-	mEp->qh.ptr->cap |=  QH_ZLT;
-
-prime:
-	/* Makes sure that above write goes through */
-	wmb();   /* synchronize before ep prime */
-
-	ret = hw_ep_prime(mEp->num, mEp->dir,
-			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
-	if (!ret)
-		mod_timer(&mEp->prime_timer, EP_PRIME_CHECK_DELAY);
-
-done:
-	return ret;
-}
-
-/**
- * _hardware_dequeue: handles a request at hardware level
- * @mEp:  endpoint
- * @mReq: request
- *
- * This function returns an error code
- */
-static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
-{
-	trace("%pK, %pK", mEp, mReq);
-
-	if (mReq->req.status != -EALREADY)
-		return -EINVAL;
-
-	/* clean speculative fetches on req->ptr->token */
-	mb();
-
-	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
-		return -EBUSY;
-
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
-		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
-			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
-			return -EBUSY;
-	if (mReq->zptr) {
-		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
-			return -EBUSY;
-
-		/* The controller may access this dTD one more time.
-		 * Defer freeing it until the next zero-length dTD completes;
-		 * it is safe to assume the controller will no longer access
-		 * the previous dTD after the next dTD completion.
-		 */
-		if (mEp->last_zptr)
-			dma_pool_free(mEp->td_pool, mEp->last_zptr,
-					mEp->last_zdma);
-		mEp->last_zptr = mReq->zptr;
-		mEp->last_zdma = mReq->zdma;
-
-		mReq->zptr = NULL;
-	}
-
-	mReq->req.status = 0;
-
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
-				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-
-	mReq->req.status = mReq->ptr->token & TD_STATUS;
-	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
-	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
-		mReq->req.status = -1;
-
-	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
-	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
-	mReq->req.actual   = mReq->req.length - mReq->req.actual;
-	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
-
-	return mReq->req.actual;
-}
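The total-bytes field of the dTD token counts down as the controller moves
data, so the bytes actually transferred are the requested length minus
whatever remains in the field. A worked example of that calculation (the
bit positions below are an illustrative assumption; the driver itself only
relies on the TD_TOTAL_BYTES mask and its shift):

	#include <stdio.h>

	/* Assumed layout: total-bytes counter in dTD token bits 30:16. */
	#define TOTAL_BYTES_SHIFT	16
	#define TOTAL_BYTES_MASK	(0x7fffu << TOTAL_BYTES_SHIFT)

	/* Requested length minus the untransferred remainder. */
	static unsigned int bytes_transferred(unsigned int token,
					      unsigned int length)
	{
		unsigned int left = (token & TOTAL_BYTES_MASK) >> TOTAL_BYTES_SHIFT;

		return length - left;
	}

	int main(void)
	{
		unsigned int token = 112u << TOTAL_BYTES_SHIFT;	/* 112 left */

		printf("%u\n", bytes_transferred(token, 512));	/* prints 400 */
		return 0;
	}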
-
-/**
- * purge_rw_queue: Purge requests pending at the remote-wakeup
- * queue and send them to the HW.
- *
- * Go over all of the endpoints and push any pending requests to
- * the HW queue.
- */
-static void purge_rw_queue(struct ci13xxx *udc)
-{
-	int i;
-	struct ci13xxx_ep  *mEp  = NULL;
-	struct ci13xxx_req *mReq = NULL;
-
-	/*
-	 * Go over all of the endpoints and push any pending requests to
-	 * the HW queue.
-	 */
-	for (i = 0; i < hw_ep_max; i++) {
-		mEp = &udc->ci13xxx_ep[i];
-
-		while (!list_empty(&udc->ci13xxx_ep[i].rw_queue)) {
-			int retval;
-
-			/* pop oldest request */
-			mReq = list_entry(udc->ci13xxx_ep[i].rw_queue.next,
-					  struct ci13xxx_req, queue);
-
-			list_del_init(&mReq->queue);
-
-			retval = _hardware_enqueue(mEp, mReq);
-
-			if (retval != 0) {
-				dbg_event(_usb_addr(mEp), "QUEUE", retval);
-				mReq->req.status = retval;
-				if (mReq->req.complete != NULL) {
-					if (mEp->type ==
-					    USB_ENDPOINT_XFER_CONTROL)
-						mReq->req.complete(
-							&(_udc->ep0in.ep),
-							&mReq->req);
-					else
-						mReq->req.complete(
-							&mEp->ep,
-							&mReq->req);
-				}
-				retval = 0;
-			}
-
-			if (!retval)
-				list_add_tail(&mReq->queue, &mEp->qh.queue);
-			else if (mEp->multi_req)
-				mEp->multi_req = false;
-
-		}
-	}
-
-	udc->rw_pending = false;
-}
-
-/**
- * restore_original_req: Restore original req's attributes
- * @mReq: Request
- *
- * This function restores original req's attributes.  Call
- * this function before completing the large req (>16K).
- */
-static void restore_original_req(struct ci13xxx_req *mReq)
-{
-	mReq->req.buf = mReq->multi.buf;
-	mReq->req.length = mReq->multi.len;
-	if (!mReq->req.status)
-		mReq->req.actual = mReq->multi.actual;
-
-	mReq->multi.len = 0;
-	mReq->multi.actual = 0;
-	mReq->multi.buf = NULL;
-}
-
-/**
- * release_ep_request: Free an endpoint request and release
- * resources
- * @mReq: request
- * @mEp: endpoint
- *
- */
-static void release_ep_request(struct ci13xxx_ep  *mEp,
-			       struct ci13xxx_req *mReq)
-{
-	struct ci13xxx_ep *mEpTemp = mEp;
-
-	unsigned int val;
-
-	/* MSM Specific: Clear end point specific register */
-	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
-		if (mReq->req.udc_priv & MSM_SPS_MODE) {
-			val = hw_cread(CAP_ENDPTPIPEID +
-				mEp->num * sizeof(u32),
-				~0);
-
-			if (val != MSM_EP_PIPE_ID_RESET_VAL)
-				hw_cwrite(
-					CAP_ENDPTPIPEID +
-					 mEp->num * sizeof(u32),
-					~0, MSM_EP_PIPE_ID_RESET_VAL);
-		}
-	}
-	mReq->req.status = -ESHUTDOWN;
-
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma,
-			mReq->req.length,
-			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-
-	if (mReq->zptr) {
-		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-		mReq->zptr = NULL;
-		mReq->zdma = 0;
-	}
-
-	if (mEp->multi_req) {
-		restore_original_req(mReq);
-		mEp->multi_req = false;
-	}
-
-	if (mReq->req.complete != NULL) {
-		spin_unlock(mEp->lock);
-		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-			mReq->req.length)
-			mEpTemp = &_udc->ep0in;
-		mReq->req.complete(&mEpTemp->ep, &mReq->req);
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mReq->req.complete = NULL;
-		spin_lock(mEp->lock);
-	}
-}
-
-/**
- * _ep_nuke: dequeues all endpoint requests
- * @mEp: endpoint
- *
- * This function returns an error code
- * Caller must hold lock
- */
-static int _ep_nuke(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	trace("%pK", mEp);
-
-	if (mEp == NULL)
-		return -EINVAL;
-
-	del_timer(&mEp->prime_timer);
-	mEp->prime_timer_count = 0;
-
-	hw_ep_flush(mEp->num, mEp->dir);
-
-	while (!list_empty(&mEp->qh.queue)) {
-		/* pop oldest request */
-		struct ci13xxx_req *mReq =
-			list_entry(mEp->qh.queue.next,
-				   struct ci13xxx_req, queue);
-		list_del_init(&mReq->queue);
-
-		release_ep_request(mEp, mReq);
-	}
-
-	/* Clear the requests pending at the remote-wakeup queue */
-	while (!list_empty(&mEp->rw_queue)) {
-
-		/* pop oldest request */
-		struct ci13xxx_req *mReq =
-			list_entry(mEp->rw_queue.next,
-				   struct ci13xxx_req, queue);
-
-		list_del_init(&mReq->queue);
-
-		release_ep_request(mEp, mReq);
-	}
-
-	if (mEp->last_zptr) {
-		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
-		mEp->last_zptr = NULL;
-		mEp->last_zdma = 0;
-	}
-
-	return 0;
-}
-
-/**
- * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
- * @gadget: gadget
- *
- * This function returns an error code
- */
-static int _gadget_stop_activity(struct usb_gadget *gadget)
-{
-	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-
-	trace("%pK", gadget);
-
-	if (gadget == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->gadget.speed = USB_SPEED_UNKNOWN;
-	udc->gadget.remote_wakeup = 0;
-	udc->suspended = 0;
-	udc->configured = 0;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	udc->driver->disconnect(gadget);
-
-	spin_lock_irqsave(udc->lock, flags);
-	_ep_nuke(&udc->ep0out);
-	_ep_nuke(&udc->ep0in);
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	return 0;
-}
-
-/******************************************************************************
- * ISR block
- *****************************************************************************/
-/**
- * isr_reset_handler: USB reset interrupt handler
- * @udc: UDC device
- *
- * This function resets the USB engine after a bus reset has occurred
- */
-static void isr_reset_handler(struct ci13xxx *udc)
-__releases(udc->lock)
-__acquires(udc->lock)
-{
-	int retval;
-
-	trace("%pK", udc);
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	dbg_event(0xFF, "BUS RST", 0);
-
-	spin_unlock(udc->lock);
-
-	if (udc->suspended) {
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-			CI13XXX_CONTROLLER_RESUME_EVENT);
-		if (udc->transceiver)
-			usb_phy_set_suspend(udc->transceiver, 0);
-		udc->driver->resume(&udc->gadget);
-		udc->suspended = 0;
-	}
-
-	/* stop charging upon reset */
-	if (udc->transceiver)
-		usb_phy_set_power(udc->transceiver, 100);
-
-	retval = _gadget_stop_activity(&udc->gadget);
-	if (retval)
-		goto done;
-
-	if (udc->rw_pending)
-		purge_rw_queue(udc);
-
-	_udc->skip_flush = false;
-	retval = hw_usb_reset();
-	if (retval)
-		goto done;
-
-	spin_lock(udc->lock);
-
- done:
-	if (retval)
-		err("error: %i", retval);
-}
-
-/**
- * isr_resume_handler: USB PCI (port change) interrupt handler
- * @udc: UDC device
- *
- */
-static void isr_resume_handler(struct ci13xxx *udc)
-{
-	udc->gadget.speed = hw_port_is_high_speed() ?
-		USB_SPEED_HIGH : USB_SPEED_FULL;
-	if (udc->suspended) {
-		spin_unlock(udc->lock);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-			  CI13XXX_CONTROLLER_RESUME_EVENT);
-		if (udc->transceiver)
-			usb_phy_set_suspend(udc->transceiver, 0);
-		udc->suspended = 0;
-		udc->driver->resume(&udc->gadget);
-		spin_lock(udc->lock);
-
-		if (udc->rw_pending)
-			purge_rw_queue(udc);
-
-	}
-}
-
-/**
- * isr_suspend_handler: USB SLI (suspend) interrupt handler
- * @udc: UDC device
- *
- */
-static void isr_suspend_handler(struct ci13xxx *udc)
-{
-	if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
-		udc->vbus_active) {
-		if (udc->suspended == 0) {
-			spin_unlock(udc->lock);
-			udc->driver->suspend(&udc->gadget);
-			if (udc->udc_driver->notify_event)
-				udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_SUSPEND_EVENT);
-			if (udc->transceiver)
-				usb_phy_set_suspend(udc->transceiver, 1);
-			spin_lock(udc->lock);
-			udc->suspended = 1;
-		}
-	}
-}
-
-/**
- * isr_get_status_complete: get_status request complete function
- * @ep:  endpoint
- * @req: request handled
- *
- * Caller must release lock
- */
-static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	trace("%pK, %pK", ep, req);
-
-	if (ep == NULL || req == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	if (req->status)
-		err("GET_STATUS failed");
-}
-
-/**
- * isr_get_status_response: get_status request response
- * @udc: udc struct
- * @setup: setup request packet
- *
- * This function returns an error code
- */
-static int isr_get_status_response(struct ci13xxx *udc,
-				   struct usb_ctrlrequest *setup)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	struct ci13xxx_ep *mEp = &udc->ep0in;
-	struct usb_request *req = udc->status;
-	int dir, num, retval;
-
-	trace("%pK, %pK", mEp, setup);
-
-	if (mEp == NULL || setup == NULL)
-		return -EINVAL;
-
-	req->complete = isr_get_status_complete;
-	req->length   = 2;
-	req->buf      = udc->status_buf;
-
-	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
-		/* Assume that device is bus powered for now. */
-		*((u16 *)req->buf) = _udc->gadget.remote_wakeup << 1;
-		retval = 0;
-	} else if ((setup->bRequestType & USB_RECIP_MASK) ==
-							USB_RECIP_ENDPOINT) {
-		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
-			TX : RX;
-		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
-		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
-	}
-	/* else do nothing; reserved for future use */
-
-	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
-	spin_lock(mEp->lock);
-	return retval;
-}
-
-/**
- * isr_setup_status_complete: setup_status request complete function
- * @ep:  endpoint
- * @req: request handled
- *
- * Caller must release lock. Puts the port in test mode if the test
- * mode feature is selected.
- */
-static void
-isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx *udc = req->context;
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (udc->test_mode)
-		hw_port_test_set(udc->test_mode);
-	spin_unlock_irqrestore(udc->lock, flags);
-}
-
-/**
- * isr_setup_status_phase: queues the status phase of a setup transaction
- * @udc: udc struct
- *
- * This function returns an error code
- */
-static int isr_setup_status_phase(struct ci13xxx *udc)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	int retval;
-	struct ci13xxx_ep *mEp;
-
-	trace("%pK", udc);
-
-	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
-	udc->status->context = udc;
-	udc->status->complete = isr_setup_status_complete;
-	udc->status->length = 0;
-
-	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
-	spin_lock(mEp->lock);
-
-	return retval;
-}
-
-/**
- * isr_tr_complete_low: transaction complete low level handler
- * @mEp: endpoint
- *
- * This function returns an error code
- * Caller must hold lock
- */
-static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
-__releases(mEp->lock)
-__acquires(mEp->lock)
-{
-	struct ci13xxx_req *mReq, *mReqTemp;
-	struct ci13xxx_ep *mEpTemp = mEp;
-	int retval = 0;
-	int req_dequeue = 1;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK", mEp);
-
-	if (list_empty(&mEp->qh.queue))
-		return 0;
-
-	del_timer(&mEp->prime_timer);
-	mEp->prime_timer_count = 0;
-	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
-			queue) {
-dequeue:
-		retval = _hardware_dequeue(mEp, mReq);
-		if (retval < 0) {
-			/*
-			 * FIXME: don't know exact delay
-			 * required for HW to update dTD status
-			 * bits. This is a temporary workaround till
-			 * HW designers come back on this.
-			 */
-			if (retval == -EBUSY && req_dequeue &&
-				(mEp->dir == 0 || mEp->num == 0)) {
-				req_dequeue = 0;
-				udc->dTD_update_fail_count++;
-				mEp->dTD_update_fail_count++;
-				udelay(10);
-				goto dequeue;
-			}
-			break;
-		}
-		req_dequeue = 0;
-
-		if (mEp->multi_req) { /* Large request in progress */
-			unsigned int remain_len;
-
-			mReq->multi.actual += mReq->req.actual;
-			remain_len = mReq->multi.len - mReq->multi.actual;
-			if (mReq->req.status || !remain_len ||
-				(mReq->req.actual != mReq->req.length)) {
-				restore_original_req(mReq);
-				mEp->multi_req = false;
-			} else {
-				mReq->req.buf = mReq->multi.buf +
-						mReq->multi.actual;
-				mReq->req.length = min_t(unsigned int,
-							remain_len,
-							4 * CI13XXX_PAGE_SIZE);
-
-				mReq->req.status = -EINPROGRESS;
-				mReq->req.actual = 0;
-				list_del_init(&mReq->queue);
-				retval = _hardware_enqueue(mEp, mReq);
-				if (retval) {
-					err("Large req failed in middle");
-					mReq->req.status = retval;
-					restore_original_req(mReq);
-					mEp->multi_req = false;
-					goto done;
-				} else {
-					list_add_tail(&mReq->queue,
-						&mEp->qh.queue);
-					return 0;
-				}
-			}
-		}
-		list_del_init(&mReq->queue);
-done:
-
-		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
-
-		if (mReq->req.complete != NULL) {
-			spin_unlock(mEp->lock);
-			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-					mReq->req.length)
-				mEpTemp = &_udc->ep0in;
-			mReq->req.complete(&mEpTemp->ep, &mReq->req);
-			spin_lock(mEp->lock);
-		}
-	}
-
-	if (retval == -EBUSY)
-		retval = 0;
-	if (retval < 0)
-		dbg_event(_usb_addr(mEp), "DONE", retval);
-
-	return retval;
-}
-
-/**
- * isr_tr_complete_handler: transaction complete interrupt handler
- * @udc: UDC descriptor
- *
- * This function handles traffic events
- */
-static void isr_tr_complete_handler(struct ci13xxx *udc)
-__releases(udc->lock)
-__acquires(udc->lock)
-{
-	unsigned int i;
-	u8 tmode = 0;
-
-	trace("%pK", udc);
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp  = &udc->ci13xxx_ep[i];
-		int type, num, dir, err = -EINVAL;
-		struct usb_ctrlrequest req;
-
-		if (mEp->desc == NULL)
-			continue;   /* not configured */
-
-		if (hw_test_and_clear_complete(i)) {
-			err = isr_tr_complete_low(mEp);
-			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-				if (err > 0)   /* needs status phase */
-					err = isr_setup_status_phase(udc);
-				if (err < 0) {
-					dbg_event(_usb_addr(mEp),
-						  "ERROR", err);
-					spin_unlock(udc->lock);
-					if (usb_ep_set_halt(&mEp->ep))
-						err("error: ep_set_halt");
-					spin_lock(udc->lock);
-				}
-			}
-		}
-
-		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
-		    !hw_test_and_clear_setup_status(i))
-			continue;
-
-		if (i != 0) {
-			warn("ctrl traffic received at endpoint");
-			continue;
-		}
-
-		/*
-		 * Flush data and handshake transactions of previous
-		 * setup packet.
-		 */
-		_ep_nuke(&udc->ep0out);
-		_ep_nuke(&udc->ep0in);
-
-		/* read_setup_packet */
-		do {
-			hw_test_and_set_setup_guard();
-			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
-			/* Ensure buffer is read before acknowledging to h/w */
-			mb();
-		} while (!hw_test_and_clear_setup_guard());
-
-		type = req.bRequestType;
-
-		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
-
-		dbg_setup(_usb_addr(mEp), &req);
-
-		switch (req.bRequest) {
-		case USB_REQ_CLEAR_FEATURE:
-			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
-					le16_to_cpu(req.wValue) ==
-					USB_ENDPOINT_HALT) {
-				if (req.wLength != 0)
-					break;
-				num  = le16_to_cpu(req.wIndex);
-				dir = num & USB_ENDPOINT_DIR_MASK;
-				num &= USB_ENDPOINT_NUMBER_MASK;
-				if (dir) /* TX */
-					num += hw_ep_max/2;
-				if (!udc->ci13xxx_ep[num].wedge) {
-					spin_unlock(udc->lock);
-					err = usb_ep_clear_halt(
-						&udc->ci13xxx_ep[num].ep);
-					spin_lock(udc->lock);
-					if (err)
-						break;
-				}
-				err = isr_setup_status_phase(udc);
-			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
-					le16_to_cpu(req.wValue) ==
-					USB_DEVICE_REMOTE_WAKEUP) {
-				if (req.wLength != 0)
-					break;
-				udc->gadget.remote_wakeup = 0;
-				err = isr_setup_status_phase(udc);
-			} else {
-				goto delegate;
-			}
-			break;
-		case USB_REQ_GET_STATUS:
-			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
-			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
-			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
-				goto delegate;
-			if (le16_to_cpu(req.wLength) != 2 ||
-			    le16_to_cpu(req.wValue)  != 0)
-				break;
-			err = isr_get_status_response(udc, &req);
-			break;
-		case USB_REQ_SET_ADDRESS:
-			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
-				goto delegate;
-			if (le16_to_cpu(req.wLength) != 0 ||
-			    le16_to_cpu(req.wIndex)  != 0)
-				break;
-			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
-			if (err)
-				break;
-			err = isr_setup_status_phase(udc);
-			break;
-		case USB_REQ_SET_CONFIGURATION:
-			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
-				udc->configured = !!req.wValue;
-			goto delegate;
-		case USB_REQ_SET_FEATURE:
-			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
-					le16_to_cpu(req.wValue) ==
-					USB_ENDPOINT_HALT) {
-				if (req.wLength != 0)
-					break;
-				num  = le16_to_cpu(req.wIndex);
-				dir = num & USB_ENDPOINT_DIR_MASK;
-				num &= USB_ENDPOINT_NUMBER_MASK;
-				if (dir) /* TX */
-					num += hw_ep_max/2;
-
-				spin_unlock(udc->lock);
-				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
-				spin_lock(udc->lock);
-				if (!err)
-					isr_setup_status_phase(udc);
-			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
-				if (req.wLength != 0)
-					break;
-				switch (le16_to_cpu(req.wValue)) {
-				case USB_DEVICE_REMOTE_WAKEUP:
-					udc->gadget.remote_wakeup = 1;
-					err = isr_setup_status_phase(udc);
-					break;
-				case USB_DEVICE_TEST_MODE:
-					tmode = le16_to_cpu(req.wIndex) >> 8;
-					switch (tmode) {
-					case TEST_J:
-					case TEST_K:
-					case TEST_SE0_NAK:
-					case TEST_PACKET:
-					case TEST_FORCE_EN:
-						udc->test_mode = tmode;
-						err = isr_setup_status_phase(
-								udc);
-						break;
-					default:
-						break;
-					}
-				default:
-					goto delegate;
-				}
-			} else {
-				goto delegate;
-			}
-			break;
-		default:
-delegate:
-			if (req.wLength == 0)   /* no data phase */
-				udc->ep0_dir = TX;
-
-			spin_unlock(udc->lock);
-			err = udc->driver->setup(&udc->gadget, &req);
-			spin_lock(udc->lock);
-			break;
-		}
-
-		if (err < 0) {
-			dbg_event(_usb_addr(mEp), "ERROR", err);
-
-			spin_unlock(udc->lock);
-			if (usb_ep_set_halt(&mEp->ep))
-				err("error: ep_set_halt");
-			spin_lock(udc->lock);
-		}
-	}
-}
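The setup-packet read in the handler above uses a tripwire: set the guard,
copy the 8 setup bytes, then test-and-clear the guard; if the hardware
cleared it in the meantime (a new SETUP packet arrived), the copy is
repeated. A userspace sketch of the same retry pattern (the guard helpers
are hypothetical stand-ins for hw_test_and_set_setup_guard() and
hw_test_and_clear_setup_guard()):

	#include <stdio.h>
	#include <string.h>

	static int guard;

	static void set_guard(void)   { guard = 1; }
	static int  clear_guard(void) { int g = guard; guard = 0; return g; }

	int main(void)
	{
		/* 8-byte buffer a concurrent producer may overwrite */
		unsigned char shared[8] = { 0x80, 0x06, 0, 1, 0, 0, 64, 0 };
		unsigned char copy[8];

		do {
			set_guard();
			memcpy(copy, shared, sizeof(copy));
			/* a writer racing with us would clear the guard here */
		} while (!clear_guard());

		printf("bRequest=%u\n", copy[1]);	/* 6 = GET_DESCRIPTOR */
		return 0;
	}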
-
-/******************************************************************************
- * ENDPT block
- *****************************************************************************/
-/**
- * ep_enable: configure endpoint, making it usable
- *
- * Check usb_ep_enable() at "usb_gadget.h" for details
- */
-static int ep_enable(struct usb_ep *ep,
-		     const struct usb_endpoint_descriptor *desc)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int retval = 0;
-	unsigned long flags;
-	unsigned int mult = 0;
-
-	trace("ep = %pK, desc = %pK", ep, desc);
-
-	if (ep == NULL || desc == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	/* only internal SW should enable ctrl endpts */
-
-	mEp->desc = desc;
-
-	if (!list_empty(&mEp->qh.queue))
-		warn("enabling a non-empty endpoint!");
-
-	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
-	mEp->num  = usb_endpoint_num(desc);
-	mEp->type = usb_endpoint_type(desc);
-
-	mEp->ep.maxpacket = usb_endpoint_maxp(desc);
-
-	dbg_event(_usb_addr(mEp), "ENABLE", 0);
-
-	mEp->qh.ptr->cap = 0;
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		mEp->qh.ptr->cap |=  QH_IOS;
-	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
-		mEp->qh.ptr->cap &= ~QH_MULT;
-		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
-		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
-	} else {
-		mEp->qh.ptr->cap |= QH_ZLT;
-	}
-
-	mEp->qh.ptr->cap |=
-		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
-
-	/* complete all the updates to ept->head before enabling endpoint*/
-	mb();
-
-	/*
-	 * Enable endpoints in the HW other than ep0 as ep0
-	 * is always enabled
-	 */
-	if (mEp->num)
-		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
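For isochronous endpoints the QH "mult" field is derived from
wMaxPacketSize: bits 12:11 carry the number of additional transactions per
microframe, so mult is that value plus one. A small sketch of the
computation (QH_MULT_SHIFT == 11 is an assumption, its define is not
visible in this hunk):

	#include <stdio.h>

	#define MULT_SHIFT 11	/* assumed value of QH_MULT_SHIFT */

	static unsigned int isoc_mult(unsigned int wmaxpacket)
	{
		return ((wmaxpacket >> MULT_SHIFT) + 1) & 0x03;
	}

	int main(void)
	{
		/* 0x1400: 2 extra transactions of 1024 bytes -> mult of 3 */
		printf("%u\n", isoc_mult(0x1400));	/* prints 3 */
		return 0;
	}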
-
-/**
- * ep_disable: endpoint is no longer usable
- *
- * Check usb_ep_disable() at "usb_gadget.h" for details
- */
-static int ep_disable(struct usb_ep *ep)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int direction, retval = 0;
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL)
-		return -EINVAL;
-	else if (mEp->desc == NULL)
-		return -EBUSY;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	/* only internal SW should disable ctrl endpts */
-
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "DISABLE", 0);
-
-		retval |= _ep_nuke(mEp);
-		retval |= hw_ep_disable(mEp->num, mEp->dir);
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
-
-	mEp->desc = NULL;
-	mEp->ep.desc = NULL;
-	mEp->ep.maxpacket = USHRT_MAX;
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_alloc_request: allocate a request object to use with this endpoint
- *
- * Check usb_ep_alloc_request() at "usb_gadget.h" for details
- */
-static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = NULL;
-
-	trace("%pK, %i", ep, gfp_flags);
-
-	if (ep == NULL) {
-		err("EINVAL");
-		return NULL;
-	}
-
-	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-	if (mReq != NULL) {
-		INIT_LIST_HEAD(&mReq->queue);
-		mReq->req.dma = DMA_ERROR_CODE;
-
-		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
-					   &mReq->dma);
-		if (mReq->ptr == NULL) {
-			kfree(mReq);
-			mReq = NULL;
-		}
-	}
-
-	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
-
-	return (mReq == NULL) ? NULL : &mReq->req;
-}
-
-/**
- * ep_free_request: frees a request object
- *
- * Check usb_ep_free_request() at "usb_gadget.h" for details
- */
-static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	if (ep == NULL || req == NULL) {
-		err("EINVAL");
-		return;
-	} else if (!list_empty(&mReq->queue)) {
-		err("EBUSY");
-		return;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	if (mReq->ptr)
-		dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
-	kfree(mReq);
-
-	dbg_event(_usb_addr(mEp), "FREE", 0);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-}
-
-/**
- * ep_queue: queues (submits) an I/O request to an endpoint
- *
- * Check usb_ep_queue() at "usb_gadget.h" for details
- */
-static int ep_queue(struct usb_ep *ep, struct usb_request *req,
-		    gfp_t __maybe_unused gfp_flags)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	int retval = 0;
-	unsigned long flags;
-	struct ci13xxx *udc = _udc;
-
-	trace("%pK, %pK, %X", ep, req, gfp_flags);
-
-	if (ep == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-	if (req == NULL || mEp->desc == NULL) {
-		retval = -EINVAL;
-		goto done;
-	}
-
-	if (!udc->softconnect) {
-		retval = -ENODEV;
-		goto done;
-	}
-
-	if (!udc->configured && mEp->type !=
-		USB_ENDPOINT_XFER_CONTROL) {
-		trace("usb is not configured ept #%d, ept name#%s\n",
-			mEp->num, mEp->ep.name);
-		retval = -ESHUTDOWN;
-		goto done;
-	}
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		if (req->length)
-			mEp = (_udc->ep0_dir == RX) ?
-				&_udc->ep0out : &_udc->ep0in;
-		if (!list_empty(&mEp->qh.queue)) {
-			_ep_nuke(mEp);
-			retval = -EOVERFLOW;
-			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
-		}
-	}
-
-	if (ep->endless && udc->gadget.speed == USB_SPEED_FULL) {
-		err("Queueing endless req is not supported for FS");
-		retval = -EINVAL;
-		goto done;
-	}
-
-	/* first nuke, then test the link; e.g. previous status not yet sent */
-	if (!list_empty(&mReq->queue)) {
-		retval = -EBUSY;
-		err("request already in queue");
-		goto done;
-	}
-	if (mEp->multi_req) {
-		retval = -EAGAIN;
-		err("Large request is in progress. come again");
-		goto done;
-	}
-
-	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
-		if (!list_empty(&mEp->qh.queue)) {
-			retval = -EAGAIN;
-			err("Queue is busy. Large req is not allowed");
-			goto done;
-		}
-		if ((mEp->type != USB_ENDPOINT_XFER_BULK) ||
-				(mEp->dir != RX)) {
-			retval = -EINVAL;
-			err("Larger req is supported only for Bulk OUT");
-			goto done;
-		}
-		mEp->multi_req = true;
-		mReq->multi.len = req->length;
-		mReq->multi.buf = req->buf;
-		req->length = (4 * CI13XXX_PAGE_SIZE);
-	}
-
-	dbg_queue(_usb_addr(mEp), req, retval);
-
-	/* push request */
-	mReq->req.status = -EINPROGRESS;
-	mReq->req.actual = 0;
-
-	if (udc->rw_pending) {
-		list_add_tail(&mReq->queue, &mEp->rw_queue);
-		retval = 0;
-		goto done;
-	}
-
-	if (udc->suspended) {
-		/* Remote Wakeup */
-		if (!udc->gadget.remote_wakeup) {
-
-			dev_dbg(mEp->device, "%s: queue failed (suspend).",
-					__func__);
-			dev_dbg(mEp->device, "%s: Remote wakeup is not supported. ept #%d\n",
-					__func__, mEp->num);
-			mEp->multi_req = false;
-
-			retval = -EAGAIN;
-			goto done;
-		}
-
-		list_add_tail(&mReq->queue, &mEp->rw_queue);
-
-		udc->rw_pending = true;
-		schedule_delayed_work(&udc->rw_work,
-				      REMOTE_WAKEUP_DELAY);
-
-		retval = 0;
-		goto done;
-	}
-
-	retval = _hardware_enqueue(mEp, mReq);
-
-	if (retval == -EALREADY) {
-		dbg_event(_usb_addr(mEp), "QUEUE", retval);
-		retval = 0;
-	}
-	if (!retval)
-		list_add_tail(&mReq->queue, &mEp->qh.queue);
-	else if (mEp->multi_req)
-		mEp->multi_req = false;
-
- done:
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
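Bulk-OUT requests larger than 4 * CI13XXX_PAGE_SIZE are turned into a
multi_req: only one chunk is queued at a time and the completion path
re-queues the next slice until the original length is consumed. A sketch
of the chunking arithmetic (PAGE_SZ == 4096 is an assumption about
CI13XXX_PAGE_SIZE):

	#include <stdio.h>

	#define PAGE_SZ	4096u		/* assumed CI13XXX_PAGE_SIZE */
	#define CHUNK	(4u * PAGE_SZ)	/* per-submission limit */

	int main(void)
	{
		unsigned int total = 64 * 1024, done = 0;

		/* Submit CHUNK bytes at a time, advancing the offset on
		 * each completion, as the multi_req path does. */
		while (done < total) {
			unsigned int len = total - done;

			if (len > CHUNK)
				len = CHUNK;
			printf("queue offset=%u len=%u\n", done, len);
			done += len;
		}
		return 0;
	}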
-
-/**
- * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
- *
- * Check usb_ep_dequeue() at "usb_gadget.h" for details
- */
-static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
-{
-	struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
-	struct ci13xxx_ep *mEpTemp = mEp;
-	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	trace("%pK, %pK", ep, req);
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		dev_err(udc->transceiver->dev,
-				"%s: Unable to dequeue while in LPM\n",
-				__func__);
-		return -EAGAIN;
-	}
-
-	if (ep == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-	/*
-	 * Only ep0 IN is exposed to composite.  When a req is dequeued
-	 * on ep0, check both ep0 IN and ep0 OUT queues.
-	 */
-	if (req == NULL || mReq->req.status != -EALREADY ||
-		mEp->desc == NULL || list_empty(&mReq->queue) ||
-		(list_empty(&mEp->qh.queue) && ((mEp->type !=
-			USB_ENDPOINT_XFER_CONTROL) ||
-			list_empty(&_udc->ep0out.qh.queue)))) {
-		spin_unlock_irqrestore(mEp->lock, flags);
-		return -EINVAL;
-	}
-
-	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
-		hw_ep_flush(_udc->ep0out.num, RX);
-		hw_ep_flush(_udc->ep0in.num, TX);
-	} else {
-		hw_ep_flush(mEp->num, mEp->dir);
-	}
-
-	/* pop request */
-	list_del_init(&mReq->queue);
-	if (mReq->map) {
-		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
-				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		mReq->req.dma = DMA_ERROR_CODE;
-		mReq->map     = 0;
-	}
-	req->status = -ECONNRESET;
-
-	if (mEp->last_zptr) {
-		dma_pool_free(mEp->td_pool, mEp->last_zptr, mEp->last_zdma);
-		mEp->last_zptr = NULL;
-		mEp->last_zdma = 0;
-	}
-
-	if (mReq->zptr) {
-		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
-		mReq->zptr = NULL;
-		mReq->zdma = 0;
-	}
-
-	if (mEp->multi_req) {
-		restore_original_req(mReq);
-		mEp->multi_req = false;
-	}
-
-	if (mReq->req.complete != NULL) {
-		spin_unlock(mEp->lock);
-		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
-				mReq->req.length)
-			mEpTemp = &_udc->ep0in;
-		mReq->req.complete(&mEpTemp->ep, &mReq->req);
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mReq->req.complete = NULL;
-		spin_lock(mEp->lock);
-	}
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return 0;
-}
-
-static int is_sps_req(struct ci13xxx_req *mReq)
-{
-	return (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID &&
-			mReq->req.udc_priv & MSM_SPS_MODE);
-}
-
-/**
- * ep_set_halt: sets the endpoint halt feature
- *
- * Check usb_ep_set_halt() at "usb_gadget.h" for details
- */
-static int ep_set_halt(struct usb_ep *ep, int value)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	struct ci13xxx *udc = _udc;
-	int direction, retval = 0;
-	unsigned long flags;
-
-	trace("%pK, %i", ep, value);
-
-	if (ep == NULL || mEp->desc == NULL)
-		return -EINVAL;
-
-	if (udc->suspended) {
-		dev_err(udc->transceiver->dev,
-			"%s: Unable to halt EP while suspended\n", __func__);
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-#ifndef STALL_IN
-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-		!list_empty(&mEp->qh.queue) &&
-		!is_sps_req(list_entry(mEp->qh.queue.next, struct ci13xxx_req,
-							   queue))){
-		spin_unlock_irqrestore(mEp->lock, flags);
-		return -EAGAIN;
-	}
-#endif
-
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "HALT", value);
-		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
-
-		if (!value)
-			mEp->wedge = 0;
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-	return retval;
-}
-
-/**
- * ep_set_wedge: sets the halt feature and ignores clear requests
- *
- * Check usb_ep_set_wedge() at "usb_gadget.h" for details
- */
-static int ep_set_wedge(struct usb_ep *ep)
-{
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL || mEp->desc == NULL)
-		return -EINVAL;
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	dbg_event(_usb_addr(mEp), "WEDGE", 0);
-	mEp->wedge = 1;
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-
-	return usb_ep_set_halt(ep);
-}
-
-/**
- * ep_fifo_flush: flushes contents of a fifo
- *
- * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
- */
-static void ep_fifo_flush(struct usb_ep *ep)
-{
-	struct ci13xxx *udc = _udc;
-	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	unsigned long flags;
-
-	trace("%pK", ep);
-
-	if (ep == NULL) {
-		err("%02X: -EINVAL", _usb_addr(mEp));
-		return;
-	}
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		dev_err(udc->transceiver->dev,
-				"%s: Unable to fifo_flush while in LPM\n",
-				__func__);
-		return;
-	}
-
-	spin_lock_irqsave(mEp->lock, flags);
-
-	dbg_event(_usb_addr(mEp), "FFLUSH", 0);
-	/*
-	 * _ep_nuke() takes care of flushing the endpoint.
-	 * some function drivers expect udc to retire all
-	 * pending requests upon flushing an endpoint.  There
-	 * is no harm in doing it.
-	 */
-	_ep_nuke(mEp);
-
-	spin_unlock_irqrestore(mEp->lock, flags);
-}
-
-/**
- * Endpoint-specific part of the API to the USB controller hardware
- * Check "usb_gadget.h" for details
- */
-static const struct usb_ep_ops usb_ep_ops = {
-	.enable	       = ep_enable,
-	.disable       = ep_disable,
-	.alloc_request = ep_alloc_request,
-	.free_request  = ep_free_request,
-	.queue	       = ep_queue,
-	.dequeue       = ep_dequeue,
-	.set_halt      = ep_set_halt,
-	.set_wedge     = ep_set_wedge,
-	.fifo_flush    = ep_fifo_flush,
-};
-
-/******************************************************************************
- * GADGET block
- *****************************************************************************/
-static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int gadget_ready = 0;
-
-	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
-		return -EOPNOTSUPP;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->vbus_active = is_active;
-	if (udc->driver)
-		gadget_ready = 1;
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (!gadget_ready)
-		return 0;
-
-	if (is_active) {
-		hw_device_reset(udc);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_CONNECT_EVENT);
-		/* Enable BAM (if needed) before starting controller */
-		if (udc->softconnect) {
-			dbg_event(0xFF, "BAM EN2",
-				_gadget->bam2bam_func_enabled);
-			msm_usb_bam_enable(CI_CTRL,
-				_gadget->bam2bam_func_enabled);
-			hw_device_state(udc->ep0out.qh.dma);
-		}
-	} else {
-		hw_device_state(0);
-		_gadget_stop_activity(&udc->gadget);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_DISCONNECT_EVENT);
-	}
-
-	return 0;
-}
-
-#define VBUS_DRAW_BUF_LEN 10
-#define MAX_OVERRIDE_VBUS_ALLOWED 900	/* 900 mA */
-static char vbus_draw_mA[VBUS_DRAW_BUF_LEN];
-module_param_string(vbus_draw_mA, vbus_draw_mA, VBUS_DRAW_BUF_LEN, 0644);
-
-static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned int mA)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned int override_mA = 0;
-
-	/* override param to draw more current if battery draining faster */
-	if ((mA == CONFIG_USB_GADGET_VBUS_DRAW) &&
-		(vbus_draw_mA[0] != '\0')) {
-		if ((!kstrtoint(vbus_draw_mA, 10, &override_mA)) &&
-				(override_mA <= MAX_OVERRIDE_VBUS_ALLOWED)) {
-			mA = override_mA;
-		}
-	}
-
-	if (udc->transceiver)
-		return usb_phy_set_power(udc->transceiver, mA);
-	return -ENOTSUPP;
-}
-
-static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-	udc->softconnect = is_active;
-	if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
-			!udc->vbus_active) || !udc->driver) {
-		spin_unlock_irqrestore(udc->lock, flags);
-		return 0;
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&_gadget->dev);
-
-	/* Enable BAM (if needed) before starting controller */
-	if (is_active) {
-		dbg_event(0xFF, "BAM EN1", _gadget->bam2bam_func_enabled);
-		msm_usb_bam_enable(CI_CTRL, _gadget->bam2bam_func_enabled);
-	}
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->vbus_active) {
-		spin_unlock_irqrestore(udc->lock, flags);
-		pm_runtime_put_sync(&_gadget->dev);
-		return 0;
-	}
-	if (is_active) {
-		spin_unlock(udc->lock);
-		if (udc->udc_driver->notify_event)
-			udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_CONNECT_EVENT);
-		spin_lock(udc->lock);
-		hw_device_state(udc->ep0out.qh.dma);
-	} else {
-		hw_device_state(0);
-	}
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_mark_last_busy(&_gadget->dev);
-	pm_runtime_put_autosuspend(&_gadget->dev);
-
-	return 0;
-}
-
-static int ci13xxx_start(struct usb_gadget *gadget,
-			 struct usb_gadget_driver *driver);
-static int ci13xxx_stop(struct usb_gadget *gadget);
-
-/**
- * Device operations part of the API to the USB controller hardware,
- * which don't involve endpoints (or i/o)
- * Check  "usb_gadget.h" for details
- */
-static const struct usb_gadget_ops usb_gadget_ops = {
-	.vbus_session	= ci13xxx_vbus_session,
-	.wakeup		= ci13xxx_wakeup,
-	.vbus_draw	= ci13xxx_vbus_draw,
-	.pullup		= ci13xxx_pullup,
-	.udc_start	= ci13xxx_start,
-	.udc_stop	= ci13xxx_stop,
-};
-
-/**
- * ci13xxx_start: register a gadget driver
- * @gadget: our gadget
- * @driver: the driver being registered
- *
- * Interrupts are enabled here.
- */
-static int ci13xxx_start(struct usb_gadget *gadget,
-			 struct usb_gadget_driver *driver)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-	int retval = -ENOMEM;
-
-	trace("%pK", driver);
-
-	if (driver             == NULL ||
-	    driver->setup      == NULL ||
-	    driver->disconnect == NULL)
-		return -EINVAL;
-	else if (udc         == NULL)
-		return -ENODEV;
-	else if (udc->driver != NULL)
-		return -EBUSY;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	info("hw_ep_max = %d", hw_ep_max);
-
-	udc->gadget.dev.driver = NULL;
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	pm_runtime_get_sync(&udc->gadget.dev);
-
-	udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
-	retval = usb_ep_enable(&udc->ep0out.ep);
-	if (retval)
-		goto pm_put;
-
-	udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
-	retval = usb_ep_enable(&udc->ep0in.ep);
-	if (retval)
-		goto pm_put;
-	udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
-	if (!udc->status) {
-		retval = -ENOMEM;
-		goto pm_put;
-	}
-
-	udc->status_buf = kzalloc(2 + udc->gadget.extra_buf_alloc,
-				GFP_KERNEL); /* for GET_STATUS */
-	if (!udc->status_buf) {
-		usb_ep_free_request(&udc->ep0in.ep, udc->status);
-		retval = -ENOMEM;
-		goto pm_put;
-	}
-	spin_lock_irqsave(udc->lock, flags);
-
-	udc->gadget.ep0 = &udc->ep0in.ep;
-	/* bind gadget */
-	driver->driver.bus     = NULL;
-	udc->gadget.dev.driver = &driver->driver;
-
-	udc->driver = driver;
-	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
-		if (udc->vbus_active) {
-			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
-				hw_device_reset(udc);
-		} else {
-			goto done;
-		}
-	}
-
-	if (!udc->softconnect)
-		goto done;
-
-	retval = hw_device_state(udc->ep0out.qh.dma);
-
-done:
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	if (udc->udc_driver->notify_event)
-		udc->udc_driver->notify_event(udc,
-				CI13XXX_CONTROLLER_UDC_STARTED_EVENT);
-pm_put:
-	pm_runtime_put(&udc->gadget.dev);
-
-	return retval;
-}
-
-/**
- * ci13xxx_stop: unregister a gadget driver
- *
- * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
- */
-static int ci13xxx_stop(struct usb_gadget *gadget)
-{
-	struct ci13xxx *udc = _udc;
-	unsigned long flags;
-
-	spin_lock_irqsave(udc->lock, flags);
-
-	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
-			udc->vbus_active) {
-		hw_device_state(0);
-		spin_unlock_irqrestore(udc->lock, flags);
-		_gadget_stop_activity(&udc->gadget);
-		spin_lock_irqsave(udc->lock, flags);
-	}
-
-	spin_unlock_irqrestore(udc->lock, flags);
-
-	usb_ep_free_request(&udc->ep0in.ep, udc->status);
-	kfree(udc->status_buf);
-
-	return 0;
-}
-
-/******************************************************************************
- * BUS block
- *****************************************************************************/
-/**
- * udc_irq: global interrupt handler
- *
- * This function returns IRQ_HANDLED if the IRQ has been handled
- * It locks access to registers
- */
-static irqreturn_t udc_irq(void)
-{
-	struct ci13xxx *udc = _udc;
-	irqreturn_t retval;
-	u32 intr;
-
-	trace();
-
-	if (udc == NULL) {
-		err("ENODEV");
-		return IRQ_HANDLED;
-	}
-
-	spin_lock(udc->lock);
-
-	if (udc->udc_driver->in_lpm && udc->udc_driver->in_lpm(udc)) {
-		spin_unlock(udc->lock);
-		return IRQ_NONE;
-	}
-
-	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
-		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
-				USBMODE_CM_DEVICE) {
-			spin_unlock(udc->lock);
-			return IRQ_NONE;
-		}
-	}
-	intr = hw_test_and_clear_intr_active();
-	if (intr) {
-		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
-		isr_statistics.hndl.idx &= ISR_MASK;
-		isr_statistics.hndl.cnt++;
-
-		/* order defines priority - do NOT change it */
-		if (USBi_URI & intr) {
-			isr_statistics.uri++;
-			if (!hw_cread(CAP_PORTSC, PORTSC_PR))
-				pr_info("%s: USB reset interrupt is delayed\n",
-								__func__);
-			isr_reset_handler(udc);
-		}
-		if (USBi_PCI & intr) {
-			isr_statistics.pci++;
-			isr_resume_handler(udc);
-		}
-		if (USBi_UEI & intr)
-			isr_statistics.uei++;
-		if (USBi_UI  & intr) {
-			isr_statistics.ui++;
-			isr_tr_complete_handler(udc);
-		}
-		if (USBi_SLI & intr) {
-			isr_suspend_handler(udc);
-			isr_statistics.sli++;
-		}
-		retval = IRQ_HANDLED;
-	} else {
-		isr_statistics.none++;
-		retval = IRQ_NONE;
-	}
-	spin_unlock(udc->lock);
-
-	return retval;
-}
-
-static void destroy_eps(struct ci13xxx *ci)
-{
-	int i;
-
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i];
-
-		dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma);
-	}
-}
-
-/**
- * udc_probe: parent probe must call this to initialize UDC
- * @dev:  parent device
- * @regs: registers base address
- * @name: driver name
- *
- * This function returns an error code
- * No interrupts active, the IRQ has not been requested yet
- * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
- */
-static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
-		void __iomem *regs)
-{
-	struct ci13xxx *udc;
-	struct ci13xxx_platform_data *pdata;
-	int retval = 0, i, j;
-
-	trace("%pK, %pK, %pK", dev, regs, driver->name);
-
-	if (dev == NULL || regs == NULL || driver == NULL ||
-			driver->name == NULL)
-		return -EINVAL;
-
-	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
-	if (udc == NULL)
-		return -ENOMEM;
-
-	udc->lock = &udc_lock;
-	udc->regs = regs;
-	udc->udc_driver = driver;
-
-	udc->gadget.ops          = &usb_gadget_ops;
-	udc->gadget.speed        = USB_SPEED_UNKNOWN;
-	udc->gadget.max_speed    = USB_SPEED_HIGH;
-	udc->gadget.is_otg       = 0;
-	udc->gadget.name         = driver->name;
-
-	/* alloc resources */
-	udc->qh_pool = dma_pool_create("ci13xxx_qh", dev,
-				       sizeof(struct ci13xxx_qh),
-				       64, CI13XXX_PAGE_SIZE);
-	if (udc->qh_pool == NULL) {
-		retval = -ENOMEM;
-		goto free_udc;
-	}
-
-	udc->td_pool = dma_pool_create("ci13xxx_td", dev,
-				       sizeof(struct ci13xxx_td),
-				       64, CI13XXX_PAGE_SIZE);
-	if (udc->td_pool == NULL) {
-		retval = -ENOMEM;
-		goto free_qh_pool;
-	}
-
-	INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
-
-	retval = hw_device_init(regs);
-	if (retval < 0)
-		goto free_qh_pool;
-
-	INIT_LIST_HEAD(&udc->gadget.ep_list);
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
-
-		INIT_LIST_HEAD(&mEp->ep.ep_list);
-		INIT_LIST_HEAD(&mEp->rw_queue);
-		setup_timer(&mEp->prime_timer, ep_prime_timer_func,
-			(unsigned long) mEp);
-	}
-
-	for (i = 0; i < hw_ep_max/2; i++) {
-		for (j = RX; j <= TX; j++) {
-			int k = i + j * hw_ep_max/2;
-			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
-
-			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
-					(j == TX)  ? "in" : "out");
-
-			mEp->lock         = udc->lock;
-			mEp->device       = &udc->gadget.dev;
-			mEp->td_pool      = udc->td_pool;
-
-			mEp->ep.name      = mEp->name;
-			mEp->ep.ops       = &usb_ep_ops;
-			usb_ep_set_maxpacket_limit(&mEp->ep,
-				k ? USHRT_MAX : CTRL_PAYLOAD_MAX);
-
-			INIT_LIST_HEAD(&mEp->qh.queue);
-			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
-					&mEp->qh.dma);
-			if (mEp->qh.ptr == NULL)
-				retval = -ENOMEM;
-			else
-				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
-
-			/* skip ep0 out and in endpoints  */
-			if (i == 0)
-				continue;
-
-			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
-		}
-	}
-
-	if (retval)
-		goto free_dma_pools;
-
-	udc->gadget.ep0 = &udc->ep0in.ep;
-
-	pdata = dev->platform_data;
-	if (pdata) {
-		if (pdata->enable_axi_prefetch)
-			udc->gadget.extra_buf_alloc = EXTRA_ALLOCATION_SIZE;
-	}
-
-	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
-		udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
-		if (udc->transceiver == NULL) {
-			retval = -ENODEV;
-			goto destroy_eps;
-		}
-	}
-
-	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
-		retval = hw_device_reset(udc);
-		if (retval)
-			goto put_transceiver;
-	}
-
-	if (udc->transceiver) {
-		retval = otg_set_peripheral(udc->transceiver->otg,
-						&udc->gadget);
-		if (retval)
-			goto put_transceiver;
-	}
-
-	retval = usb_add_gadget_udc(dev, &udc->gadget);
-	if (retval)
-		goto remove_trans;
-
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-	retval = dbg_create_files(&udc->gadget.dev);
-	if (retval) {
-		pr_err("Registering sysfs files for debug failed!!!!\n");
-		goto del_udc;
-	}
-#endif
-
-	pm_runtime_no_callbacks(&udc->gadget.dev);
-	pm_runtime_set_active(&udc->gadget.dev);
-	pm_runtime_enable(&udc->gadget.dev);
-
-	/* Use delayed LPM especially for composition-switch in LPM (suspend) */
-	pm_runtime_set_autosuspend_delay(&udc->gadget.dev, 2000);
-	pm_runtime_use_autosuspend(&udc->gadget.dev);
-
-	_udc = udc;
-	return retval;
-
-del_udc:
-	usb_del_gadget_udc(&udc->gadget);
-remove_trans:
-	if (udc->transceiver)
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
-
-	err("error = %i", retval);
-put_transceiver:
-	if (udc->transceiver)
-		usb_put_phy(udc->transceiver);
-destroy_eps:
-	destroy_eps(udc);
-free_dma_pools:
-	dma_pool_destroy(udc->td_pool);
-free_qh_pool:
-	dma_pool_destroy(udc->qh_pool);
-free_udc:
-	kfree(udc);
-	_udc = NULL;
-	return retval;
-}
-
-/**
- * udc_remove: parent remove must call this to remove UDC
- *
- * No interrupts active, the IRQ has been released
- */
-static void udc_remove(void)
-{
-	struct ci13xxx *udc = _udc;
-
-	if (udc == NULL) {
-		err("EINVAL");
-		return;
-	}
-
-	usb_del_gadget_udc(&udc->gadget);
-
-	if (udc->transceiver) {
-		otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
-		usb_put_phy(udc->transceiver);
-	}
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-	dbg_remove_files(&udc->gadget.dev);
-#endif
-	destroy_eps(udc);
-	dma_pool_destroy(udc->td_pool);
-	dma_pool_destroy(udc->qh_pool);
-
-	kfree(udc);
-	_udc = NULL;
-}
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
deleted file mode 100644
index 8c93080..0000000
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * ci13xxx_udc.h - structures, registers, and macros MIPS USB IP core
- *
- * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
- *
- * Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Description: MIPS USB IP core family device controller
- *              Structures, registers and logging macros
- */
-
-#ifndef _CI13XXX_h_
-#define _CI13XXX_h_
-
-/******************************************************************************
- * DEFINE
- *****************************************************************************/
-#define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
-#define ENDPT_MAX          (32)
-#define CTRL_PAYLOAD_MAX   (64)
-#define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
-#define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
-
-/* UDC private data:
- *  16MSb - Vendor ID | 16 LSb Vendor private data
- */
-#define CI13XX_REQ_VENDOR_ID(id)  (id & 0xFFFF0000UL)
-
-#define MSM_ETD_TYPE			BIT(1)
-#define MSM_EP_PIPE_ID_RESET_VAL	0x1F001F
-
-/******************************************************************************
- * STRUCTURES
- *****************************************************************************/
-/* DMA layout of transfer descriptors */
-struct ci13xxx_td {
-	/* 0 */
-	u32 next;
-#define TD_TERMINATE          BIT(0)
-#define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
-	/* 1 */
-	u32 token;
-#define TD_STATUS             (0x00FFUL <<  0)
-#define TD_STATUS_TR_ERR      BIT(3)
-#define TD_STATUS_DT_ERR      BIT(5)
-#define TD_STATUS_HALTED      BIT(6)
-#define TD_STATUS_ACTIVE      BIT(7)
-#define TD_MULTO              (0x0003UL << 10)
-#define TD_IOC                BIT(15)
-#define TD_TOTAL_BYTES        (0x7FFFUL << 16)
-	/* 2 */
-	u32 page[5];
-#define TD_CURR_OFFSET        (0x0FFFUL <<  0)
-#define TD_FRAME_NUM          (0x07FFUL <<  0)
-#define TD_RESERVED_MASK      (0x0FFFUL <<  0)
-} __packed __aligned(4);
-
-/* DMA layout of queue heads */
-struct ci13xxx_qh {
-	/* 0 */
-	u32 cap;
-#define QH_IOS                BIT(15)
-#define QH_MAX_PKT            (0x07FFUL << 16)
-#define QH_ZLT                BIT(29)
-#define QH_MULT               (0x0003UL << 30)
-#define QH_MULT_SHIFT         11
-	/* 1 */
-	u32 curr;
-	/* 2 - 8 */
-	struct ci13xxx_td        td;
-	/* 9 */
-	u32 RESERVED;
-	struct usb_ctrlrequest   setup;
-} __packed __aligned(4);
-
-/* cache of larger request's original attributes */
-struct ci13xxx_multi_req {
-	unsigned int	     len;
-	unsigned int	     actual;
-	void                *buf;
-};
-
-/* Extension of usb_request */
-struct ci13xxx_req {
-	struct usb_request   req;
-	unsigned int	     map;
-	struct list_head     queue;
-	struct ci13xxx_td   *ptr;
-	dma_addr_t           dma;
-	struct ci13xxx_td   *zptr;
-	dma_addr_t           zdma;
-	struct ci13xxx_multi_req multi;
-};
-
-/* Extension of usb_ep */
-struct ci13xxx_ep {
-	struct usb_ep                          ep;
-	const struct usb_endpoint_descriptor  *desc;
-	u8                                     dir;
-	u8                                     num;
-	u8                                     type;
-	char                                   name[16];
-	struct {
-		struct list_head   queue;
-		struct ci13xxx_qh *ptr;
-		dma_addr_t         dma;
-	}                                      qh;
-	struct list_head                       rw_queue;
-	int                                    wedge;
-
-	/* global resources */
-	spinlock_t                            *lock;
-	struct device                         *device;
-	struct dma_pool                       *td_pool;
-	struct ci13xxx_td                     *last_zptr;
-	dma_addr_t                            last_zdma;
-	unsigned long                         dTD_update_fail_count;
-	unsigned long                         dTD_active_re_q_count;
-	unsigned long			      prime_fail_count;
-	int				      prime_timer_count;
-	struct timer_list		      prime_timer;
-
-	bool                                  multi_req;
-};
-
-struct ci13xxx;
-struct ci13xxx_udc_driver {
-	const char	*name;
-	unsigned long	 flags;
-	unsigned int nz_itc;
-#define CI13XXX_REGS_SHARED		BIT(0)
-#define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
-#define CI13XXX_PULLUP_ON_VBUS		BIT(2)
-#define CI13XXX_DISABLE_STREAMING	BIT(3)
-#define CI13XXX_ZERO_ITC		BIT(4)
-#define CI13XXX_ENABLE_AHB2AHB_BYPASS	BIT(6)
-
-#define CI13XXX_CONTROLLER_RESET_EVENT			0
-#define CI13XXX_CONTROLLER_CONNECT_EVENT		1
-#define CI13XXX_CONTROLLER_SUSPEND_EVENT		2
-#define CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT		3
-#define CI13XXX_CONTROLLER_RESUME_EVENT		4
-#define CI13XXX_CONTROLLER_DISCONNECT_EVENT		5
-#define CI13XXX_CONTROLLER_UDC_STARTED_EVENT		6
-#define CI13XXX_CONTROLLER_ERROR_EVENT			7
-
-	void	(*notify_event)(struct ci13xxx *udc, unsigned int event);
-	bool    (*in_lpm)(struct ci13xxx *udc);
-};
-
-/* CI13XXX UDC descriptor & global resources */
-struct ci13xxx {
-	spinlock_t		  *lock;      /* ctrl register bank access */
-	void __iomem              *regs;      /* registers address space */
-
-	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
-	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
-	struct usb_request        *status;    /* ep0 status request */
-	void                      *status_buf;/* GET_STATUS buffer */
-
-	struct usb_gadget          gadget;     /* USB slave device */
-	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
-	u32                        ep0_dir;    /* ep0 direction */
-#define ep0out ci13xxx_ep[0]
-#define ep0in  ci13xxx_ep[hw_ep_max / 2]
-	u8                         suspended;  /* suspended by the host */
-	u8                         configured;  /* is device configured */
-	u8                         test_mode;  /* the selected test mode */
-	bool                       rw_pending; /* Remote wakeup pending flag */
-	struct delayed_work        rw_work;    /* remote wakeup delayed work */
-	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
-	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
-	int                        vbus_active; /* is VBUS active */
-	int                        softconnect; /* is pull-up enable allowed */
-	unsigned long dTD_update_fail_count;
-	struct usb_phy            *transceiver; /* Transceiver struct */
-	bool                      skip_flush;   /*
-						 * skip flushing remaining EP
-						 * upon flush timeout for the
-						 * first EP.
-						 */
-};
-
-/******************************************************************************
- * REGISTERS
- *****************************************************************************/
-/* register size */
-#define REG_BITS   (32)
-
-/* HCCPARAMS */
-#define HCCPARAMS_LEN         BIT(17)
-
-/* DCCPARAMS */
-#define DCCPARAMS_DEN         (0x1F << 0)
-#define DCCPARAMS_DC          BIT(7)
-
-/* TESTMODE */
-#define TESTMODE_FORCE        BIT(0)
-
-/* AHB_MODE */
-#define AHB2AHB_BYPASS	      BIT(31)
-
-/* USBCMD */
-#define USBCMD_RS             BIT(0)
-#define USBCMD_RST            BIT(1)
-#define USBCMD_SUTW           BIT(13)
-#define USBCMD_ATDTW          BIT(14)
-
-/* USBSTS & USBINTR */
-#define USBi_UI               BIT(0)
-#define USBi_UEI              BIT(1)
-#define USBi_PCI              BIT(2)
-#define USBi_URI              BIT(6)
-#define USBi_SLI              BIT(8)
-
-/* DEVICEADDR */
-#define DEVICEADDR_USBADRA    BIT(24)
-#define DEVICEADDR_USBADR     (0x7FUL << 25)
-
-/* PORTSC */
-#define PORTSC_FPR            BIT(6)
-#define PORTSC_SUSP           BIT(7)
-#define PORTSC_PR             BIT(8)
-#define PORTSC_HSP            BIT(9)
-#define PORTSC_PTC            (0x0FUL << 16)
-
-/* DEVLC */
-#define DEVLC_PSPD            (0x03UL << 25)
-#define    DEVLC_PSPD_HS      (0x02UL << 25)
-
-/* USBMODE */
-#define USBMODE_CM            (0x03UL <<  0)
-#define    USBMODE_CM_IDLE    (0x00UL <<  0)
-#define    USBMODE_CM_DEVICE  (0x02UL <<  0)
-#define    USBMODE_CM_HOST    (0x03UL <<  0)
-#define USBMODE_SLOM          BIT(3)
-#define USBMODE_SDIS          BIT(4)
-#define USBCMD_ITC(n)         (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
-#define USBCMD_ITC_MASK       (0xFF << 16)
-
-/* ENDPTCTRL */
-#define ENDPTCTRL_RXS         BIT(0)
-#define ENDPTCTRL_RXT         (0x03UL <<  2)
-#define ENDPTCTRL_RXR         BIT(6)         /* reserved for port 0 */
-#define ENDPTCTRL_RXE         BIT(7)
-#define ENDPTCTRL_TXS         BIT(16)
-#define ENDPTCTRL_TXT         (0x03UL << 18)
-#define ENDPTCTRL_TXR         BIT(22)        /* reserved for port 0 */
-#define ENDPTCTRL_TXE         BIT(23)
-
-/******************************************************************************
- * LOGGING
- *****************************************************************************/
-#define ci13xxx_printk(level, format, args...) \
-do { \
-	if (_udc == NULL) \
-		printk(level "[%s] " format "\n", __func__, ## args); \
-	else \
-		dev_printk(level, _udc->gadget.dev.parent, \
-			   "[%s] " format "\n", __func__, ## args); \
-} while (0)
-
-#ifndef err
-#define err(format, args...)    ci13xxx_printk(KERN_ERR, format, ## args)
-#endif
-
-#define warn(format, args...)   ci13xxx_printk(KERN_WARNING, format, ## args)
-#define info(format, args...)   ci13xxx_printk(KERN_INFO, format, ## args)
-
-#ifdef TRACE
-#define trace(format, args...)      ci13xxx_printk(KERN_DEBUG, format, ## args)
-#define dbg_trace(format, args...)  dev_dbg(dev, format, ##args)
-#else
-#define trace(format, args...)      do {} while (0)
-#define dbg_trace(format, args...)  do {} while (0)
-#endif
-
-#endif	/* _CI13XXX_h_ */
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 16b6619..f779fdc30 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -317,6 +317,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
 		ret = unregister_gadget(gi);
 		if (ret)
 			goto err;
+		kfree(name);
 	} else {
 		if (gi->composite.gadget_driver.udc_name) {
 			ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 42f9007..866c3ec 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1187,7 +1187,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			ret = __ffs_epfile_read_data(epfile, data, ep->status,
 						     &io_data->data);
 		goto error_mutex;
-	} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+	} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
 		ret = -ENOMEM;
 	} else {
 		req->buf      = data;
@@ -2655,9 +2655,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 		int i;
 
 		if (len < sizeof(*d) ||
-		    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-		    !d->Reserved1)
+		    d->bFirstInterfaceNumber >= ffs->interfaces_count)
 			return -EINVAL;
+		if (d->Reserved1 != 1) {
+			/*
+			 * According to the spec, Reserved1 must be set to 1
+			 * but older kernels incorrectly rejected non-zero
+			 * values.  We fix it here to avoid returning EINVAL
+			 * in response to values we used to accept.
+			 */
+			pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
+			d->Reserved1 = 1;
+		}
 		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
 			if (d->Reserved2[i])
 				return -EINVAL;
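
The f_fs.c hunk above stops rejecting os_desc blobs whose Reserved1 field is not 1 and instead coerces the field to the value the spec requires, logging the fixup. A minimal, self-contained userspace sketch of that normalize-instead-of-reject pattern follows; the struct and function names are illustrative and not taken from f_fs.c.

#include <stdio.h>

struct ext_compat_desc {
	unsigned char reserved1;	/* the spec requires this to be 1 */
};

/*
 * Coerce an out-of-spec field instead of rejecting the descriptor, so
 * inputs that older code accepted keep working.
 */
static int parse_desc(struct ext_compat_desc *d)
{
	if (d->reserved1 != 1) {
		fprintf(stderr, "reserved1 forced to 1 (was %d)\n", d->reserved1);
		d->reserved1 = 1;
	}
	return 0;
}

int main(void)
{
	struct ext_compat_desc d = { .reserved1 = 0 };

	return parse_desc(&d);
}
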
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 7e4e7ce..dc368c7 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -407,17 +407,17 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 			ipa_out_channel_out_params.db_reg_phs_addr_lsb);
 
 	d_port->in_channel_handle = ipa_in_channel_out_params.clnt_hdl;
-	d_port->in_db_reg_phs_addr_lsb =
+	d_port->in_request.db_reg_phs_addr_lsb =
 		ipa_in_channel_out_params.db_reg_phs_addr_lsb;
-	d_port->in_db_reg_phs_addr_msb =
+	d_port->in_request.db_reg_phs_addr_msb =
 		ipa_in_channel_out_params.db_reg_phs_addr_msb;
 
 	if (gsi->prot_id != IPA_USB_DIAG) {
 		d_port->out_channel_handle =
 			ipa_out_channel_out_params.clnt_hdl;
-		d_port->out_db_reg_phs_addr_lsb =
+		d_port->out_request.db_reg_phs_addr_lsb =
 			ipa_out_channel_out_params.db_reg_phs_addr_lsb;
-		d_port->out_db_reg_phs_addr_msb =
+		d_port->out_request.db_reg_phs_addr_msb =
 			ipa_out_channel_out_params.db_reg_phs_addr_msb;
 	}
 	return ret;
@@ -426,22 +426,19 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
 static void ipa_data_path_enable(struct gsi_data_port *d_port)
 {
 	struct f_gsi *gsi = d_port_to_gsi(d_port);
-	struct usb_gsi_request req;
-	u64 dbl_register_addr;
 	bool block_db = false;
 
-
-	log_event_dbg("in_db_reg_phs_addr_lsb = %x",
-			gsi->d_port.in_db_reg_phs_addr_lsb);
+	log_event_dbg("IN: db_reg_phs_addr_lsb = %x",
+			gsi->d_port.in_request.db_reg_phs_addr_lsb);
 	usb_gsi_ep_op(gsi->d_port.in_ep,
-			(void *)&gsi->d_port.in_db_reg_phs_addr_lsb,
+			&gsi->d_port.in_request,
 			GSI_EP_OP_STORE_DBL_INFO);
 
 	if (gsi->d_port.out_ep) {
-		log_event_dbg("out_db_reg_phs_addr_lsb = %x",
-				gsi->d_port.out_db_reg_phs_addr_lsb);
+		log_event_dbg("OUT: db_reg_phs_addr_lsb = %x",
+				gsi->d_port.out_request.db_reg_phs_addr_lsb);
 		usb_gsi_ep_op(gsi->d_port.out_ep,
-				(void *)&gsi->d_port.out_db_reg_phs_addr_lsb,
+				&gsi->d_port.out_request,
 				GSI_EP_OP_STORE_DBL_INFO);
 
 		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
@@ -452,29 +449,12 @@ static void ipa_data_path_enable(struct gsi_data_port *d_port)
 	usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
 				GSI_EP_OP_SET_CLR_BLOCK_DBL);
 
-	/* GSI channel DBL address for USB IN endpoint */
-	dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb;
-	dbl_register_addr = dbl_register_addr << 32;
-	dbl_register_addr =
-		dbl_register_addr | gsi->d_port.in_db_reg_phs_addr_lsb;
+	usb_gsi_ep_op(gsi->d_port.in_ep, &gsi->d_port.in_request,
+						GSI_EP_OP_RING_DB);
 
-	/* use temp gsi request to pass 64 bit dbl reg addr and num_bufs */
-	req.buf_base_addr = &dbl_register_addr;
-
-	req.num_bufs = gsi->d_port.in_request.num_bufs;
-	usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_DB);
-
-	if (gsi->d_port.out_ep) {
-		/* GSI channel DBL address for USB OUT endpoint */
-		dbl_register_addr = gsi->d_port.out_db_reg_phs_addr_msb;
-		dbl_register_addr = dbl_register_addr << 32;
-		dbl_register_addr = dbl_register_addr |
-					gsi->d_port.out_db_reg_phs_addr_lsb;
-		/* use temp request to pass 64 bit dbl reg addr and num_bufs */
-		req.buf_base_addr = &dbl_register_addr;
-		req.num_bufs = gsi->d_port.out_request.num_bufs;
-		usb_gsi_ep_op(gsi->d_port.out_ep, &req, GSI_EP_OP_RING_DB);
-	}
+	if (gsi->d_port.out_ep)
+		usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request,
+						GSI_EP_OP_RING_DB);
 }
 
 static void ipa_disconnect_handler(struct gsi_data_port *d_port)
@@ -491,11 +471,13 @@ static void ipa_disconnect_handler(struct gsi_data_port *d_port)
 		 */
 		usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
 				GSI_EP_OP_SET_CLR_BLOCK_DBL);
-		usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE);
+		usb_gsi_ep_op(gsi->d_port.in_ep,
+				&gsi->d_port.in_request, GSI_EP_OP_DISABLE);
 	}
 
 	if (gsi->d_port.out_ep)
-		usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE);
+		usb_gsi_ep_op(gsi->d_port.out_ep,
+				&gsi->d_port.out_request, GSI_EP_OP_DISABLE);
 
 	gsi->d_port.net_ready_trigger = false;
 }
@@ -1219,14 +1201,16 @@ static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd,
 		break;
 	case QTI_CTRL_GET_LINE_STATE:
 		val = atomic_read(&gsi->connected);
+		if (gsi->prot_id == IPA_USB_RMNET)
+			val = gsi->rmnet_dtr_status;
+
 		ret = copy_to_user((void __user *)arg, &val, sizeof(val));
 		if (ret) {
 			log_event_err("copy_to_user fail LINE_STATE");
 			ret = -EFAULT;
 		}
 		log_event_dbg("%s: Sent line_state: %d for prot id:%d",
-				__func__,
-				atomic_read(&gsi->connected), gsi->prot_id);
+				__func__, val, gsi->prot_id);
 		break;
 	case QTI_CTRL_EP_LOOKUP:
 	case GSI_MBIM_EP_LOOKUP:
@@ -1750,6 +1734,7 @@ gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 	struct gsi_ctrl_pkt *cpkt;
 	u8 *buf;
 	u32 n;
+	bool line_state;
 
 	if (!atomic_read(&gsi->connected)) {
 		log_event_dbg("usb cable is not connected");
@@ -1830,8 +1815,11 @@ gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
 		break;
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		line_state = (w_value & GSI_CTRL_DTR ? true : false);
+		if (gsi->prot_id == IPA_USB_RMNET)
+			gsi->rmnet_dtr_status = line_state;
 		log_event_dbg("%s: USB_CDC_REQ_SET_CONTROL_LINE_STATE DTR:%d\n",
-				__func__, w_value & GSI_CTRL_DTR ? 1 : 0);
+						__func__, line_state);
 		gsi_ctrl_send_cpkt_tomodem(gsi, NULL, 0);
 		value = 0;
 		break;
@@ -2184,7 +2172,10 @@ static void gsi_disable(struct usb_function *f)
 	if (gsi->prot_id == IPA_USB_RNDIS)
 		rndis_uninit(gsi->params);
 
-	 /* Disable Control Path */
+	if (gsi->prot_id == IPA_USB_RMNET)
+		gsi->rmnet_dtr_status = false;
+
+	/* Disable Control Path */
 	if (gsi->c_port.notify &&
 		gsi->c_port.notify->driver_data) {
 		usb_ep_disable(gsi->c_port.notify);
@@ -3072,7 +3063,7 @@ static ssize_t gsi_info_show(struct config_item *item, char *page)
 				gsi->d_port.in_channel_handle);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "IN Chnl Dbl Addr: ",
-				gsi->d_port.in_db_reg_phs_addr_lsb);
+				gsi->d_port.in_request.db_reg_phs_addr_lsb);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10u\n", "IN TRB Ring Len: ",
 				ipa_chnl_params->xfer_ring_len);
@@ -3106,7 +3097,7 @@ static ssize_t gsi_info_show(struct config_item *item, char *page)
 			gsi->d_port.out_channel_handle);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10x\n", "OUT Channel Dbl Addr: ",
-			gsi->d_port.out_db_reg_phs_addr_lsb);
+			gsi->d_port.out_request.db_reg_phs_addr_lsb);
 		len += scnprintf(buf + len, PAGE_SIZE - len,
 		"%25s %10u\n", "OUT TRB Ring Len: ",
 			ipa_chnl_params->xfer_ring_len);
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index fa36d05..58a7706 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -232,10 +232,6 @@ struct gsi_data_port {
 	struct ipa_usb_teth_params ipa_init_params;
 	int in_channel_handle;
 	int out_channel_handle;
-	u32 in_db_reg_phs_addr_lsb;
-	u32 in_db_reg_phs_addr_msb;
-	u32 out_db_reg_phs_addr_lsb;
-	u32 out_db_reg_phs_addr_msb;
 	u32 in_xfer_rsc_index;
 	u32 out_xfer_rsc_index;
 	u16 in_last_trb_addr;
@@ -280,6 +276,7 @@ struct f_gsi {
 	struct gsi_data_port d_port;
 	struct gsi_ctrl_port c_port;
 	void *ipc_log_ctxt;
+	bool rmnet_dtr_status;
 };
 
 static inline struct f_gsi *func_to_gsi(struct usb_function *f)
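
The f_gsi changes above drop the separate in/out db_reg_phs_addr_lsb/msb fields and keep the doorbell register address inside the GSI request itself, where the removed code used to stitch the two 32-bit halves into a 64-bit address by hand. A small, self-contained sketch of that split/combine arithmetic, with illustrative names only:

#include <stdio.h>
#include <stdint.h>

/* Combine two 32-bit halves into one 64-bit register address. */
static uint64_t combine_db_addr(uint32_t lsb, uint32_t msb)
{
	return ((uint64_t)msb << 32) | lsb;
}

int main(void)
{
	uint32_t lsb = 0x0000a000, msb = 0x00000001;
	uint64_t addr = combine_db_addr(lsb, msb);

	printf("doorbell address: 0x%llx\n", (unsigned long long)addr);
	return 0;
}
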
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index c99d547..fbc942d 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
 	opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
 	opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
 
+	/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
+	if (opts->streaming_maxburst &&
+	    (opts->streaming_maxpacket % 1024) != 0) {
+		opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
+		INFO(cdev, "overriding streaming_maxpacket to %d\n",
+		     opts->streaming_maxpacket);
+	}
+
 	/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
 	 * module parameters.
 	 *
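
The f_uvc hunk above rounds streaming_maxpacket up to the next multiple of 1024 whenever streaming_maxburst is set, since a SuperSpeed endpoint with a non-zero burst must advertise a 1024-byte wMaxPacketSize. A one-function sketch of the round-up-to-a-multiple arithmetic the hunk relies on; plain userspace C with illustrative names:

#include <stdio.h>

/* Round n up to the next multiple of step (step must be non-zero). */
static unsigned int roundup_to(unsigned int n, unsigned int step)
{
	return ((n + step - 1) / step) * step;
}

int main(void)
{
	printf("%u\n", roundup_to(3000, 1024));	/* prints 3072 */
	return 0;
}
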
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index f69dbd4..b8534d3 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1819,8 +1819,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 
 	spin_lock_irq (&dev->lock);
 	value = -EINVAL;
-	if (dev->buf)
+	if (dev->buf) {
+		kfree(kbuf);
 		goto fail;
+	}
 	dev->buf = kbuf;
 
 	/* full or low speed config */
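
The inode.c hunk above frees the freshly allocated kbuf before bailing out when dev->buf is already set, so that early-exit path no longer leaks the buffer. A minimal userspace sketch of the same rule, namely that every exit taken after an allocation either stores the buffer or frees it; all names here are illustrative:

#include <stdlib.h>
#include <string.h>

struct dev_state {
	char *buf;
};

/* Returns 0 on success, -1 if a buffer is already installed. */
static int install_config(struct dev_state *dev, const char *data, size_t len)
{
	char *kbuf = malloc(len);

	if (!kbuf)
		return -1;
	memcpy(kbuf, data, len);

	if (dev->buf) {		/* already configured: do not leak kbuf */
		free(kbuf);
		return -1;
	}
	dev->buf = kbuf;
	return 0;
}

int main(void)
{
	struct dev_state dev = { 0 };

	install_config(&dev, "cfg", 3);
	install_config(&dev, "cfg", 3);	/* second call must not leak */
	free(dev.buf);
	return 0;
}
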
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 243febf..658b8da 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -389,20 +389,6 @@
 	  dynamically linked module called "udc-xilinx" and force all
 	  gadget drivers to also be dynamically linked.
 
-config USB_CI13XXX_MSM
-	tristate "MIPS USB CI13xxx for MSM"
-	select USB_MSM_OTG
-	help
-	  MSM SoC has chipidea USB controller.  This driver uses
-	  ci13xxx_udc core.
-	  This driver depends on OTG driver for PHY initialization,
-	  clock management, powering up VBUS, and power management.
-	  This driver is not supported on boards like trout which
-	  has an external PHY.
-
-	  Say "y" to link the driver statically, or "m" to build a
-	  dynamically linked module called "ci13xxx_msm" and force all
-	  gadget drivers to also be dynamically linked.
 #
 # LAST -- dummy/emulated controller
 #
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 33f3987..d133252 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
 	 */
 	while (!list_empty(&ep->queue)) {
 		struct net2280_request	*req;
-		u32			tmp;
+		u32 req_dma_count;
 
 		req = list_entry(ep->queue.next,
 				struct net2280_request, queue);
 		if (!req->valid)
 			break;
 		rmb();
-		tmp = le32_to_cpup(&req->td->dmacount);
-		if ((tmp & BIT(VALID_BIT)) != 0)
+		req_dma_count = le32_to_cpup(&req->td->dmacount);
+		if ((req_dma_count & BIT(VALID_BIT)) != 0)
 			break;
 
 		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
 		 */
 		if (unlikely(req->td->dmadesc == 0)) {
 			/* paranoia */
-			tmp = readl(&ep->dma->dmacount);
-			if (tmp & DMA_BYTE_COUNT_MASK)
+			u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+			if (ep_dmacount & DMA_BYTE_COUNT_MASK)
 				break;
 			/* single transfer mode */
-			dma_done(ep, req, tmp, 0);
+			dma_done(ep, req, req_dma_count, 0);
 			num_completed++;
 			break;
 		} else if (!ep->is_in &&
 			   (req->req.length % ep->ep.maxpacket) &&
 			   !(ep->dev->quirks & PLX_PCIE)) {
 
-			tmp = readl(&ep->regs->ep_stat);
+			u32 const ep_stat = readl(&ep->regs->ep_stat);
 			/* AVOID TROUBLE HERE by not issuing short reads from
 			 * your gadget driver.  That helps avoids errata 0121,
 			 * 0122, and 0124; not all cases trigger the warning.
 			 */
-			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
+			if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
 				ep_warn(ep->dev, "%s lost packet sync!\n",
 						ep->ep.name);
 				req->req.status = -EOVERFLOW;
 			} else {
-				tmp = readl(&ep->regs->ep_avail);
-				if (tmp) {
+				u32 const ep_avail = readl(&ep->regs->ep_avail);
+				if (ep_avail) {
 					/* fifo gets flushed later */
 					ep->out_overflow = 1;
 					ep_dbg(ep->dev,
 						"%s dma, discard %d len %d\n",
-						ep->ep.name, tmp,
+						ep->ep.name, ep_avail,
 						req->req.length);
 					req->req.status = -EOVERFLOW;
 				}
 			}
 		}
-		dma_done(ep, req, tmp, 0);
+		dma_done(ep, req, req_dma_count, 0);
 		num_completed++;
 	}
 
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da64..8a365aa 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
 		td = phys_to_virt(addr);
 		addr2 = (dma_addr_t)td->next;
 		pci_pool_free(dev->data_requests, td, addr);
-		td->next = 0x00;
 		addr = addr2;
 	}
 	req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 7fa60f5..afd6b86 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
 	usb_del_gadget_udc(&udc->gadget);
 	pxa_cleanup_debugfs(udc);
 
-	if (!IS_ERR_OR_NULL(udc->transceiver))
+	if (!IS_ERR_OR_NULL(udc->transceiver)) {
 		usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
-	usb_put_phy(udc->transceiver);
+		usb_put_phy(udc->transceiver);
+	}
 
 	udc->transceiver = NULL;
 	the_controller = NULL;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index bb89e24..2197a50 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -222,7 +222,7 @@
 #define USB3_EP0_SS_MAX_PACKET_SIZE	512
 #define USB3_EP0_HSFS_MAX_PACKET_SIZE	64
 #define USB3_EP0_BUF_SIZE		8
-#define USB3_MAX_NUM_PIPES		30
+#define USB3_MAX_NUM_PIPES		6	/* This includes PIPE 0 */
 #define USB3_WAIT_US			3
 
 struct renesas_usb3;
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 1a2614a..3ff6468 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 			default:		/* unknown */
 				break;
 			}
-			temp = (cap >> 8) & 0xff;
+			offset = (cap >> 8) & 0xff;
 		}
 	}
 #endif
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index c99121a6..b59d214 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -996,6 +996,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 	if (!vdev)
 		return;
 
+	if (vdev->real_port == 0 ||
+			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
+		xhci_dbg(xhci, "Bad vdev->real_port.\n");
+		goto out;
+	}
+
 	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
 	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 		/* is this a hub device that added a tt_info to the tts list */
@@ -1009,6 +1015,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
 			}
 		}
 	}
+out:
 	/* we are now at a leaf device */
 	xhci_free_virt_device(xhci, slot_id);
 }
@@ -1025,10 +1032,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		return 0;
 	}
 
-	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
-	if (!xhci->devs[slot_id])
+	dev = kzalloc(sizeof(*dev), flags);
+	if (!dev)
 		return 0;
-	dev = xhci->devs[slot_id];
 
 	/* Allocate the (output) device context that will be used in the HC. */
 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1076,9 +1082,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
+	xhci->devs[slot_id] = dev;
+
 	return 1;
 fail:
-	xhci_free_virt_device(xhci, slot_id);
+
+	if (dev->in_ctx)
+		xhci_free_container_ctx(xhci, dev->in_ctx);
+	if (dev->out_ctx)
+		xhci_free_container_ctx(xhci, dev->out_ctx);
+	kfree(dev);
+
 	return 0;
 }
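
The xhci-mem.c change above builds the virt device in a local pointer, publishes it into xhci->devs[slot_id] only after every allocation has succeeded, and on failure frees just the pieces that were actually set up, so other code never sees a half-initialized entry. A self-contained sketch of that publish-only-on-success pattern, with purely illustrative names:

#include <stdlib.h>

struct ctx { int dummy; };

struct vdev {
	struct ctx *in_ctx;
	struct ctx *out_ctx;
};

static struct vdev *table[8];

/* Returns 1 on success, 0 on failure; never publishes a partial object. */
static int alloc_vdev(int slot)
{
	struct vdev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 0;

	dev->out_ctx = calloc(1, sizeof(*dev->out_ctx));
	if (!dev->out_ctx)
		goto fail;
	dev->in_ctx = calloc(1, sizeof(*dev->in_ctx));
	if (!dev->in_ctx)
		goto fail;

	table[slot] = dev;	/* publish only once fully constructed */
	return 1;

fail:
	free(dev->out_ctx);	/* free(NULL) is a no-op, partial frees are safe */
	free(dev->in_ctx);
	free(dev);
	return 0;
}

int main(void)
{
	return alloc_vdev(1) ? 0 : 1;
}
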
 
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index f2365a4..ce9e457 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -632,13 +632,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
 		goto power_off_phys;
 	}
 
-	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
-		xhci->shared_hcd->can_do_streams = 1;
-
 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto put_usb3_hcd;
 
+	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+		xhci->shared_hcd->can_do_streams = 1;
+
 	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
 	if (ret)
 		goto dealloc_usb2_hcd;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c7596a7..0ce6929 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -486,6 +486,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
 static struct platform_driver usb_xhci_driver = {
 	.probe	= xhci_plat_probe,
 	.remove	= xhci_plat_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
 	.driver	= {
 		.name = "xhci-hcd",
 		.pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e185bbe..09ae74e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3132,7 +3132,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 {
 	u32 maxp, total_packet_count;
 
-	/* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+	/* MTK xHCI 0.96 contains some features from 1.0 */
 	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
 		return ((td_total_len - transferred) >> 10);
 
@@ -3141,8 +3141,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 	    trb_buff_len == td_total_len)
 		return 0;
 
-	/* for MTK xHCI, TD size doesn't include this TRB */
-	if (xhci->quirks & XHCI_MTK_HOST)
+	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
+	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
 		trb_buff_len = 0;
 
 	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index bacee0f..ea5bad4 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -302,7 +302,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
 			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
 			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
 			del_timer(&otg_workaround);
-		} else {
+		} else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+			/*
+			 * When babble condition happens, drvvbus interrupt
+			 * is also generated. Ignore this drvvbus interrupt
+			 * and let the babble interrupt handler recover the
+			 * controller; otherwise, the host-mode flag is lost
+			 * due to the MUSB_DEV_MODE() call below and the
+			 * babble recovery logic will not be called.
+			 */
 			musb->is_active = 0;
 			MUSB_DEV_MODE(musb);
 			otg->default_a = 0;
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index db68156..b3b33cf 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
+static const struct of_device_id isp1301_of_match[] = {
+	{.compatible = "nxp,isp1301" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
 static struct i2c_client *isp1301_i2c_client;
 
 static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
 static struct i2c_driver isp1301_driver = {
 	.driver = {
 		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(isp1301_of_match),
 	},
 	.probe = isp1301_probe,
 	.remove = isp1301_remove,
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index ab5d364..335a1ef 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
 	tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
 	if (IS_ERR(tu->extcon)) {
 		dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
-		return -ENOMEM;
+		ret = PTR_ERR(tu->extcon);
+		goto err_disable_clk;
 	}
 
 	ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db3d34c..ffa8ec9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Quectel's vendor ID */
 #define QUECTEL_PRODUCT_EC21			0x0121
 #define QUECTEL_PRODUCT_EC25			0x0125
+#define QUECTEL_PRODUCT_BG96			0x0296
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index a155cd0..ecc83c4 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
 		}
 	}
 
+	/* All Seagate disk enclosures have broken ATA pass-through support */
+	if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
+		flags |= US_FL_NO_ATA_1X;
+
 	usb_stor_adjust_quirks(udev, &flags);
 
 	if (flags & US_FL_IGNORE_UAS) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2572fd5..b605115 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2113,6 +2113,13 @@ UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA ),
 
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+		"JMicron",
+		"JMS567",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA),
+
 /*
  * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cde1153..9f356f7 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
 
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+		"JMicron",
+		"JMS567",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
 		"VIA",
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 191b176..283a9be 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -336,23 +336,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
 	return priv;
 }
 
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 {
 	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
+	int epnum = pdu->base.ep;
+	int dir = pdu->base.direction;
+
+	if (epnum < 0 || epnum > 15)
+		goto err_ret;
 
 	if (dir == USBIP_DIR_IN)
 		ep = udev->ep_in[epnum & 0x7f];
 	else
 		ep = udev->ep_out[epnum & 0x7f];
-	if (!ep) {
-		dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
-			epnum);
-		BUG();
-	}
+	if (!ep)
+		goto err_ret;
 
 	epd = &ep->desc;
+
+	/* validate transfer_buffer_length */
+	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
+		dev_err(&sdev->udev->dev,
+			"CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
+			pdu->u.cmd_submit.transfer_buffer_length);
+		return -1;
+	}
+
 	if (usb_endpoint_xfer_control(epd)) {
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndctrlpipe(udev, epnum);
@@ -375,15 +386,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
 	}
 
 	if (usb_endpoint_xfer_isoc(epd)) {
+		/* validate packet size and number of packets */
+		unsigned int maxp, packets, bytes;
+
+		maxp = usb_endpoint_maxp(epd);
+		maxp *= usb_endpoint_maxp_mult(epd);
+		bytes = pdu->u.cmd_submit.transfer_buffer_length;
+		packets = DIV_ROUND_UP(bytes, maxp);
+
+		if (pdu->u.cmd_submit.number_of_packets < 0 ||
+		    pdu->u.cmd_submit.number_of_packets > packets) {
+			dev_err(&sdev->udev->dev,
+				"CMD_SUBMIT: isoc invalid num packets %d\n",
+				pdu->u.cmd_submit.number_of_packets);
+			return -1;
+		}
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndisocpipe(udev, epnum);
 		else
 			return usb_rcvisocpipe(udev, epnum);
 	}
 
+err_ret:
 	/* NOT REACHED */
-	dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
-	return 0;
+	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+	return -1;
 }
 
 static void masking_bogus_flags(struct urb *urb)
@@ -447,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
 	struct usb_device *udev = sdev->udev;
-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+	int pipe = get_pipe(sdev, pdu);
+
+	if (pipe == -1)
+		return;
 
 	priv = stub_priv_alloc(sdev, pdu);
 	if (!priv)
@@ -466,7 +496,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	}
 
 	/* allocate urb transfer buffer, if needed */
-	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
 		priv->urb->transfer_buffer =
 			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
 				GFP_KERNEL);
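
Note on the stub_rx change above: instead of hitting BUG() on a bad endpoint, the peer-supplied CMD_SUBMIT fields (endpoint number, transfer length, isochronous packet count) are now validated before anything is allocated. As a rough stand-alone sketch of the same bounded-validation idea — the struct and limits below are invented for illustration, not the kernel's types:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified view of a peer-supplied submit header. */
struct fake_submit {
	int ep;                              /* endpoint number from the wire */
	uint32_t transfer_buffer_length;     /* bytes requested by the peer */
	int number_of_packets;               /* isochronous packets requested */
	unsigned int maxp;                   /* endpoint max packet size */
};

/* Returns 0 if the request looks sane, -1 otherwise. */
static int validate_submit(const struct fake_submit *s)
{
	if (s->ep < 0 || s->ep > 15)
		return -1;                   /* impossible endpoint number */
	if (s->transfer_buffer_length > INT_MAX)
		return -1;                   /* would overflow a signed length */
	if (s->maxp) {
		/* at most one packet per maxp-sized chunk of the buffer */
		unsigned int packets =
			(s->transfer_buffer_length + s->maxp - 1) / s->maxp;
		if (s->number_of_packets < 0 ||
		    (unsigned int)s->number_of_packets > packets)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct fake_submit bad = {
		.ep = 3, .transfer_buffer_length = 512,
		.number_of_packets = 100, .maxp = 64,
	};
	/* 512 bytes never needs 100 packets of 64 bytes, so this prints -1. */
	printf("%d\n", validate_submit(&bad));
	return 0;
}
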
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index be50cef..87ff94b 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
 		memset(&pdu_header, 0, sizeof(pdu_header));
 		memset(&msg, 0, sizeof(msg));
 
+		if (urb->actual_length > 0 && !urb->transfer_buffer) {
+			dev_err(&sdev->udev->dev,
+				"urb: actual_length %d transfer_buffer null\n",
+				urb->actual_length);
+			return -1;
+		}
+
 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
 			iovnum = 2 + urb->number_of_packets;
 		else
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 65d4a30..9f1ec43 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -851,11 +851,13 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 
 	/*
 	 * Allow writes to device control fields, except devctl_phantom,
-	 * which could confuse IOMMU, and the ARI bit in devctl2, which
+	 * which could confuse IOMMU, MPS, which can break communication
+	 * with other physical devices, and the ARI bit in devctl2, which
 	 * is set at probe time.  FLR gets virtualized via our writefn.
 	 */
 	p_setw(perm, PCI_EXP_DEVCTL,
-	       PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
+	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
+	       ~PCI_EXP_DEVCTL_PHANTOM);
 	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
 	return 0;
 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 85d3e64..59b3f62 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
 		mutex_lock(&container->lock);
 
 		ret = tce_iommu_create_default_window(container);
-		if (ret)
-			return ret;
-
-		ret = tce_iommu_create_window(container, create.page_shift,
-				create.window_size, create.levels,
-				&create.start_addr);
+		if (!ret)
+			ret = tce_iommu_create_window(container,
+					create.page_shift,
+					create.window_size, create.levels,
+					&create.start_addr);
 
 		mutex_unlock(&container->lock);
 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d05..9e36632 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -693,6 +693,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 		      struct scatterlist *sg, int sg_count)
 {
 	size_t off = iter->iov_offset;
+	struct scatterlist *p = sg;
 	int i, ret;
 
 	for (i = 0; i < iter->nr_segs; i++) {
@@ -701,8 +702,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 
 		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
 		if (ret < 0) {
-			for (i = 0; i < sg_count; i++) {
-				struct page *page = sg_page(&sg[i]);
+			while (p < sg) {
+				struct page *page = sg_page(p++);
 				if (page)
 					put_page(page);
 			}
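
Note on the vhost/scsi hunk: the error path now releases only the scatterlist entries that were actually populated (everything before the cursor) rather than walking the full sg_count, part of which was never initialized. A minimal user-space sketch of the same unwind-only-what-you-did pattern, with a made-up resource standing in for the mapped pages:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical acquisition that fails at index 3. */
static void *acquire(int i)
{
	return i < 3 ? malloc(16) : NULL;
}

static int fill_all(void **slots, int count)
{
	void **start = slots;
	int i;

	for (i = 0; i < count; i++) {
		void *r = acquire(i);
		if (!r) {
			/* release only the entries filled so far */
			while (start < &slots[i])
				free(*start++);
			return -1;
		}
		slots[i] = r;
	}
	return 0;
}

int main(void)
{
	void *slots[8] = { 0 };
	printf("%d\n", fill_all(slots, 8));   /* -1, no leak, no bogus free */
	return 0;
}
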
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e3fad30..0ec970c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -218,6 +218,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	return len;
 }
 
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+	struct vhost_vsock *vsock;
+	struct virtio_vsock_pkt *pkt, *n;
+	int cnt = 0;
+	LIST_HEAD(freeme);
+
+	/* Find the vhost_vsock according to guest context id  */
+	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+	if (!vsock)
+		return -ENODEV;
+
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+		if (pkt->vsk != vsk)
+			continue;
+		list_move(&pkt->list, &freeme);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+			vhost_poll_queue(&tx_vq->poll);
+	}
+
+	return 0;
+}
+
 static struct virtio_vsock_pkt *
 vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 		      unsigned int out, unsigned int in)
@@ -669,6 +709,7 @@ static struct virtio_transport vhost_transport = {
 		.release                  = virtio_transport_release,
 		.connect                  = virtio_transport_connect,
 		.shutdown                 = virtio_transport_shutdown,
+		.cancel_pkt               = vhost_transport_cancel_pkt,
 
 		.dgram_enqueue            = virtio_transport_dgram_enqueue,
 		.dgram_dequeue            = virtio_transport_dgram_dequeue,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 1261400..d95ae09 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
 static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
 {
 	unsigned int lth = pb->lth_brightness;
-	int duty_cycle;
+	u64 duty_cycle;
 
 	if (pb->levels)
 		duty_cycle = pb->levels[brightness];
 	else
 		duty_cycle = brightness;
 
-	return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+	duty_cycle *= pb->period - lth;
+	do_div(duty_cycle, pb->scale);
+
+	return duty_cycle + lth;
 }
 
 static int pwm_backlight_update_status(struct backlight_device *bl)
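
Note on the pwm_bl change: duty_cycle is widened to u64 and divided with do_div() because the intermediate product brightness * (period - lth) can exceed 32 bits once the PWM period is a few milliseconds expressed in nanoseconds. A small demonstration of the wrap with invented numbers (not taken from any real panel):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t level = 4096;          /* hypothetical entry from pb->levels[] */
	uint32_t period_ns = 5000000;   /* 5 ms PWM period in nanoseconds */
	uint32_t scale = 4096;

	/* 32-bit product wraps: 4096 * 5000000 is ~2.0e10, above 2^32 */
	uint32_t wrapped = (level * period_ns) / scale;

	/* widening one operand keeps the full product, as the patch does */
	uint64_t exact = ((uint64_t)level * period_ns) / scale;

	printf("32-bit: %u\n64-bit: %llu\n", wrapped, (unsigned long long)exact);
	return 0;
}
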
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 6c2b2ca..44c2be1 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
 
 		fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
 					&dev->dev);
-		if (!fbi)
+		if (!fbi) {
+			ret = -ENOMEM;
 			goto failed;
+		}
 
 		_au1200fb_infos[plane] = fbi;
 		fbdev = fbi->par;
@@ -1700,7 +1702,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
 		if (!fbdev->fb_mem) {
 			print_err("fail to allocate frambuffer (size: %dK))",
 				  fbdev->fb_len / 1024);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto failed;
 		}
 
 		/*
diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60..261522f 100644
--- a/drivers/video/fbdev/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
 	{{ 1, 2}},	/* 1152x870, 75Hz */
 	{{ 0, 1}},	/* 1280x960, 75Hz */
 	{{ 0, 1}},	/* 1280x1024, 75Hz */
+	{{ 1, 2}},	/* 1152x768, 60Hz */
+	{{ 0, 1}},	/* 1600x1024, 60Hz */
 };
 
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index e9c2f7b..53326ba 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
 
 	for (i = 0; i < len; i++) {
 		ret = usb_control_msg(dev->udev,
-				    usb_rcvctrlpipe(dev->udev, 0), (0x02),
-				    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
-				    HZ);
-		if (ret < 1) {
-			pr_err("Read EDID byte %d failed err %x\n", i, ret);
+				      usb_rcvctrlpipe(dev->udev, 0), 0x02,
+				      (0x80 | (0x02 << 5)), i << 8, 0xA1,
+				      rbuf, 2, USB_CTRL_GET_TIMEOUT);
+		if (ret < 2) {
+			pr_err("Read EDID byte %d failed: %d\n", i, ret);
 			i--;
 			break;
 		}
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 7062bb0..462e183 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -323,6 +323,8 @@ int register_virtio_device(struct virtio_device *dev)
 	/* device_register() causes the bus infrastructure to look for a
 	 * matching driver. */
 	err = device_register(&dev->dev);
+	if (err)
+		ida_simple_remove(&virtio_index_ida, dev->index);
 out:
 	if (err)
 		add_status(dev, VIRTIO_CONFIG_S_FAILED);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 2c2e679..a7c08cc 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -241,11 +241,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
 
 #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
 
-static void update_balloon_stats(struct virtio_balloon *vb)
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
 {
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	struct sysinfo i;
-	int idx = 0;
+	unsigned int idx = 0;
 	long available;
 
 	all_vm_events(events);
@@ -253,18 +253,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
 
 	available = si_mem_available();
 
+#ifdef CONFIG_VM_EVENT_COUNTERS
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
 				pages_to_bytes(events[PSWPIN]));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
 				pages_to_bytes(events[PSWPOUT]));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
 				pages_to_bytes(i.freeram));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
 				pages_to_bytes(i.totalram));
 	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
 				pages_to_bytes(available));
+
+	return idx;
 }
 
 /*
@@ -290,14 +294,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
 {
 	struct virtqueue *vq;
 	struct scatterlist sg;
-	unsigned int len;
+	unsigned int len, num_stats;
 
-	update_balloon_stats(vb);
+	num_stats = update_balloon_stats(vb);
 
 	vq = vb->stats_vq;
 	if (!virtqueue_get_buf(vq, &len))
 		return;
-	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
 	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
 	virtqueue_kick(vq);
 }
@@ -421,15 +425,16 @@ static int init_vqs(struct virtio_balloon *vb)
 	vb->deflate_vq = vqs[1];
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
 		struct scatterlist sg;
+		unsigned int num_stats;
 		vb->stats_vq = vqs[2];
 
 		/*
 		 * Prime this virtqueue with one buffer so the hypervisor can
 		 * use it to signal us later (it can't be broken yet!).
 		 */
-		update_balloon_stats(vb);
+		num_stats = update_balloon_stats(vb);
 
-		sg_init_one(&sg, vb->stats, sizeof vb->stats);
+		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
 		    < 0)
 			BUG();
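
Note on the balloon stats change: update_balloon_stats() now reports how many entries it filled, and the buffer handed to the host is sized as sizeof(vb->stats[0]) * num_stats rather than sizeof(vb->stats); with CONFIG_VM_EVENT_COUNTERS off the array is only partially populated, and the old sizing would have exposed stale trailing entries. A tiny stand-alone sketch of the same sizing rule (the stat layout and count here are invented):

#include <stdio.h>
#include <string.h>

struct stat_entry { unsigned short tag; unsigned long long val; };

/* Pretend only the first `filled` entries were written this time around. */
static size_t bytes_to_send(const struct stat_entry *stats, size_t filled)
{
	return sizeof(stats[0]) * filled;   /* not sizeof(the whole array) */
}

int main(void)
{
	struct stat_entry stats[10];
	size_t filled = 6;                  /* e.g. event counters compiled out */

	memset(stats, 0, sizeof(stats));
	printf("send %zu of %zu bytes\n",
	       bytes_to_send(stats, filled), sizeof(stats));
	return 0;
}
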
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12..0a3c676 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
 			rc = -ENOMEM;
 			goto out;
 		}
-	} else if (msg_type == XS_TRANSACTION_END) {
+	} else if (u->u.msg.tx_id != 0) {
 		list_for_each_entry(trans, &u->transactions, list)
 			if (trans->handle.id == u->u.msg.tx_id)
 				break;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 30ca770..f8ab4a6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
 
 	if (v9inode->qid.type != st->qid.type)
 		return 0;
+
+	if (v9inode->qid.path != st->qid.path)
+		return 0;
 	return 1;
 }
 
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index afaa4b6..c3dd0d4 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
 
 	if (v9inode->qid.type != st->qid.type)
 		return 0;
+
+	if (v9inode->qid.path != st->qid.path)
+		return 0;
 	return 1;
 }
 
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 1e9d2f8..1592dc6 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
 {
 	struct afs_server *server;
 	struct afs_vnode *vnode, *xvnode;
-	time_t now;
+	time64_t now;
 	long timeout;
 	int ret;
 
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	/* find the first vnode to update */
 	spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
 
 	/* and then reschedule */
 	_debug("reschedule");
-	vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+	vnode->update_at = ktime_get_real_seconds() +
+			afs_vnode_update_timeout;
 
 	spin_lock(&server->cb_lock);
 
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index d764236..168f2a4 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -106,6 +106,9 @@ bool afs_cm_incoming_call(struct afs_call *call)
 	case CBProbe:
 		call->type = &afs_SRXCBProbe;
 		return true;
+	case CBProbeUuid:
+		call->type = &afs_SRXCBProbeUuid;
+		return true;
 	case CBTellMeAboutYourself:
 		call->type = &afs_SRXCBTellMeAboutYourself;
 		return true;
@@ -165,7 +168,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 	struct afs_callback *cb;
 	struct afs_server *server;
 	__be32 *bp;
-	u32 tmp;
 	int ret, loop;
 
 	_enter("{%u}", call->unmarshall);
@@ -227,9 +229,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 		if (ret < 0)
 			return ret;
 
-		tmp = ntohl(call->tmp);
-		_debug("CB count: %u", tmp);
-		if (tmp != call->count && tmp != 0)
+		call->count2 = ntohl(call->tmp);
+		_debug("CB count: %u", call->count2);
+		if (call->count2 != call->count && call->count2 != 0)
 			return -EBADMSG;
 		call->offset = 0;
 		call->unmarshall++;
@@ -237,14 +239,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 	case 4:
 		_debug("extract CB array");
 		ret = afs_extract_data(call, call->buffer,
-				       call->count * 3 * 4, false);
+				       call->count2 * 3 * 4, false);
 		if (ret < 0)
 			return ret;
 
 		_debug("unmarshall CB array");
 		cb = call->request;
 		bp = call->buffer;
-		for (loop = call->count; loop > 0; loop--, cb++) {
+		for (loop = call->count2; loop > 0; loop--, cb++) {
 			cb->version	= ntohl(*bp++);
 			cb->expiry	= ntohl(*bp++);
 			cb->type	= ntohl(*bp++);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6344aee..7237297 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -29,6 +29,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
 
 const struct file_operations afs_file_operations = {
 	.open		= afs_open,
+	.flush		= afs_flush,
 	.release	= afs_release,
 	.llseek		= generic_file_llseek,
 	.read_iter	= generic_file_read_iter,
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 31c616a..88e4406 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -105,7 +105,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
 			vnode->vfs_inode.i_mode = mode;
 		}
 
-		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_server;
+		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_client;
 		vnode->vfs_inode.i_mtime	= vnode->vfs_inode.i_ctime;
 		vnode->vfs_inode.i_atime	= vnode->vfs_inode.i_ctime;
 		vnode->vfs_inode.i_version	= data_version;
@@ -139,7 +139,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
 	vnode->cb_version	= ntohl(*bp++);
 	vnode->cb_expiry	= ntohl(*bp++);
 	vnode->cb_type		= ntohl(*bp++);
-	vnode->cb_expires	= vnode->cb_expiry + get_seconds();
+	vnode->cb_expires	= vnode->cb_expiry + ktime_get_real_seconds();
 	*_bp = bp;
 }
 
@@ -676,8 +676,8 @@ int afs_fs_create(struct afs_server *server,
 		memset(bp, 0, padsz);
 		bp = (void *) bp + padsz;
 	}
-	*bp++ = htonl(AFS_SET_MODE);
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -945,8 +945,8 @@ int afs_fs_symlink(struct afs_server *server,
 		memset(bp, 0, c_padsz);
 		bp = (void *) bp + c_padsz;
 	}
-	*bp++ = htonl(AFS_SET_MODE);
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1145,8 +1145,8 @@ static int afs_fs_store_data64(struct afs_server *server,
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	*bp++ = 0; /* mask */
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MTIME); /* mask */
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = 0; /* unix mode */
@@ -1178,7 +1178,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 	_enter(",%x,{%x:%u},,",
 	       key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
 
-	size = to - offset;
+	size = (loff_t)to - (loff_t)offset;
 	if (first != last)
 		size += (loff_t)(last - first) << PAGE_SHIFT;
 	pos = (loff_t)first << PAGE_SHIFT;
@@ -1222,8 +1222,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 	*bp++ = htonl(vnode->fid.vnode);
 	*bp++ = htonl(vnode->fid.unique);
 
-	*bp++ = 0; /* mask */
-	*bp++ = 0; /* mtime */
+	*bp++ = htonl(AFS_SET_MTIME); /* mask */
+	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
 	*bp++ = 0; /* owner */
 	*bp++ = 0; /* group */
 	*bp++ = 0; /* unix mode */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 86cc726..42582e4 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -70,9 +70,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
 
 	set_nlink(inode, vnode->status.nlink);
 	inode->i_uid		= vnode->status.owner;
-	inode->i_gid		= GLOBAL_ROOT_GID;
+	inode->i_gid            = vnode->status.group;
 	inode->i_size		= vnode->status.size;
-	inode->i_ctime.tv_sec	= vnode->status.mtime_server;
+	inode->i_ctime.tv_sec	= vnode->status.mtime_client;
 	inode->i_ctime.tv_nsec	= 0;
 	inode->i_atime		= inode->i_mtime = inode->i_ctime;
 	inode->i_blocks		= 0;
@@ -245,12 +245,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
 			vnode->cb_version = 0;
 			vnode->cb_expiry = 0;
 			vnode->cb_type = 0;
-			vnode->cb_expires = get_seconds();
+			vnode->cb_expires = ktime_get_real_seconds();
 		} else {
 			vnode->cb_version = cb->version;
 			vnode->cb_expiry = cb->expiry;
 			vnode->cb_type = cb->type;
-			vnode->cb_expires = vnode->cb_expiry + get_seconds();
+			vnode->cb_expires = vnode->cb_expiry +
+				ktime_get_real_seconds();
 		}
 	}
 
@@ -323,7 +324,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 	    !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
 	    !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
 	    !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
-		if (vnode->cb_expires < get_seconds() + 10) {
+		if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
 			_debug("callback expired");
 			set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
 		} else {
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 535a38d..dd98dcd 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 #include <linux/rxrpc.h>
@@ -105,7 +106,10 @@ struct afs_call {
 	unsigned		request_size;	/* size of request data */
 	unsigned		reply_max;	/* maximum size of reply */
 	unsigned		first_offset;	/* offset into mapping[first] */
-	unsigned		last_to;	/* amount of mapping[last] */
+	union {
+		unsigned	last_to;	/* amount of mapping[last] */
+		unsigned	count2;		/* count used in unmarshalling */
+	};
 	unsigned char		unmarshall;	/* unmarshalling phase */
 	bool			incoming;	/* T if incoming call */
 	bool			send_pages;	/* T if data from mapping should be sent */
@@ -242,7 +246,7 @@ struct afs_cache_vhash {
  */
 struct afs_vlocation {
 	atomic_t		usage;
-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
+	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
 	struct list_head	link;		/* link in cell volume location list */
 	struct list_head	grave;		/* link in master graveyard list */
 	struct list_head	update;		/* link in master update list */
@@ -253,7 +257,7 @@ struct afs_vlocation {
 	struct afs_cache_vlocation vldb;	/* volume information DB record */
 	struct afs_volume	*vols[3];	/* volume access record pointer (index by type) */
 	wait_queue_head_t	waitq;		/* status change waitqueue */
-	time_t			update_at;	/* time at which record should be updated */
+	time64_t		update_at;	/* time at which record should be updated */
 	spinlock_t		lock;		/* access lock */
 	afs_vlocation_state_t	state;		/* volume location state */
 	unsigned short		upd_rej_cnt;	/* ENOMEDIUM count during update */
@@ -266,7 +270,7 @@ struct afs_vlocation {
  */
 struct afs_server {
 	atomic_t		usage;
-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
+	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
 	struct in_addr		addr;		/* server address */
 	struct afs_cell		*cell;		/* cell in which server resides */
 	struct list_head	link;		/* link in cell's server list */
@@ -369,8 +373,8 @@ struct afs_vnode {
 	struct rb_node		server_rb;	/* link in server->fs_vnodes */
 	struct rb_node		cb_promise;	/* link in server->cb_promises */
 	struct work_struct	cb_broken_work;	/* work to be done on callback break */
-	time_t			cb_expires;	/* time at which callback expires */
-	time_t			cb_expires_at;	/* time used to order cb_promise */
+	time64_t		cb_expires;	/* time at which callback expires */
+	time64_t		cb_expires_at;	/* time used to order cb_promise */
 	unsigned		cb_version;	/* callback version */
 	unsigned		cb_expiry;	/* callback expiry time */
 	afs_callback_type_t	cb_type;	/* type of callback */
@@ -749,6 +753,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
 extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
 extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
 
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 91ea1aa..100b207 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
 	case RXKADDATALEN:	return -EKEYREJECTED;
 	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
 
+	case RXGEN_OPCODE:	return -ENOTSUPP;
+
 	default:		return -EREMOTEIO;
 	}
 }
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 25f05a8..523b1d3 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -321,6 +321,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	struct rxrpc_call *rxcall;
 	struct msghdr msg;
 	struct kvec iov[1];
+	size_t offset;
+	u32 abort_code;
 	int ret;
 
 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -368,9 +370,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	msg.msg_controllen	= 0;
 	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);
 
-	/* have to change the state *before* sending the last packet as RxRPC
-	 * might give us the reply before it returns from sending the
-	 * request */
+	/* We have to change the state *before* sending the last packet as
+	 * rxrpc might give us the reply before it returns from sending the
+	 * request.  Further, if the send fails, we may already have been given
+	 * a notification and may have collected it.
+	 */
 	if (!call->send_pages)
 		call->state = AFS_CALL_AWAIT_REPLY;
 	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -389,7 +393,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 	return wait_mode->wait(call);
 
 error_do_abort:
-	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+	call->state = AFS_CALL_COMPLETE;
+	if (ret != -ECONNABORTED) {
+		rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+					-ret, "KSD");
+	} else {
+		abort_code = 0;
+		offset = 0;
+		rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+				       false, &abort_code);
+		ret = call->type->abort_to_error(abort_code);
+	}
 error_kill_call:
 	afs_end_call(call);
 	_leave(" = %d", ret);
@@ -434,16 +448,18 @@ static void afs_deliver_to_call(struct afs_call *call)
 		case -EINPROGRESS:
 		case -EAGAIN:
 			goto out;
+		case -ECONNABORTED:
+			goto call_complete;
 		case -ENOTCONN:
 			abort_code = RX_CALL_DEAD;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KNC");
-			goto do_abort;
+			goto save_error;
 		case -ENOTSUPP:
-			abort_code = RX_INVALID_OPERATION;
+			abort_code = RXGEN_OPCODE;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, -ret, "KIV");
-			goto do_abort;
+			goto save_error;
 		case -ENODATA:
 		case -EBADMSG:
 		case -EMSGSIZE:
@@ -453,7 +469,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 				abort_code = RXGEN_SS_UNMARSHAL;
 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
 						abort_code, EBADMSG, "KUM");
-			goto do_abort;
+			goto save_error;
 		}
 	}
 
@@ -464,8 +480,9 @@ static void afs_deliver_to_call(struct afs_call *call)
 	_leave("");
 	return;
 
-do_abort:
+save_error:
 	call->error = ret;
+call_complete:
 	call->state = AFS_CALL_COMPLETE;
 	goto done;
 }
@@ -475,7 +492,6 @@ static void afs_deliver_to_call(struct afs_call *call)
  */
 static int afs_wait_for_call_to_complete(struct afs_call *call)
 {
-	const char *abort_why;
 	int ret;
 
 	DECLARE_WAITQUEUE(myself, current);
@@ -494,13 +510,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 			continue;
 		}
 
-		abort_why = "KWC";
-		ret = call->error;
-		if (call->state == AFS_CALL_COMPLETE)
-			break;
-		abort_why = "KWI";
-		ret = -EINTR;
-		if (signal_pending(current))
+		if (call->state == AFS_CALL_COMPLETE ||
+		    signal_pending(current))
 			break;
 		schedule();
 	}
@@ -508,13 +519,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
 	remove_wait_queue(&call->waitq, &myself);
 	__set_current_state(TASK_RUNNING);
 
-	/* kill the call */
+	/* Kill off the call if it's still live. */
 	if (call->state < AFS_CALL_COMPLETE) {
-		_debug("call incomplete");
+		_debug("call interrupted");
 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
-					RX_CALL_DEAD, -ret, abort_why);
+					RX_USER_ABORT, -EINTR, "KWI");
 	}
 
+	ret = call->error;
 	_debug("call complete");
 	afs_end_call(call);
 	_leave(" = %d", ret);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 8d01042..bfa9d34 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
 	} else {
 		if (!(access & AFS_ACE_LOOKUP))
 			goto permission_denied;
+		if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+			goto permission_denied;
 		if (mask & (MAY_EXEC | MAY_READ)) {
 			if (!(access & AFS_ACE_READ))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IRUSR))
+				goto permission_denied;
 		} else if (mask & MAY_WRITE) {
 			if (!(access & AFS_ACE_WRITE))
 				goto permission_denied;
+			if (!(inode->i_mode & S_IWUSR))
+				goto permission_denied;
 		}
 	}
 
 	key_put(key);
-	ret = generic_permission(inode, mask);
 	_leave(" = %d", ret);
 	return ret;
 
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d4066ab..c001b1f 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
 	spin_lock(&afs_server_graveyard_lock);
 	if (atomic_read(&server->usage) == 0) {
 		list_move_tail(&server->grave, &afs_server_graveyard);
-		server->time_of_death = get_seconds();
+		server->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_server_reaper,
 				   afs_server_timeout * HZ);
 	}
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
 	LIST_HEAD(corpses);
 	struct afs_server *server;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_server_graveyard_lock);
 
 	while (!list_empty(&afs_server_graveyard)) {
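
Note on the afs conversions in this series (here and in the vlocation/callback code): time_t and get_seconds() are replaced by time64_t and ktime_get_real_seconds(), since on configurations with a 32-bit time_t the old counters stop fitting in January 2038. A trivial illustration of the limit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 2038-01-19 03:14:07 UTC is the last second a signed 32-bit counter holds */
	int64_t last = INT32_MAX;
	int64_t next = last + 1;

	printf("next second fits in 32 bits: %s\n",
	       next <= INT32_MAX ? "yes" : "no");
	printf("64-bit counter keeps going: %lld\n", (long long)next);
	return 0;
}
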
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 45a8639..92bd555 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
 	struct afs_vlocation *xvl;
 
 	/* wait at least 10 minutes before updating... */
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
 	if (atomic_read(&vl->usage) == 0) {
 		_debug("buried");
 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
-		vl->time_of_death = get_seconds();
+		vl->time_of_death = ktime_get_real_seconds();
 		queue_delayed_work(afs_wq, &afs_vlocation_reap,
 				   afs_vlocation_timeout * HZ);
 
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
 	LIST_HEAD(corpses);
 	struct afs_vlocation *vl;
 	unsigned long delay, expiry;
-	time_t now;
+	time64_t now;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 	spin_lock(&afs_vlocation_graveyard_lock);
 
 	while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
 {
 	struct afs_cache_vlocation vldb;
 	struct afs_vlocation *vl, *xvl;
-	time_t now;
+	time64_t now;
 	long timeout;
 	int ret;
 
 	_enter("");
 
-	now = get_seconds();
+	now = ktime_get_real_seconds();
 
 	/* find a record to update */
 	spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
 
 	/* and then reschedule */
 	_debug("reschedule");
-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+	vl->update_at = ktime_get_real_seconds() +
+			afs_vlocation_update_timeout;
 
 	spin_lock(&afs_vlocation_updates_lock);
 
diff --git a/fs/afs/write.c b/fs/afs/write.c
index f865c3f..3fba2b5 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -148,12 +148,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 		kfree(candidate);
 		return -ENOMEM;
 	}
-	*pagep = page;
-	/* page won't leak in error case: it eventually gets cleaned off LRU */
 
 	if (!PageUptodate(page) && len != PAGE_SIZE) {
 		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
 		if (ret < 0) {
+			unlock_page(page);
+			put_page(page);
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
 			return ret;
@@ -161,6 +161,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 		SetPageUptodate(page);
 	}
 
+	/* page won't leak in error case: it eventually gets cleaned off LRU */
+	*pagep = page;
+
 try_again:
 	spin_lock(&vnode->writeback_lock);
 
@@ -296,10 +299,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
 		ASSERTCMP(pv.nr, ==, count);
 
 		for (loop = 0; loop < count; loop++) {
-			ClearPageUptodate(pv.pages[loop]);
+			struct page *page = pv.pages[loop];
+			ClearPageUptodate(page);
 			if (error)
-				SetPageError(pv.pages[loop]);
-			end_page_writeback(pv.pages[loop]);
+				SetPageError(page);
+			if (PageWriteback(page))
+				end_page_writeback(page);
+			if (page->index >= first)
+				first = page->index + 1;
 		}
 
 		__pagevec_release(&pv);
@@ -502,6 +509,7 @@ static int afs_writepages_region(struct address_space *mapping,
 
 		if (PageWriteback(page) || !PageDirty(page)) {
 			unlock_page(page);
+			put_page(page);
 			continue;
 		}
 
@@ -735,6 +743,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 }
 
 /*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+	_enter("");
+
+	if ((file->f_mode & FMODE_WRITE) == 0)
+		return 0;
+
+	return vfs_fsync(file, 0);
+}
+
+/*
  * notification that a previously read-only page is about to become writable
  * - if it returns an error, the caller will deliver a bus error signal
  */
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 5db6c8d..0ea31a5 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
 
-	return (bytes > 0);
+	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
+	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
 }
 
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
@@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 	} pkt;
 	struct file *pipe = NULL;
 	size_t pktsz;
+	int ret;
 
 	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
 		 (unsigned long) wq->wait_queue_token,
@@ -174,8 +176,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
 	mutex_unlock(&sbi->wq_mutex);
 
-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
+	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+	case 0:
+		break;
+	case -ENOMEM:
+	case -ERESTARTSYS:
+		/* Just fail this one */
+		autofs4_wait_release(sbi, wq->wait_queue_token, ret);
+		break;
+	default:
 		autofs4_catatonic_mode(sbi);
+		break;
+	}
 	fput(pipe);
 }
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 705bb5f..a29730c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3397,13 +3397,6 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 		goto again;
 	}
 
-	/* We've already setup this transaction, go ahead and exit */
-	if (block_group->cache_generation == trans->transid &&
-	    i_size_read(inode)) {
-		dcs = BTRFS_DC_SETUP;
-		goto out_put;
-	}
-
 	/*
 	 * We want to set the generation to 0, that way if anything goes wrong
 	 * from here on out we know not to trust this cache when we load up next
@@ -3427,6 +3420,13 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	}
 	WARN_ON(ret);
 
+	/* We've already setup this transaction, go ahead and exit */
+	if (block_group->cache_generation == trans->transid &&
+	    i_size_read(inode)) {
+		dcs = BTRFS_DC_SETUP;
+		goto out_put;
+	}
+
 	if (i_size_read(inode) > 0) {
 		ret = btrfs_check_trunc_cache_free_space(root,
 					&root->fs_info->global_block_rsv);
@@ -9362,6 +9362,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
+		err = ret;
 		goto out_end_trans;
 	}
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f089d7d..894d563 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6812,6 +6812,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
+
+	/*
+	 * decompression code contains a memset to fill in any space between the end
+	 * of the uncompressed data and the end of max_size in case the decompressed
+	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
+	 * the end of an inline extent and the beginning of the next block, so we
+	 * cover that region here.
+	 */
+
+	if (max_size + pg_offset < PAGE_SIZE) {
+		char *map = kmap(page);
+		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+		kunmap(page);
+	}
 	kfree(tmp);
 	return ret;
 }
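
Note on uncompress_inline(): the added comment describes the gap being closed — the decompressor only fills up to max_size, so page bytes between pg_offset + max_size and the end of the page could otherwise carry stale contents. A stand-alone sketch of the same tail-zeroing arithmetic, using an ordinary buffer in place of a kmapped page (the 4096-byte size and offsets are illustrative):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/* Zero whatever lies between the end of the decoded data and the page end. */
static void zero_tail(unsigned char *page, size_t pg_offset, size_t max_size)
{
	if (max_size + pg_offset < PAGE_SZ)
		memset(page + pg_offset + max_size, 0,
		       PAGE_SZ - max_size - pg_offset);
}

int main(void)
{
	unsigned char page[PAGE_SZ];

	memset(page, 0xaa, sizeof(page));    /* stand-in for stale page contents */
	zero_tail(page, 0, 100);             /* decoded 100 bytes at offset 0 */

	printf("byte 99: %#x, byte 100: %#x, last byte: %#x\n",
	       page[99], page[100], page[PAGE_SZ - 1]);
	return 0;
}
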
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 77f9efc..9a47b55 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6196,8 +6196,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 		goto out;
 	}
 
+	/*
+	 * Check that we don't overflow at later allocations, we request
+	 * clone_sources_count + 1 items, and compare to unsigned long inside
+	 * access_ok.
+	 */
 	if (arg->clone_sources_count >
-	    ULLONG_MAX / sizeof(*arg->clone_sources)) {
+	    ULONG_MAX / sizeof(struct clone_root) - 1) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 6e14404..a724d9a 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -501,7 +501,8 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
 	path = btrfs_alloc_path();
 	if (!path) {
 		test_msg("Couldn't allocate path\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	ret = add_block_group_free_space(&trans, root->fs_info, cache);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7fc89e4..83bb2f2 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -351,7 +351,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
 
 out:
 	btrfs_free_path(path);
-	if (ret)
-		btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-	return 0;
+	return ret;
 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c0f52c4..3d2639c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1396,6 +1396,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
 	return request_close_session(mdsc, session);
 }
 
+static bool drop_negative_children(struct dentry *dentry)
+{
+	struct dentry *child;
+	bool all_negative = true;
+
+	if (!d_is_dir(dentry))
+		goto out;
+
+	spin_lock(&dentry->d_lock);
+	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
+		if (d_really_is_positive(child)) {
+			all_negative = false;
+			break;
+		}
+	}
+	spin_unlock(&dentry->d_lock);
+
+	if (all_negative)
+		shrink_dcache_parent(dentry);
+out:
+	return all_negative;
+}
+
 /*
  * Trim old(er) caps.
  *
@@ -1441,16 +1464,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
 	if ((used | wanted) & ~oissued & mine)
 		goto out;   /* we need these caps */
 
-	session->s_trim_caps--;
 	if (oissued) {
 		/* we aren't the only cap.. just remove us */
 		__ceph_remove_cap(cap, true);
+		session->s_trim_caps--;
 	} else {
+		struct dentry *dentry;
 		/* try dropping referring dentries */
 		spin_unlock(&ci->i_ceph_lock);
-		d_prune_aliases(inode);
-		dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
-		     inode, cap, atomic_read(&inode->i_count));
+		dentry = d_find_any_alias(inode);
+		if (dentry && drop_negative_children(dentry)) {
+			int count;
+			dput(dentry);
+			d_prune_aliases(inode);
+			count = atomic_read(&inode->i_count);
+			if (count == 1)
+				session->s_trim_caps--;
+			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+			     inode, cap, count);
+		} else {
+			dput(dentry);
+		}
 		return 0;
 	}
 
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 5c24071..ab6e7dc 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -489,9 +489,6 @@ int fscrypt_initialize(void)
 {
 	int i, res = -ENOMEM;
 
-	if (fscrypt_bounce_page_pool)
-		return 0;
-
 	mutex_lock(&fscrypt_init_mutex);
 	if (fscrypt_bounce_page_pool)
 		goto already_initialized;
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index d1bbdc9..e14bb7b 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -332,7 +332,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
 	 * in a directory. Consequently, a user space name cannot be mapped to
 	 * a disk-space name
 	 */
-	return -EACCES;
+	return -ENOKEY;
 }
 EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
 
@@ -367,7 +367,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
 		return 0;
 	}
 	if (!lookup)
-		return -EACCES;
+		return -ENOKEY;
 
 	/*
 	 * We don't have the key and we are doing a lookup; decode the
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index bb4e209..c160d2d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -113,7 +113,7 @@ int fscrypt_process_policy(struct file *filp,
 
 	if (!inode_has_encryption_context(inode)) {
 		if (!S_ISDIR(inode->i_mode))
-			ret = -EINVAL;
+			ret = -ENOTDIR;
 		else if (!inode->i_sb->s_cop->empty_dir)
 			ret = -EOPNOTSUPP;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
diff --git a/fs/dax.c b/fs/dax.c
index bf6218d..800748f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1265,6 +1265,17 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
 		return -EIO;
 
+	/*
+	 * Write can allocate block for an area which has a hole page mapped
+	 * into page tables. We have to tear down these mappings so that data
+	 * written by write(2) is visible in mmap.
+	 */
+	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+		invalidate_inode_pages2_range(inode->i_mapping,
+					      pos >> PAGE_SHIFT,
+					      (end - 1) >> PAGE_SHIFT);
+	}
+
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
 		struct blk_dax_ctl dax = { 0 };
@@ -1329,23 +1340,6 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 	if (iov_iter_rw(iter) == WRITE)
 		flags |= IOMAP_WRITE;
 
-	/*
-	 * Yes, even DAX files can have page cache attached to them:  A zeroed
-	 * page is inserted into the pagecache when we have to serve a write
-	 * fault on a hole.  It should never be dirtied and can simply be
-	 * dropped from the pagecache once we get real data for the page.
-	 *
-	 * XXX: This is racy against mmap, and there's nothing we can do about
-	 * it. We'll eventually need to shift this down even further so that
-	 * we can check if we allocated blocks over a hole first.
-	 */
-	if (mapping->nrpages) {
-		ret = invalidate_inode_pages2_range(mapping,
-				pos >> PAGE_SHIFT,
-				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
-
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
 				iter, iomap_dax_actor);
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 286f10b..4f457d5 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
 	}
 	if (ecryptfs_daemon_hash) {
 		struct ecryptfs_daemon *daemon;
+		struct hlist_node *n;
 		int i;
 
 		mutex_lock(&ecryptfs_daemon_hash_mux);
 		for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
 			int rc;
 
-			hlist_for_each_entry(daemon,
-					     &ecryptfs_daemon_hash[i],
-					     euid_chain) {
+			hlist_for_each_entry_safe(daemon, n,
+						  &ecryptfs_daemon_hash[i],
+						  euid_chain) {
 				rc = ecryptfs_exorcise_daemon(daemon);
 				if (rc)
 					printk(KERN_ERR "%s: Error whilst "
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e57d463..a8573fa 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4733,6 +4733,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
 						    EXT4_INODE_EOFBLOCKS);
 		}
 		ext4_mark_inode_dirty(handle, inode);
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 		ret2 = ext4_journal_stop(handle);
 		if (ret2)
 			break;
@@ -4805,7 +4806,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	     offset + len > i_size_read(inode)) {
+	    (offset + len > i_size_read(inode) ||
+	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
 		if (ret)
@@ -4976,7 +4978,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	     offset + len > i_size_read(inode)) {
+	    (offset + len > i_size_read(inode) ||
+	     offset + len > EXT4_I(inode)->i_disksize)) {
 		new_size = offset + len;
 		ret = inode_newsize_ok(inode, new_size);
 		if (ret)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 170421e..2d94e85 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 		if (err)
 			return ERR_PTR(err);
 		if (!fscrypt_has_encryption_key(dir))
-			return ERR_PTR(-EPERM);
+			return ERR_PTR(-ENOKEY);
 		if (!handle)
 			nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
 		encrypt = 1;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 00b8a5a..b1766a6 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1378,6 +1378,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 		return NULL;
 
 	retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
+	if (retval == -ENOENT)
+		return NULL;
 	if (retval)
 		return ERR_PTR(retval);
 
@@ -1415,6 +1417,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 			       "falling back\n"));
 	}
 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+	if (!nblocks) {
+		ret = NULL;
+		goto cleanup_and_exit;
+	}
 	start = EXT4_I(dir)->i_dir_start_lookup;
 	if (start >= nblocks)
 		start = 0;
@@ -3090,7 +3096,7 @@ static int ext4_symlink(struct inode *dir,
 		if (err)
 			return err;
 		if (!fscrypt_has_encryption_key(dir))
-			return -EPERM;
+			return -ENOKEY;
 		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
 				 sizeof(struct fscrypt_symlink_data));
 		sd = kzalloc(disk_link.len, GFP_KERNEL);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 11f3717..8add4e8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -277,7 +277,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
 
 	err = fscrypt_setup_filename(dir, child, 1, &fname);
 	if (err) {
-		*res_page = ERR_PTR(err);
+		if (err == -ENOENT)
+			*res_page = NULL;
+		else
+			*res_page = ERR_PTR(err);
 		return NULL;
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 08d7dc9..8556fe1 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -403,7 +403,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
 			return err;
 
 		if (!fscrypt_has_encryption_key(dir))
-			return -EPERM;
+			return -ENOKEY;
 
 		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
 				sizeof(struct fscrypt_symlink_data));
@@ -447,7 +447,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
 			goto err_out;
 
 		if (!fscrypt_has_encryption_key(inode)) {
-			err = -EPERM;
+			err = -ENOKEY;
 			goto err_out;
 		}
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ffec69d..ad2e55d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
 	spin_unlock_bh(&wb->work_lock);
 }
 
+static void finish_writeback_work(struct bdi_writeback *wb,
+				  struct wb_writeback_work *work)
+{
+	struct wb_completion *done = work->done;
+
+	if (work->auto_free)
+		kfree(work);
+	if (done && atomic_dec_and_test(&done->cnt))
+		wake_up_all(&wb->bdi->wb_waitq);
+}
+
 static void wb_queue_work(struct bdi_writeback *wb,
 			  struct wb_writeback_work *work)
 {
 	trace_writeback_queue(wb, work);
 
-	spin_lock_bh(&wb->work_lock);
-	if (!test_bit(WB_registered, &wb->state))
-		goto out_unlock;
 	if (work->done)
 		atomic_inc(&work->done->cnt);
-	list_add_tail(&work->list, &wb->work_list);
-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
-out_unlock:
+
+	spin_lock_bh(&wb->work_lock);
+
+	if (test_bit(WB_registered, &wb->state)) {
+		list_add_tail(&work->list, &wb->work_list);
+		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+	} else
+		finish_writeback_work(wb, work);
+
 	spin_unlock_bh(&wb->work_lock);
 }
 
@@ -1875,16 +1889,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 
 	set_bit(WB_writeback_running, &wb->state);
 	while ((work = get_next_work_item(wb)) != NULL) {
-		struct wb_completion *done = work->done;
-
 		trace_writeback_exec(wb, work);
-
 		wrote += wb_writeback(wb, work);
-
-		if (work->auto_free)
-			kfree(work);
-		if (done && atomic_dec_and_test(&done->cnt))
-			wake_up_all(&wb->bdi->wb_waitq);
+		finish_writeback_work(wb, work);
 	}
 
 	/*
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index e23ff70..39c382f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -256,7 +256,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 			goto out;
 	}
 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
-		if (flags & GFS2_DIF_JDATA)
+		if (new_flags & GFS2_DIF_JDATA)
 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
 		error = filemap_fdatawrite(inode->i_mapping);
 		if (error)
@@ -264,6 +264,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 		error = filemap_fdatawait(inode->i_mapping);
 		if (error)
 			goto out;
+		if (new_flags & GFS2_DIF_JDATA)
+			gfs2_ordered_del_inode(ip);
 	}
 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 0ac4c1f..25177e6 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p)
 	/* Ignore bigendian datum due to broken mastering programs */
 	return get_unaligned_le32(p);
 }
-extern int iso_date(char *, int);
+extern int iso_date(u8 *, int);
 
 struct inode;		/* To make gcc happy */
 
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ed09e2b..f835976 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -65,7 +65,7 @@ struct RR_PL_s {
 };
 
 struct stamp {
-	char time[7];
+	__u8 time[7];		/* actually 6 unsigned, 1 signed */
 } __attribute__ ((packed));
 
 struct RR_TF_s {
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 005a15c..37860fe 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -15,7 +15,7 @@
  * to GMT.  Thus  we should always be correct.
  */
 
-int iso_date(char * p, int flag)
+int iso_date(u8 *p, int flag)
 {
 	int year, month, day, hour, minute, second, tz;
 	int crtime;
diff --git a/fs/libfs.c b/fs/libfs.c
index 48826d4..9588780a 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
 	struct inode *root;
 	struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-	s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+	s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+			&init_user_ns, NULL);
 	if (IS_ERR(s))
 		return ERR_CAST(s);
 
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index fc4084e..9d37324 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -365,6 +365,7 @@ static int lockd_start_svc(struct svc_serv *serv)
 		printk(KERN_WARNING
 			"lockd_up: svc_rqst allocation failed, error=%d\n",
 			error);
+		lockd_unregister_notifiers();
 		goto out_rqst;
 	}
 
@@ -455,13 +456,16 @@ int lockd_up(struct net *net)
 	}
 
 	error = lockd_up_net(serv, net);
-	if (error < 0)
-		goto err_net;
+	if (error < 0) {
+		lockd_unregister_notifiers();
+		goto err_put;
+	}
 
 	error = lockd_start_svc(serv);
-	if (error < 0)
-		goto err_start;
-
+	if (error < 0) {
+		lockd_down_net(serv, net);
+		goto err_put;
+	}
 	nlmsvc_users++;
 	/*
 	 * Note: svc_serv structures have an initial use count of 1,
@@ -472,12 +476,6 @@ int lockd_up(struct net *net)
 err_create:
 	mutex_unlock(&nlmsvc_mutex);
 	return error;
-
-err_start:
-	lockd_down_net(serv, net);
-err_net:
-	lockd_unregister_notifiers();
-	goto err_put;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d04ec381..1e5321d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1292,7 +1292,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
 		return 0;
 	}
 
-	error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+	error = nfs_lookup_verify_inode(inode, flags);
 	dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
 			__func__, inode->i_ino, error ? "invalid" : "valid");
 	return !error;
@@ -1443,6 +1443,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
 
 const struct dentry_operations nfs4_dentry_operations = {
 	.d_revalidate	= nfs4_lookup_revalidate,
+	.d_weak_revalidate	= nfs_weak_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
 	.d_automount	= nfs_d_automount,
@@ -2097,7 +2098,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (new_inode != NULL)
 			nfs_drop_nlink(new_inode);
 		d_move(old_dentry, new_dentry);
-		nfs_set_verifier(new_dentry,
+		nfs_set_verifier(old_dentry,
 					nfs_save_change_attribute(new_dir));
 	} else if (error == -ENOENT)
 		nfs_dentry_handle_enoent(old_dentry);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 074ac71..f6b0848 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1004,9 +1004,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
 	server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
 	server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
 
-	if (server->rsize > server_resp_sz)
+	if (!server->rsize || server->rsize > server_resp_sz)
 		server->rsize = server_resp_sz;
-	if (server->wsize > server_rqst_sz)
+	if (!server->wsize || server->wsize > server_rqst_sz)
 		server->wsize = server_rqst_sz;
 #endif /* CONFIG_NFS_V4_1 */
 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a53b8e0..4638654 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
-#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -256,15 +255,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
 };
 
 const u32 nfs4_fs_locations_bitmap[3] = {
-	FATTR4_WORD0_TYPE
-	| FATTR4_WORD0_CHANGE
+	FATTR4_WORD0_CHANGE
 	| FATTR4_WORD0_SIZE
 	| FATTR4_WORD0_FSID
 	| FATTR4_WORD0_FILEID
 	| FATTR4_WORD0_FS_LOCATIONS,
-	FATTR4_WORD1_MODE
-	| FATTR4_WORD1_NUMLINKS
-	| FATTR4_WORD1_OWNER
+	FATTR4_WORD1_OWNER
 	| FATTR4_WORD1_OWNER_GROUP
 	| FATTR4_WORD1_RAWDEV
 	| FATTR4_WORD1_SPACE_USED
@@ -6009,7 +6005,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 	p->server = server;
 	atomic_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
-	get_file(fl->fl_file);
 	memcpy(&p->fl, fl, sizeof(p->fl));
 	return p;
 out_free_seqid:
@@ -6122,7 +6117,6 @@ static void nfs4_lock_release(void *calldata)
 		nfs_free_seqid(data->arg.lock_seqid);
 	nfs4_put_lock_state(data->lsp);
 	put_nfs_open_context(data->ctx);
-	fput(data->fl.fl_file);
 	kfree(data);
 	dprintk("%s: done!\n", __func__);
 }
@@ -6678,9 +6672,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
 				   struct page *page)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
-	u32 bitmask[3] = {
-		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
-	};
+	u32 bitmask[3];
 	struct nfs4_fs_locations_arg args = {
 		.dir_fh = NFS_FH(dir),
 		.name = name,
@@ -6699,12 +6691,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
 
 	dprintk("%s: start\n", __func__);
 
+	bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
+	bitmask[1] = nfs4_fattr_bitmap[1];
+
 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
 	 * is not supported */
 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
-		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+		bitmask[0] &= ~FATTR4_WORD0_FILEID;
 	else
-		bitmask[0] |= FATTR4_WORD0_FILEID;
+		bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
 
 	nfs_fattr_init(&fs_locations->fattr);
 	fs_locations->server = server;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9267191..71deeae 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1718,7 +1718,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 			break;
 		case -NFS4ERR_STALE_CLIENTID:
 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-			nfs4_state_clear_reclaim_reboot(clp);
 			nfs4_state_start_reclaim_reboot(clp);
 			break;
 		case -NFS4ERR_EXPIRED:
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index cfb8f7c..20cd850 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -201,17 +201,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
 		TP_ARGS(clp, error),
 
 		TP_STRUCT__entry(
-			__string(dstaddr,
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR))
+			__string(dstaddr, clp->cl_hostname)
 			__field(int, error)
 		),
 
 		TP_fast_assign(
 			__entry->error = error;
-			__assign_str(dstaddr,
-				rpc_peeraddr2str(clp->cl_rpcclient,
-						RPC_DISPLAY_ADDR));
+			__assign_str(dstaddr, clp->cl_hostname);
 		),
 
 		TP_printk(
@@ -1103,9 +1099,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
-			__string(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__string(dstaddr, clp ? clp->cl_hostname : "unknown")
 		),
 
 		TP_fast_assign(
@@ -1118,9 +1112,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
 				__entry->fileid = 0;
 				__entry->dev = 0;
 			}
-			__assign_str(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
 		),
 
 		TP_printk(
@@ -1162,9 +1154,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
 			__field(dev_t, dev)
 			__field(u32, fhandle)
 			__field(u64, fileid)
-			__string(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__string(dstaddr, clp ? clp->cl_hostname : "unknown")
 			__field(int, stateid_seq)
 			__field(u32, stateid_hash)
 		),
@@ -1179,9 +1169,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
 				__entry->fileid = 0;
 				__entry->dev = 0;
 			}
-			__assign_str(dstaddr, clp ?
-				rpc_peeraddr2str(clp->cl_rpcclient,
-					RPC_DISPLAY_ADDR) : "unknown")
+			__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
 			__entry->stateid_seq =
 				be32_to_cpu(stateid->seqid);
 			__entry->stateid_hash =
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ddce94ce..51bf1f9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1339,7 +1339,7 @@ static int nfs_parse_mount_options(char *raw,
 			mnt->options |= NFS_OPTION_MIGRATION;
 			break;
 		case Opt_nomigration:
-			mnt->options &= NFS_OPTION_MIGRATION;
+			mnt->options &= ~NFS_OPTION_MIGRATION;
 			break;
 
 		/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e4772a8..9905735 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1859,6 +1859,8 @@ int nfs_commit_inode(struct inode *inode, int how)
 	if (res)
 		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
 	nfs_commit_end(cinfo.mds);
+	if (res == 0)
+		return res;
 	if (error < 0)
 		goto out_error;
 	if (!may_wait)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d35eb07..9ebb2d7 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3513,7 +3513,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 		/* ignore lock owners */
 		if (local->st_stateowner->so_is_open_owner == 0)
 			continue;
-		if (local->st_stateowner == &oo->oo_owner) {
+		if (local->st_stateowner != &oo->oo_owner)
+			continue;
+		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
 			ret = local;
 			atomic_inc(&ret->st_stid.sc_count);
 			break;
@@ -3522,6 +3524,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 	return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+	__be32 ret = nfs_ok;
+
+	switch (s->sc_type) {
+	default:
+		break;
+	case NFS4_CLOSED_STID:
+	case NFS4_CLOSED_DELEG_STID:
+		ret = nfserr_bad_stateid;
+		break;
+	case NFS4_REVOKED_DELEG_STID:
+		ret = nfserr_deleg_revoked;
+	}
+	return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+	__be32 ret;
+
+	mutex_lock(&stp->st_mutex);
+	ret = nfsd4_verify_open_stid(&stp->st_stid);
+	if (ret != nfs_ok)
+		mutex_unlock(&stp->st_mutex);
+	return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+	struct nfs4_ol_stateid *stp;
+	for (;;) {
+		spin_lock(&fp->fi_lock);
+		stp = nfsd4_find_existing_open(fp, open);
+		spin_unlock(&fp->fi_lock);
+		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+			break;
+		nfs4_put_stid(&stp->st_stid);
+	}
+	return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 			   struct nfsd4_compound_state *cstate)
@@ -3566,6 +3614,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 	mutex_init(&stp->st_mutex);
 	mutex_lock(&stp->st_mutex);
 
+retry:
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
 	spin_lock(&fp->fi_lock);
 
@@ -3590,7 +3639,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 	spin_unlock(&fp->fi_lock);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
 	if (retstp) {
-		mutex_lock(&retstp->st_mutex);
+		/* Handle races with CLOSE */
+		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+			nfs4_put_stid(&retstp->st_stid);
+			goto retry;
+		}
 		/* To keep mutex tracking happy */
 		mutex_unlock(&stp->st_mutex);
 		stp = retstp;
@@ -3967,7 +4020,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
 {
 	struct nfs4_stid *ret;
 
-	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+	ret = find_stateid_by_type(cl, s,
+				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
 	if (!ret)
 		return NULL;
 	return delegstateid(ret);
@@ -3990,6 +4044,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
 	if (deleg == NULL)
 		goto out;
+	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+		nfs4_put_stid(&deleg->dl_stid);
+		if (cl->cl_minorversion)
+			status = nfserr_deleg_revoked;
+		goto out;
+	}
 	flags = share_access_to_flags(open->op_share_access);
 	status = nfs4_check_delegmode(deleg, flags);
 	if (status) {
@@ -4393,6 +4453,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	struct nfs4_ol_stateid *stp = NULL;
 	struct nfs4_delegation *dp = NULL;
 	__be32 status;
+	bool new_stp = false;
 
 	/*
 	 * Lookup file; if found, lookup stateid and check open request,
@@ -4404,9 +4465,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
 			goto out;
-		spin_lock(&fp->fi_lock);
-		stp = nfsd4_find_existing_open(fp, open);
-		spin_unlock(&fp->fi_lock);
+		stp = nfsd4_find_and_lock_existing_open(fp, open);
 	} else {
 		open->op_file = NULL;
 		status = nfserr_bad_stateid;
@@ -4414,35 +4473,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 			goto out;
 	}
 
+	if (!stp) {
+		stp = init_open_stateid(fp, open);
+		if (!open->op_stp)
+			new_stp = true;
+	}
+
 	/*
 	 * OPEN the file, or upgrade an existing OPEN.
 	 * If truncate fails, the OPEN fails.
+	 *
+	 * stp is already locked.
 	 */
-	if (stp) {
+	if (!new_stp) {
 		/* Stateid was found, this is an OPEN upgrade */
-		mutex_lock(&stp->st_mutex);
 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
 		if (status) {
 			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 	} else {
-		/* stp is returned locked. */
-		stp = init_open_stateid(fp, open);
-		/* See if we lost the race to some other thread */
-		if (stp->st_access_bmap != 0) {
-			status = nfs4_upgrade_open(rqstp, fp, current_fh,
-						stp, open);
-			if (status) {
-				mutex_unlock(&stp->st_mutex);
-				goto out;
-			}
-			goto upgrade_out;
-		}
 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
 		if (status) {
-			mutex_unlock(&stp->st_mutex);
+			stp->st_stid.sc_type = NFS4_CLOSED_STID;
 			release_open_stateid(stp);
+			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 
@@ -4451,7 +4506,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		if (stp->st_clnt_odstate == open->op_odstate)
 			open->op_odstate = NULL;
 	}
-upgrade_out:
+
 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
 	mutex_unlock(&stp->st_mutex);
 
@@ -4677,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);
@@ -4858,6 +4913,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     struct nfs4_stid **s, struct nfsd_net *nn)
 {
 	__be32 status;
+	bool return_revoked = false;
+
+	/*
+	 * Only return revoked delegations if explicitly asked;
+	 * otherwise report revoked or bad_stateid status.
+	 */
+	if (typemask & NFS4_REVOKED_DELEG_STID)
+		return_revoked = true;
+	else if (typemask & NFS4_DELEG_STID)
+		typemask |= NFS4_REVOKED_DELEG_STID;
 
 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
 		return nfserr_bad_stateid;
@@ -4872,6 +4937,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
 	if (!*s)
 		return nfserr_bad_stateid;
+	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+		nfs4_put_stid(*s);
+		if (cstate->minorversion)
+			return nfserr_deleg_revoked;
+		return nfserr_bad_stateid;
+	}
 	return nfs_ok;
 }
 
@@ -5291,7 +5362,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 	bool unhashed;
 	LIST_HEAD(reaplist);
 
-	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
 	unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5330,10 +5400,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	nfsd4_bump_seqid(cstate, status);
 	if (status)
 		goto out; 
+
+	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-	mutex_unlock(&stp->st_mutex);
 
 	nfsd4_close_open_stateid(stp);
+	mutex_unlock(&stp->st_mutex);
 
 	/* put reference from nfs4_preprocess_seqid_op */
 	nfs4_put_stid(&stp->st_stid);
@@ -7071,7 +7143,7 @@ nfs4_state_shutdown_net(struct net *net)
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);
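Both blocked-lock hunks above fix the same drain pattern: once expired entries have been spliced onto a function-local reaplist, the loop that tears them down must pop from that local list, not from the shared nn->blocked_locks_lru it was copied from. A condensed sketch of the intended shape (locking and the final free are elided, so this is illustrative rather than the exact function body):

	LIST_HEAD(reaplist);
	struct nfsd4_blocked_lock *nbl;

	/* entries were already moved from nn->blocked_locks_lru to reaplist */
	while (!list_empty(&reaplist)) {
		/* take from the local list, not the (possibly refilled) LRU */
		nbl = list_first_entry(&reaplist,
					struct nfsd4_blocked_lock, nbl_lru);
		list_del_init(&nbl->nbl_lru);
		posix_unblock_lock(&nbl->nbl_lock);
		/* the entry is then freed by the surrounding code */
	}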
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 1645b97..5c48006 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -155,7 +155,8 @@ int nfsd_vers(int vers, enum vers_op change)
 
 int nfsd_minorversion(u32 minorversion, enum vers_op change)
 {
-	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
+	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
+	    change != NFSD_AVAIL)
 		return -1;
 	switch(change) {
 	case NFSD_SET:
@@ -399,23 +400,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 
 void nfsd_reset_versions(void)
 {
-	int found_one = 0;
 	int i;
 
-	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
-		if (nfsd_program.pg_vers[i])
-			found_one = 1;
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (nfsd_vers(i, NFSD_TEST))
+			return;
 
-	if (!found_one) {
-		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
-			nfsd_program.pg_vers[i] = nfsd_version[i];
-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
-		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
-			nfsd_acl_program.pg_vers[i] =
-				nfsd_acl_version[i];
-#endif
-	}
+	for (i = 0; i < NFSD_NRVERS; i++)
+		if (i != 4)
+			nfsd_vers(i, NFSD_SET);
+		else {
+			int minor = 0;
+			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
+				minor++;
+		}
 }
 
 /*
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 7d18d62..36362d4 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1956,8 +1956,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 					  err, ii->vfs_inode.i_ino);
 				return err;
 			}
-			mark_buffer_dirty(ibh);
-			nilfs_mdt_mark_dirty(ifile);
 			spin_lock(&nilfs->ns_inode_lock);
 			if (likely(!ii->i_bh))
 				ii->i_bh = ibh;
@@ -1966,6 +1964,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 			goto retry;
 		}
 
+		/* Always redirty the buffer to avoid a race condition */
+		mark_buffer_dirty(ii->i_bh);
+		nilfs_mdt_mark_dirty(ifile);
+
 		clear_bit(NILFS_I_QUEUED, &ii->i_state);
 		set_bit(NILFS_I_BUSY, &ii->i_state);
 		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 15f327b..7340c36 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -14,6 +14,7 @@
 #include <linux/tty.h>
 #include <linux/seq_file.h>
 #include <linux/bitops.h>
+#include "internal.h"
 
 /*
  * The /proc/tty directory inodes...
@@ -164,7 +165,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
 	if (!ent)
 		return;
 		
-	remove_proc_entry(driver->driver_name, proc_tty_driver);
+	remove_proc_entry(ent->name, proc_tty_driver);
 	
 	driver->proc_entry = NULL;
 }
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4942549..4b1f6d5 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -710,7 +710,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
 	else
 		sectorsize = sb->s_blocksize;
 
-	sector += (sbi->s_session << sb->s_blocksize_bits);
+	sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
 
 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
 		  (unsigned int)(sector >> sb->s_blocksize_bits),
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 2cde073..9d9c032 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -419,7 +419,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 			 * in such case.
 			 */
 			down_read(&mm->mmap_sem);
-			ret = 0;
+			ret = VM_FAULT_NOPAGE;
 		}
 	}
 
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7eb9970..8ad65d4 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2713,7 +2713,7 @@ xfs_bmap_add_extent_unwritten_real(
 					&i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			cur->bc_rec.b.br_state = new->br_state;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index fe9a9a1..98ca9f1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2386,6 +2386,7 @@ xfs_ifree_cluster(
 				 */
 				if (ip->i_ino != inum + i) {
 					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					rcu_read_unlock();
 					continue;
 				}
 			}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 5b81f7f..33c3899 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -871,22 +871,6 @@ xfs_setattr_size(
 		return error;
 
 	/*
-	 * We are going to log the inode size change in this transaction so
-	 * any previous writes that are beyond the on disk EOF and the new
-	 * EOF that have not been written out need to be written here.  If we
-	 * do not write the data out, we expose ourselves to the null files
-	 * problem. Note that this includes any block zeroing we did above;
-	 * otherwise those blocks may not be zeroed after a crash.
-	 */
-	if (did_zeroing ||
-	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
-		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-						      ip->i_d.di_size, newsize);
-		if (error)
-			return error;
-	}
-
-	/*
 	 * We've already locked out new page faults, so now we can safely remove
 	 * pages from the page cache knowing they won't get refaulted until we
 	 * drop the XFS_MMAP_EXCL lock after the extent manipulations are
@@ -902,9 +886,29 @@ xfs_setattr_size(
 	 * user visible changes). There's not much we can do about this, except
 	 * to hope that the caller sees ENOMEM and retries the truncate
 	 * operation.
+	 *
+	 * And we update the in-core i_size and truncate the page cache beyond
+	 * newsize before writing back the [di_size, newsize] range, so we're
+	 * guaranteed not to write stale data past the new EOF on truncate down.
 	 */
 	truncate_setsize(inode, newsize);
 
+	/*
+	 * We are going to log the inode size change in this transaction so
+	 * any previous writes that are beyond the on disk EOF and the new
+	 * EOF that have not been written out need to be written here.  If we
+	 * do not write the data out, we expose ourselves to the null files
+	 * problem. Note that this includes any block zeroing we did above;
+	 * otherwise those blocks may not be zeroed after a crash.
+	 */
+	if (did_zeroing ||
+	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
+		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+						ip->i_d.di_size, newsize - 1);
+		if (error)
+			return error;
+	}
+
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 	if (error)
 		return error;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0590926..1e26f45 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -753,7 +753,7 @@ xlog_find_head(
 	 * in the in-core log.  The following number can be made tighter if
 	 * we actually look at the block size of the filesystem.
 	 */
-	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
+	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
 	if (head_blk >= num_scan_bblks) {
 		/*
 		 * We are guaranteed that the entire check can be performed
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index caedb74..43783ef 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -59,7 +59,7 @@ struct arch_timer_kvm_info {
 extern u32 arch_timer_get_rate(void);
 extern u64 (*arch_timer_read_counter)(void);
 extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
-
+extern void arch_timer_mem_get_cval(u32 *lo, u32 *hi);
 #else
 
 static inline u32 arch_timer_get_rate(void)
@@ -72,6 +72,10 @@ static inline u64 arch_timer_read_counter(void)
 	return 0;
 }
 
+static inline void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
+{
+	*lo = *hi = ~0U;
+}
 #endif
 
 #endif
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f6d9af3e..cac5735 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -80,6 +80,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
 			    struct ahash_instance *inst);
 void ahash_free_instance(struct crypto_instance *inst);
 
+int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+		    unsigned int keylen);
+
+static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+{
+	return alg->setkey != shash_no_setkey;
+}
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
 			    struct hash_alg_common *alg,
 			    struct crypto_instance *inst);
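shash_no_setkey() is the stub installed for shash algorithms that take no key, so crypto_shash_alg_has_setkey() reduces to a pointer comparison against it. A minimal sketch of how calling code might use it to reject unkeyed digests where a keyed hash (e.g. an HMAC) is required; the helper name is illustrative:

	#include <crypto/internal/hash.h>

	/* Hypothetical check: only accept algorithms that implement setkey. */
	static int example_require_keyed_hash(struct shash_alg *alg)
	{
		if (!crypto_shash_alg_has_setkey(alg))
			return -EINVAL;	/* unkeyed digest, not usable here */
		return 0;
	}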
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index 4a53c0d..e045722 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
 
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
index 3dad124..5d152f3 100644
--- a/include/dt-bindings/regulator/qcom,rpmh-regulator.h
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,19 +43,35 @@
 
 /*
  * These mode constants may be used for qcom,supported-modes and qcom,init-mode
- * properties of an RPMh resource.  Modes should be matched to the physical
- * PMIC regulator type (i.e. LDO, SMPS, or BOB).
+ * properties of an RPMh resource.  Each type of regulator supports a subset of
+ * the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_PASS:	Pass-through mode in which output is directly
+ *				tied to input.  This mode is only supported by
+ *				BOB type regulators.
+ * %RPMH_REGULATOR_MODE_RET:	Retention mode in which only an extremely small
+ *				load current is allowed.  This mode is supported
+ *				by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM:	Low power mode in which a small load current is
+ *				allowed.  This mode corresponds to PFM for SMPS
+ *				and BOB type regulators.  This mode is supported
+ *				by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_AUTO:	Auto mode in which the regulator hardware
+ *				automatically switches between LPM and HPM based
+ *				upon the real-time load current.  This mode is
+ *				supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_HPM:	High power mode in which the full rated current
+ *				of the regulator is allowed.  This mode
+ *				corresponds to PWM for SMPS and BOB type
+ *				regulators.  This mode is supported by all types
+ *				of regulators.
  */
-#define RPMH_REGULATOR_MODE_LDO_LPM	5
-#define RPMH_REGULATOR_MODE_LDO_HPM	7
-
-#define RPMH_REGULATOR_MODE_SMPS_PFM	5
-#define RPMH_REGULATOR_MODE_SMPS_AUTO	6
-#define RPMH_REGULATOR_MODE_SMPS_PWM	7
-
-#define RPMH_REGULATOR_MODE_BOB_PASS	0
-#define RPMH_REGULATOR_MODE_BOB_PFM	1
-#define RPMH_REGULATOR_MODE_BOB_AUTO	2
-#define RPMH_REGULATOR_MODE_BOB_PWM	3
+#define RPMH_REGULATOR_MODE_PASS	0
+#define RPMH_REGULATOR_MODE_RET		1
+#define RPMH_REGULATOR_MODE_LPM		2
+#define RPMH_REGULATOR_MODE_AUTO	3
+#define RPMH_REGULATOR_MODE_HPM		4
 
 #endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 61a3d90..ca2b4c4 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -276,11 +276,8 @@ bool acpi_processor_validate_proc_id(int proc_id);
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
 int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
-void acpi_set_processor_mapping(void);
-
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3101141..4c4e935 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -68,6 +68,7 @@ struct bpf_verifier_state_list {
 
 struct bpf_insn_aux_data {
 	enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
+	bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 447a915..4431ea2 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -239,12 +239,10 @@ static inline int block_page_mkwrite_return(int err)
 {
 	if (err == 0)
 		return VM_FAULT_LOCKED;
-	if (err == -EFAULT)
+	if (err == -EFAULT || err == -EAGAIN)
 		return VM_FAULT_NOPAGE;
 	if (err == -ENOMEM)
 		return VM_FAULT_OOM;
-	if (err == -EAGAIN)
-		return VM_FAULT_RETRY;
 	/* -ENOSPC, -EDQUOT, -EIO ... */
 	return VM_FAULT_SIGBUS;
 }
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ec9c128..69935e66 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -730,7 +730,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -738,7 +737,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 	return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define	DMA_MEMORY_MAP			0x01
diff --git a/include/linux/fence.h b/include/linux/fence.h
index fd9b89f..7c9b78c 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -47,7 +47,7 @@ struct fence_cb;
  * can be compared to decide which fence would be signaled later.
  * @flags: A mask of FENCE_FLAG_* defined below
  * @timestamp: Timestamp when the fence was signaled.
- * @status: Optional, only valid if < 0, must be set before calling
+ * @error: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
  *
  * the flags member must be manipulated and read using the appropriate
@@ -79,7 +79,7 @@ struct fence {
 	unsigned seqno;
 	unsigned long flags;
 	ktime_t timestamp;
-	int status;
+	int error;
 };
 
 enum fence_flag_bits {
@@ -133,7 +133,7 @@ struct fence_cb {
  * or some failure occurred that made it impossible to enable
  * signaling. True indicates successful enabling.
  *
- * fence->status may be set in enable_signaling, but only when false is
+ * fence->error may be set in enable_signaling, but only when false is
  * returned.
  *
  * Calling fence_signal before enable_signaling is called allows
@@ -145,7 +145,7 @@ struct fence_cb {
  * the second time will be a noop since it was already signaled.
  *
  * Notes on signaled:
- * May set fence->status if returning true.
+ * May set fence->error if returning true.
  *
  * Notes on wait:
  * Must not be NULL, set to fence_default_wait for default implementation.
@@ -329,6 +329,19 @@ fence_is_signaled(struct fence *fence)
 }
 
 /**
+ * __fence_is_later - return if f1 is chronologically later than f2
+ * @f1:	[in]	the first fence's seqno
+ * @f2:	[in]	the second fence's seqno from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __fence_is_later(u32 f1, u32 f2)
+{
+	return (int)(f1 - f2) > 0;
+}
+
+/**
  * fence_is_later - return if f1 is chronologically later than f2
  * @f1:	[in]	the first fence from the same context
  * @f2:	[in]	the second fence from the same context
@@ -341,7 +354,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2)
 	if (WARN_ON(f1->context != f2->context))
 		return false;
 
-	return (int)(f1->seqno - f2->seqno) > 0;
+	return __fence_is_later(f1->seqno, f2->seqno);
 }
 
 /**
@@ -369,6 +382,50 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
 		return fence_is_signaled(f2) ? NULL : f2;
 }
 
+/**
+ * fence_get_status_locked - returns the status upon completion
+ * @fence: [in]	the fence to query
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence (to indicate whether the fence was completed due to an error
+ * rather than success). The value of the status condition is only valid
+ * if the fence has been signaled, so fence_get_status_locked() first checks
+ * the signal state before reporting the error status.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence completed with an error.
+ */
+static inline int fence_get_status_locked(struct fence *fence)
+{
+	if (fence_is_signaled_locked(fence))
+		return fence->error ?: 1;
+	else
+		return 0;
+}
+
+int fence_get_status(struct fence *fence);
+
+/**
+ * fence_set_error - flag an error condition on the fence
+ * @fence: [in]	the fence
+ * @error: [in]	the error to store
+ *
+ * Drivers can supply an optional error status condition before they signal
+ * the fence, to indicate that the fence was completed due to an error
+ * rather than success. This must be set before signaling (so that the value
+ * is visible before any waiters on the signal callback are woken). This
+ * helper exists to help catch erroneous setting of #fence.error.
+ */
+static inline void fence_set_error(struct fence *fence,
+				       int error)
+{
+	BUG_ON(test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+	BUG_ON(error >= 0 || error < -MAX_ERRNO);
+
+	fence->error = error;
+}
+
 signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
 signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
 				   bool intr, signed long timeout);
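With @status split into an explicit @error field, a fence provider records a failure with fence_set_error() before signalling, and consumers read the combined result through fence_get_status()/fence_get_status_locked(): 0 while pending, 1 on success, or the stored negative errno. A minimal sketch under the assumption of a driver-private completion path (function names are illustrative):

	#include <linux/fence.h>

	/* Provider side: the error must be set before fence_signal(). */
	static void example_complete(struct fence *f, int err)
	{
		if (err)
			fence_set_error(f, err);
		fence_signal(f);
	}

	/* Consumer side: 0 = pending, 1 = success, <0 = completed with error. */
	static int example_result(struct fence *f)
	{
		return fence_get_status(f);
	}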
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385..206fe3b 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
+#include <linux/atomic.h>
 
 struct device;
 struct device_node;
@@ -70,7 +71,7 @@ struct gen_pool {
  */
 struct gen_pool_chunk {
 	struct list_head next_chunk;	/* next chunk in pool */
-	atomic_t avail;
+	atomic_long_t avail;
 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
 	unsigned long start_addr;	/* start address of memory chunk */
 	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ee971f3..7118876 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -128,6 +128,8 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_ARP_ANNOUNCE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
 #define IN_DEV_ARP_IGNORE(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
 #define IN_DEV_ARP_NOTIFY(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_NF_IPV4_DEFRAG_SKIP(in_dev) \
+	IN_DEV_ORCONF((in_dev), NF_IPV4_DEFRAG_SKIP)
 
 struct in_ifaddr {
 	struct hlist_node	hash;
diff --git a/include/linux/input/synaptics_dsx.h b/include/linux/input/synaptics_dsx.h
new file mode 100644
index 0000000..56fe12e
--- /dev/null
+++ b/include/linux/input/synaptics_dsx.h
@@ -0,0 +1,113 @@
+/*
+ * Synaptics DSX touchscreen driver
+ *
+ * Copyright (C) 2012-2016 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+#define PLATFORM_DRIVER_NAME "synaptics_dsx"
+#define STYLUS_DRIVER_NAME "synaptics_dsx_stylus"
+#define ACTIVE_PEN_DRIVER_NAME "synaptics_dsx_active_pen"
+#define PROXIMITY_DRIVER_NAME "synaptics_dsx_proximity"
+#define GESTURE_DRIVER_NAME "synaptics_dsx_gesture"
+#define I2C_DRIVER_NAME "synaptics_dsx_i2c"
+#define SPI_DRIVER_NAME "synaptics_dsx_spi"
+
+/*
+ * struct synaptics_dsx_button_map - button map
+ * @nbuttons: number of buttons
+ * @map: pointer to array of button codes
+ */
+struct synaptics_dsx_button_map {
+	unsigned char nbuttons;
+	unsigned int *map;
+};
+
+/*
+ * struct synaptics_dsx_board_data - DSX board data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @swap_axes: swap axes flag
+ * @irq_gpio: attention interrupt GPIO
+ * @irq_on_state: attention interrupt active state
+ * @power_gpio: power switch GPIO
+ * @power_on_state: power switch active state
+ * @reset_gpio: reset GPIO
+ * @reset_on_state: reset active state
+ * @max_y_for_2d: maximum y value for 2D area when virtual buttons are present
+ * @irq_flags: IRQ flags
+ * @i2c_addr: I2C slave address
+ * @ub_i2c_addr: microbootloader mode I2C slave address
+ * @device_descriptor_addr: HID device descriptor address
+ * @panel_x: x-axis resolution of display panel
+ * @panel_y: y-axis resolution of display panel
+ * @power_delay_ms: delay time to wait after powering up device
+ * @reset_delay_ms: delay time to wait after resetting device
+ * @reset_active_ms: reset active time
+ * @byte_delay_us: delay time between two bytes of SPI data
+ * @block_delay_us: delay time between two SPI transfers
+ * @addr_delay_us: delay time after sending address word
+ * @pwr_reg_name: pointer to name of regulator for power control
+ * @bus_reg_name: pointer to name of regulator for bus pullup control
+ * @cap_button_map: pointer to 0D button map
+ * @vir_button_map: pointer to virtual button map
+ */
+struct synaptics_dsx_board_data {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	int irq_gpio;
+	int irq_on_state;
+	int power_gpio;
+	int power_on_state;
+	int reset_gpio;
+	int reset_on_state;
+	int max_y_for_2d;
+	unsigned long irq_flags;
+	unsigned short i2c_addr;
+	unsigned short ub_i2c_addr;
+	unsigned short device_descriptor_addr;
+	unsigned int panel_x;
+	unsigned int panel_y;
+	unsigned int power_delay_ms;
+	unsigned int reset_delay_ms;
+	unsigned int reset_active_ms;
+	unsigned int byte_delay_us;
+	unsigned int block_delay_us;
+	unsigned int addr_delay_us;
+	const char *pwr_reg_name;
+	const char *bus_reg_name;
+	struct synaptics_dsx_button_map *cap_button_map;
+	struct synaptics_dsx_button_map *vir_button_map;
+};
+
+#endif
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 80faf44..dd1b009 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,6 +476,7 @@ enum {
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
+	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 907e029..16155d0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -351,6 +351,7 @@ struct fault_env {
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
+	int (*split)(struct vm_area_struct * area, unsigned long addr);
 	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 634c4c5..c540001 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -63,8 +63,9 @@ static inline bool arch_validate_prot(unsigned long prot)
  * ("bit1" and "bit2" must be single bits)
  */
 #define _calc_vm_trans(x, bit1, bit2) \
+  ((!(bit1) || !(bit2)) ? 0 : \
   ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
-   : ((x) & (bit1)) / ((bit1) / (bit2)))
+   : ((x) & (bit1)) / ((bit1) / (bit2))))
 
 /*
  * Combine the mmap "prot" argument into "vm_flags" used internally.
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 25c0dc3..854dfa6 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -381,18 +381,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 	___pmd;								\
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)		\
-({									\
-	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
-	pmd_t ___pmd;							\
-									\
-	___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);		\
-	mmu_notifier_invalidate_range(__mm, ___haddr,			\
-				      ___haddr + HPAGE_PMD_SIZE);	\
-									\
-	___pmd;								\
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -480,7 +468,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define pmdp_clear_young_notify pmdp_test_and_clear_young
 #define	ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h
index 31eaf13..feebe16 100644
--- a/include/linux/msm_gpi.h
+++ b/include/linux/msm_gpi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -208,6 +208,7 @@ struct msm_gpi_dma_async_tx_cb_param {
 	u32 length;
 	enum msm_gpi_tce_code completion_code; /* TCE event code */
 	u32 status;
+	struct __packed msm_gpi_tre imed_tre;
 	void *userdata;
 };
 
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7..e821a31 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -88,10 +88,11 @@ static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
 #endif
 
 #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
 #else
 #define board_onenand_data	NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
 {
+	return 0;
 }
 #endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5b5d4c7..34ed577 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -63,6 +63,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
 	return tsk->signal->oom_mm;
 }
 
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs special
+ * handling. This is guaranteed to stay true once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+	return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
 		unsigned long totalpages);
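Unlike tsk_is_oom_victim(), which looks at the task, mm_is_oom_victim() keys off MMF_OOM_VICTIM on the mm itself, so the answer stays stable even after the victim task has exited or detached from the address space. A hedged sketch of a caller backing off from an mm the OOM reaper may already be tearing down; the function is hypothetical, not part of this patch:

	#include <linux/oom.h>

	static int example_prepare_heavy_work(struct mm_struct *mm)
	{
		/* MMF_OOM_VICTIM never clears, so this check cannot race back to false */
		if (mm_is_oom_victim(mm))
			return -EBUSY;
		return 0;
	}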
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 70936bf..47c5b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1277,6 +1277,7 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb..6306ab1 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu);
 int psci_cpu_suspend_enter(unsigned long index);
 
 struct psci_operations {
+	u32 (*get_version)(void);
 	int (*cpu_suspend)(u32 state, unsigned long entry_point);
 	int (*cpu_off)(u32 state);
 	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 4ae95f7..6224a0a 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -100,44 +100,6 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 }
 
 /**
- * hlist_nulls_add_tail_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals.  NOTE: tail insertion requires
- * list traversal.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
- * or hlist_nulls_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.  Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-					struct hlist_nulls_head *h)
-{
-	struct hlist_nulls_node *i, *last = NULL;
-
-	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-	     i = hlist_nulls_next_rcu(i))
-		last = i;
-
-	if (last) {
-		n->next = last->next;
-		n->pprev = &last->next;
-		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-	} else {
-		hlist_nulls_add_head_rcu(n, h);
-	}
-}
-
-/**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:	the type * to use as a loop cursor.
  * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
diff --git a/include/linux/regulator/cpr-regulator.h b/include/linux/regulator/cpr-regulator.h
new file mode 100644
index 0000000..7a04e70
--- /dev/null
+++ b/include/linux/regulator/cpr-regulator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_CPR_REGULATOR_H__
+#define __REGULATOR_CPR_REGULATOR_H__
+
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_CPR
+
+int __init cpr_regulator_init(void);
+
+#else
+
+static inline int __init cpr_regulator_init(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_CPR */
+
+#endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0d4035a..62c770d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -636,6 +636,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
+#define MMF_OOM_VICTIM		25      /* mm is the oom victim */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c6f0f0d..00a1f33 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -116,6 +116,12 @@ struct attribute_group {
 	.show	= _name##_show,						\
 }
 
+#define __ATTR_RO_MODE(_name, _mode) {					\
+	.attr	= { .name = __stringify(_name),				\
+		    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },		\
+	.show	= _name##_show,						\
+}
+
 #define __ATTR_WO(_name) {						\
 	.attr	= { .name = __stringify(_name), .mode = S_IWUSR },	\
 	.store	= _name##_store,					\
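__ATTR_RO_MODE() fills the same role as __ATTR_RO() but lets the caller pick a mode other than the default read permissions, with VERIFY_OCTAL_PERMISSIONS() still sanity-checking the value at build time. A minimal sketch of a root-only readable device attribute; the attribute name and value are illustrative:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t diag_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}

	/* 0400: readable by root only */
	static struct device_attribute dev_attr_diag = __ATTR_RO_MODE(diag, 0400);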
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a700e5f..52bc890 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -47,7 +47,7 @@
 #define THERMAL_WEIGHT_DEFAULT 0
 
 /* Max sensors that can be used for a single virtual thermalzone */
-#define THERMAL_MAX_VIRT_SENSORS 8
+#define THERMAL_MAX_VIRT_SENSORS 10
 
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 200c3ab..972dabc 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -81,12 +81,18 @@ enum gsi_ep_op {
  * @buf_len: Size of each individual buffer is determined based on aggregation
  *	negotiated as per the protocol. In case of no aggregation supported by
  *	the protocol, we use default values.
+ * @db_reg_phs_addr_lsb: IPA channel doorbell register's physical address LSB
+ * @mapped_db_reg_phs_addr_lsb: doorbell LSB IOVA address mapped with IOMMU
+ * @db_reg_phs_addr_msb: IPA channel doorbell register's physical address MSB
  */
 struct usb_gsi_request {
 	void *buf_base_addr;
 	dma_addr_t dma;
 	size_t num_bufs;
 	size_t buf_len;
+	u32 db_reg_phs_addr_lsb;
+	dma_addr_t mapped_db_reg_phs_addr_lsb;
+	u32 db_reg_phs_addr_msb;
 };
 
 /*
@@ -468,9 +474,6 @@ struct usb_gadget_ops {
  * @deactivated: True if gadget is deactivated - in deactivated state it cannot
  *	be connected.
  * @connected: True if gadget is connected.
- * @bam2bam_func_enabled; Indicates function using bam2bam is enabled or not.
- * @extra_buf_alloc: Extra allocation size for AXI prefetch so that out of
- * boundary access is protected.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -524,9 +527,6 @@ struct usb_gadget {
 	unsigned			deactivated:1;
 	unsigned			connected:1;
 	bool				remote_wakeup;
-	bool				bam2bam_func_enabled;
-	u32				extra_buf_alloc;
-	bool				l1_supported;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index c32bc7e..974c379 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -21,7 +21,6 @@
 
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
-#define USB_GENCONFIG        (MSM_USB_BASE + 0x009C)
 #define USB_GENCONFIG_2      (MSM_USB_BASE + 0x00a0)
 #define ULPI_TX_PKT_EN_CLR_FIX	BIT(19)
 
@@ -40,15 +39,6 @@
 #define USBCMD_RESET   2
 #define USB_USBINTR          (MSM_USB_BASE + 0x0148)
 
-#define USB_L1_EP_CTRL       (MSM_USB_BASE + 0x0250)
-#define USB_L1_CONFIG        (MSM_USB_BASE + 0x0254)
-
-#define L1_CONFIG_LPM_EN        BIT(4)
-#define L1_CONFIG_REMOTE_WAKEUP BIT(5)
-#define L1_CONFIG_GATE_SYS_CLK	BIT(7)
-#define L1_CONFIG_PHY_LPM	BIT(10)
-#define L1_CONFIG_PLL		BIT(11)
-
 #define PORTSC_PHCD            (1 << 23) /* phy suspend mode */
 #define PORTSC_PTS_MASK        (3 << 30)
 #define PORTSC_PTS_ULPI        (2 << 30)
@@ -62,10 +52,6 @@
 #define ULPI_DATA(n)          ((n) & 255)
 #define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
 
-#define GENCONFIG_BAM_DISABLE (1 << 13)
-#define GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE (1 << 4)
-#define GENCONFIG_ULPI_SERIAL_EN (1 << 5)
-
 /* synopsys 28nm phy registers */
 #define ULPI_PWR_CLK_MNG_REG	0x88
 #define OTG_COMP_DISABLE	BIT(0)
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 6e0ce8c..fde7550 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -79,6 +79,7 @@ struct usbnet {
 #		define EVENT_RX_KILL	10
 #		define EVENT_LINK_CHANGE	11
 #		define EVENT_SET_RX_MODE	12
+#		define EVENT_NO_IP_ALIGN	13
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfe..584f9a6 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
 	struct virtio_vsock_hdr	hdr;
 	struct work_struct work;
 	struct list_head list;
+	/* socket refcnt not held, only use for cancellation */
+	struct vsock_sock *vsk;
 	void *buf;
 	u32 len;
 	u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
 
 struct virtio_vsock_pkt_info {
 	u32 remote_cid, remote_port;
+	struct vsock_sock *vsk;
 	struct msghdr *msg;
 	u32 pkt_len;
 	u16 type;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f275896..f32ed9a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@ struct vsock_transport {
 	void (*destruct)(struct vsock_sock *);
 	void (*release)(struct vsock_sock *);
 
+	/* Cancel all pending packets sent on vsock. */
+	int (*cancel_pkt)(struct vsock_sock *vsk);
+
 	/* Connections. */
 	int (*connect)(struct vsock_sock *);
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 97f8ed2..badd144 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -649,11 +649,7 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-	    sk->sk_family == AF_INET6)
-		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-	else
-		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 1beab55..818a38f 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -243,10 +243,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
 static inline enum ib_mtu iboe_get_mtu(int mtu)
 {
 	/*
-	 * reduce IB headers from effective IBoE MTU. 28 stands for
-	 * atomic header which is the biggest possible header after BTH
+	 * Reduce IB headers from effective IBoE MTU.
 	 */
-	mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+	mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
+		     IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
+		     IB_ICRC_BYTES);
 
 	if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
 		return IB_MTU_4096;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b13419c..e02b78a 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -37,14 +37,17 @@
 #include <uapi/linux/if_ether.h>
 
 enum {
-	IB_LRH_BYTES  = 8,
-	IB_ETH_BYTES  = 14,
-	IB_VLAN_BYTES = 4,
-	IB_GRH_BYTES  = 40,
-	IB_IP4_BYTES  = 20,
-	IB_UDP_BYTES  = 8,
-	IB_BTH_BYTES  = 12,
-	IB_DETH_BYTES = 8
+	IB_LRH_BYTES		= 8,
+	IB_ETH_BYTES		= 14,
+	IB_VLAN_BYTES		= 4,
+	IB_GRH_BYTES		= 40,
+	IB_IP4_BYTES		= 20,
+	IB_UDP_BYTES		= 8,
+	IB_BTH_BYTES		= 12,
+	IB_DETH_BYTES		= 8,
+	IB_EXT_ATOMICETH_BYTES	= 28,
+	IB_EXT_XRC_BYTES	= 4,
+	IB_ICRC_BYTES		= 4
 };
 
 struct ib_field {
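Taken together with the iboe_get_mtu() change above, the worst-case RoCE header overhead now deducted is IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + IB_ICRC_BYTES = 40 + 8 + 12 + 4 + 28 + 4 = 96 bytes, so an Ethernet MTU of at least 4096 + 96 = 4192 is needed before IB_MTU_4096 is reported; the old formula subtracted only 40 + 12 + 28 = 80 bytes.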
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index dae99d7..706a701 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -165,11 +165,11 @@ struct expander_device {
 
 struct sata_device {
 	unsigned int class;
-	struct smp_resp        rps_resp; /* report_phy_sata_resp */
 	u8     port_no;        /* port number, if this is a PM (Port) */
 
 	struct ata_port *ap;
 	struct ata_host ata_host;
+	struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
 	u8     fis[ATA_RESP_FIS_SIZE];
 };
 
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index b4733d7..5bc50b5 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -86,6 +86,7 @@ enum msm_dump_data_ids {
 	MSM_DUMP_DATA_FCM = 0xEE,
 	MSM_DUMP_DATA_POWER_REGS = 0xED,
 	MSM_DUMP_DATA_TMC_ETF = 0xF0,
+	MSM_DUMP_DATA_TPDM_SWAO_MCMB = 0xF2,
 	MSM_DUMP_DATA_TMC_REG = 0x100,
 	MSM_DUMP_DATA_LOG_BUF = 0x110,
 	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
diff --git a/include/soc/qcom/msm_tz_smmu.h b/include/soc/qcom/msm_tz_smmu.h
index a83c9bd..1d47f1f 100644
--- a/include/soc/qcom/msm_tz_smmu.h
+++ b/include/soc/qcom/msm_tz_smmu.h
@@ -56,6 +56,15 @@ enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev);
 int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx);
 int msm_iommu_sec_pgtbl_init(void);
 int register_iommu_sec_ptbl(void);
+bool arm_smmu_skip_write(void __iomem *addr);
+
+/* Do not write to the SMMU global space with CONFIG_MSM_TZ_SMMU */
+#undef writel_relaxed
+#define writel_relaxed(v, c)	do {					\
+	if (!arm_smmu_skip_write(c))					\
+		((void)__raw_writel((__force u32)cpu_to_le32(v), (c)));	\
+	} while (0)
+
 #else
 
 static inline int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 12fa374..d9a526d 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -50,6 +50,8 @@ enum vmid {
 #define PERM_EXEC			0x1
 
 #ifdef CONFIG_QCOM_SECURE_BUFFER
+int msm_secure_table(struct sg_table *table);
+int msm_unsecure_table(struct sg_table *table);
 int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -57,8 +59,19 @@ int hyp_assign_table(struct sg_table *table,
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
+bool msm_secure_v2_is_supported(void);
 const char *msm_secure_vmid_to_string(int secure_vmid);
 #else
+static inline int msm_secure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+
+static inline int msm_unsecure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+
 static inline int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -74,6 +87,11 @@ static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
 	return -EINVAL;
 }
 
+static inline bool msm_secure_v2_is_supported(void)
+{
+	return false;
+}
+
 static inline const char *msm_secure_vmid_to_string(int secure_vmid)
 {
 	return "N/A";
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 9e91e4b..505e82b 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -110,6 +110,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8953")
 #define early_machine_is_sdm450()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm450")
+#define early_machine_is_sdm632()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm632")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -154,6 +156,7 @@
 #define early_machine_is_sda670()	0
 #define early_machine_is_msm8953()	0
 #define early_machine_is_sdm450()	0
+#define early_machine_is_sdm632()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -220,6 +223,8 @@ enum msm_cpu {
 	MSM_CPU_SDA670,
 	MSM_CPU_8953,
 	MSM_CPU_SDM450,
+	MSM_CPU_SDM632,
+	MSM_CPU_SDA632,
 };
 
 struct msm_soc_info {
diff --git a/include/soc/qcom/system_pm.h b/include/soc/qcom/system_pm.h
index 6d0993a..028c729 100644
--- a/include/soc/qcom/system_pm.h
+++ b/include/soc/qcom/system_pm.h
@@ -14,13 +14,15 @@
 #define __SOC_QCOM_SYS_PM_H__
 
 #ifdef CONFIG_QTI_SYSTEM_PM
-int system_sleep_enter(uint64_t sleep_val);
+int system_sleep_enter(void);
 
 void system_sleep_exit(void);
 
 bool system_sleep_allowed(void);
+
+int system_sleep_update_wakeup(void);
 #else
-static inline int system_sleep_enter(uint64_t sleep_val)
+static inline int system_sleep_enter(void)
 { return -ENODEV; }
 
 static inline void system_sleep_exit(void)
@@ -29,6 +31,9 @@ static inline void system_sleep_exit(void)
 static inline bool system_sleep_allowed(void)
 { return false; }
 
+static inline int system_sleep_update_wakeup(void)
+{ return -ENODEV; }
+
 #endif /* CONFIG_QTI_SYSTEM_PM */
 
 #endif /* __SOC_QCOM_SYS_PM_H__ */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index a87e894..eb3b23b 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -297,7 +297,7 @@ struct t10_alua_tg_pt_gp {
 	struct list_head tg_pt_gp_lun_list;
 	struct se_lun *tg_pt_gp_alua_lun;
 	struct se_node_acl *tg_pt_gp_alua_nacl;
-	struct delayed_work tg_pt_gp_transition_work;
+	struct work_struct tg_pt_gp_transition_work;
 	struct completion *tg_pt_gp_transition_complete;
 };
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8dc7ad5..0125cde 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -273,6 +273,7 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
 		__field(u64,		exec_time	)
 		__field(u32,		freq		)
 		__field(u32,		legacy_freq	)
+		__field(u32,		max_freq	)
 		__field(pid_t,		pid		)
 		__array(char,	comm,   TASK_COMM_LEN	)
 	),
@@ -284,13 +285,15 @@ TRACE_EVENT(sched_get_task_cpu_cycles,
 		__entry->exec_time	= exec_time;
 		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
 		__entry->legacy_freq	= cpu_cur_freq(cpu);
+		__entry->max_freq	= cpu_max_freq(cpu);
 		__entry->pid            = p->pid;
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u task=%d (%s)",
+	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)",
 		  __entry->cpu, __entry->event, __entry->cycles,
-		  __entry->exec_time, __entry->freq, __entry->legacy_freq, __entry->pid, __entry->comm)
+		  __entry->exec_time, __entry->freq, __entry->legacy_freq,
+		  __entry->max_freq, __entry->pid, __entry->comm)
 );
 
 TRACE_EVENT(sched_update_task_ravg,
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8a707f8..8a13e39 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv,
 	TP_ARGS(rqst, status),
 
 	TP_STRUCT__entry(
-		__field(struct sockaddr *, addr)
 		__field(__be32, xid)
 		__field(int, status)
 		__field(unsigned long, flags)
+		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
 	),
 
 	TP_fast_assign(
-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
 		__entry->xid = status > 0 ? rqst->rq_xid : 0;
 		__entry->status = status;
 		__entry->flags = rqst->rq_flags;
+		memcpy(__get_dynamic_array(addr),
+			&rqst->rq_addr, rqst->rq_addrlen);
 	),
 
-	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
+	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
+			(struct sockaddr *)__get_dynamic_array(addr),
 			be32_to_cpu(__entry->xid), __entry->status,
 			show_rqstp_flags(__entry->flags))
 );
@@ -513,22 +515,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
 	TP_ARGS(rqst, status),
 
 	TP_STRUCT__entry(
-		__field(struct sockaddr *, addr)
 		__field(__be32, xid)
-		__field(int, dropme)
 		__field(int, status)
 		__field(unsigned long, flags)
+		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
 	),
 
 	TP_fast_assign(
-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
 		__entry->xid = rqst->rq_xid;
 		__entry->status = status;
 		__entry->flags = rqst->rq_flags;
+		memcpy(__get_dynamic_array(addr),
+			&rqst->rq_addr, rqst->rq_addrlen);
 	),
 
 	TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
-		__entry->addr, be32_to_cpu(__entry->xid),
+		(struct sockaddr *)__get_dynamic_array(addr),
+		be32_to_cpu(__entry->xid),
 		__entry->status, show_rqstp_flags(__entry->flags))
 );
 
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 22b6ad3..8562b1c 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN,			0,  8)
 
 #define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)						\
+#define MAKE_PTR(gen, offset, dev)					\
 	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index f291569..739a4f3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -167,6 +167,7 @@ enum
 	IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
 	IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
 	IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
+	IPV4_DEVCONF_NF_IPV4_DEFRAG_SKIP,
 	__IPV4_DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index ef07f78..de3f890 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -165,6 +165,8 @@
 #define IPA_FLT_MAC_DST_ADDR_L2TP	(1ul << 22)
 #define IPA_FLT_TCP_SYN			(1ul << 23)
 #define IPA_FLT_TCP_SYN_L2TP		(1ul << 24)
+#define IPA_FLT_L2TP_INNER_IP_TYPE  (1ul << 25)
+#define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 08aa800..d67f476 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -482,6 +482,7 @@ enum
 	NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
 	NET_IPV4_CONF_ARP_ACCEPT=21,
 	NET_IPV4_CONF_ARP_NOTIFY=22,
+	NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP=23,
 };
 
 /* /proc/sys/net/ipv4/netfilter */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 0d69769..0303a6f 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -423,6 +423,11 @@ struct usb_endpoint_descriptor {
 #define USB_ENDPOINT_XFER_INT		3
 #define USB_ENDPOINT_MAX_ADJUSTABLE	0x80
 
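+/*
+ * wMaxPacketSize bits 12:11 encode the number of additional transactions
+ * per microframe for high-speed periodic endpoints; the multiplier is
+ * that value plus one.
+ */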
+#define USB_EP_MAXP_MULT_SHIFT	11
+#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
 #define USB_ENDPOINT_INTRTYPE		0x30
 #define USB_ENDPOINT_INTR_PERIODIC	(0 << 4)
@@ -630,6 +635,20 @@ static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
 	return __le16_to_cpu(epd->wMaxPacketSize);
 }
 
+/**
+ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
+ * @epd: endpoint to be checked
+ *
+ * Return @epd's wMaxPacketSize[12:11] + 1
+ */
+static inline int
+usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
+{
+	int maxp = __le16_to_cpu(epd->wMaxPacketSize);
+
+	return USB_EP_MAXP_MULT(maxp) + 1;
+}
+
 static inline int usb_endpoint_interrupt_type(
 		const struct usb_endpoint_descriptor *epd)
 {
@@ -854,6 +873,8 @@ struct usb_wireless_cap_descriptor {	/* Ultra Wide Band */
 	__u8  bReserved;
 } __attribute__((packed));
 
+#define USB_DT_USB_WIRELESS_CAP_SIZE	11
+
 /* USB 2.0 Extension descriptor */
 #define	USB_CAP_TYPE_EXT		2
 
@@ -1046,6 +1067,7 @@ struct usb_ptm_cap_descriptor {
 	__u8  bDevCapabilityType;
 } __attribute__((packed));
 
+#define USB_DT_USB_PTM_ID_SIZE		3
 /*
  * The size of the descriptor for the Sublink Speed Attribute Count
  * (SSAC) specified in bmAttributes[4:0].
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 229dd25..71772c3 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -1016,6 +1016,7 @@ struct v4l2_buffer {
 #define V4L2_QCOM_BUF_INPUT_UNSUPPORTED		0x01000000
 #define V4L2_QCOM_BUF_FLAG_EOS			0x02000000
 #define V4L2_QCOM_BUF_FLAG_READONLY		0x04000000
+#define V4L2_QCOM_BUF_FLAG_PERF_MODE		0x20000000
 #define V4L2_MSM_BUF_FLAG_DEFER			0x40000000
 #define V4L2_QCOM_BUF_FLAG_IDRFRAME		0x80000000
 
diff --git a/kernel/audit.c b/kernel/audit.c
index f1ca116..da4e7c0 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -79,13 +79,13 @@ static int	audit_initialized;
 #define AUDIT_OFF	0
 #define AUDIT_ON	1
 #define AUDIT_LOCKED	2
-u32		audit_enabled;
-u32		audit_ever_enabled;
+u32		audit_enabled = AUDIT_OFF;
+u32		audit_ever_enabled = !!AUDIT_OFF;
 
 EXPORT_SYMBOL_GPL(audit_enabled);
 
 /* Default state when kernel boots without any parameters. */
-static u32	audit_default;
+static u32	audit_default = AUDIT_OFF;
 
 /* If auditing cannot proceed, audit_failure selects what happens. */
 static u32	audit_failure = AUDIT_FAIL_PRINTK;
@@ -1199,8 +1199,6 @@ static int __init audit_init(void)
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
 	audit_initialized = AUDIT_INITIALIZED;
-	audit_enabled = audit_default;
-	audit_ever_enabled |= !!audit_default;
 
 	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
 
@@ -1217,6 +1215,8 @@ static int __init audit_enable(char *str)
 	audit_default = !!simple_strtol(str, NULL, 0);
 	if (!audit_default)
 		audit_initialized = AUDIT_DISABLED;
+	audit_enabled = audit_default;
+	audit_ever_enabled = !!audit_enabled;
 
 	pr_info("%s\n", audit_default ?
 		"enabled (after initialization)" : "disabled (until reboot)");
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 5c51d19..673fa6f 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
+	unsigned long flags;
 	int orig_cpu, cpu;
 
+	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock(&head->lock);
+			raw_spin_unlock_irqrestore(&head->lock, flags);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu)
+		if (cpu == orig_cpu) {
+			local_irq_restore(flags);
 			return NULL;
+		}
 	}
 }
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 372454a..d7eeebf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1790,10 +1790,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			/* case: R = imm
 			 * remember the value we stored into this reg
 			 */
+			u64 imm;
+
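+			/*
+			 * BPF_ALU64 sign-extends the 32-bit immediate,
+			 * while 32-bit BPF_ALU zero-extends it.
+			 */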
+			if (BPF_CLASS(insn->code) == BPF_ALU64)
+				imm = insn->imm;
+			else
+				imm = (u32)insn->imm;
+
 			regs[insn->dst_reg].type = CONST_IMM;
-			regs[insn->dst_reg].imm = insn->imm;
-			regs[insn->dst_reg].max_value = insn->imm;
-			regs[insn->dst_reg].min_value = insn->imm;
+			regs[insn->dst_reg].imm = imm;
+			regs[insn->dst_reg].max_value = imm;
+			regs[insn->dst_reg].min_value = imm;
 		}
 
 	} else if (opcode > BPF_END) {
@@ -1861,10 +1868,28 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			   ((BPF_SRC(insn->code) == BPF_X &&
 			     regs[insn->src_reg].type == CONST_IMM) ||
 			    BPF_SRC(insn->code) == BPF_K)) {
-			if (BPF_SRC(insn->code) == BPF_X)
+			if (BPF_SRC(insn->code) == BPF_X) {
+				/* check in case the register contains a big
+				 * 64-bit value
+				 */
+				if (regs[insn->src_reg].imm < -MAX_BPF_STACK ||
+				    regs[insn->src_reg].imm > MAX_BPF_STACK) {
+					verbose("R%d value too big in R%d pointer arithmetic\n",
+						insn->src_reg, insn->dst_reg);
+					return -EACCES;
+				}
 				dst_reg->imm += regs[insn->src_reg].imm;
-			else
+			} else {
+				/* safe against overflow: addition of 32-bit
+				 * numbers in 64-bit representation
+				 */
 				dst_reg->imm += insn->imm;
+			}
+			if (dst_reg->imm > 0 || dst_reg->imm < -MAX_BPF_STACK) {
+				verbose("R%d out-of-bounds pointer arithmetic\n",
+					insn->dst_reg);
+				return -EACCES;
+			}
 			return 0;
 		} else if (opcode == BPF_ADD &&
 			   BPF_CLASS(insn->code) == BPF_ALU64 &&
@@ -2697,11 +2722,12 @@ static bool states_equal(struct bpf_verifier_env *env,
 
 		/* If we didn't map access then again we don't care about the
 		 * mismatched range values and it's ok if our old type was
-		 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
+		 * UNKNOWN and we didn't go to a NOT_INIT'ed or pointer reg.
 		 */
 		if (rold->type == NOT_INIT ||
 		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
-		     rcur->type != NOT_INIT))
+		     rcur->type != NOT_INIT &&
+		     !__is_pointer_value(env->allow_ptr_leaks, rcur)))
 			continue;
 
 		/* Don't care about the reg->id in this case. */
@@ -2862,6 +2888,7 @@ static int do_check(struct bpf_verifier_env *env)
 		if (err)
 			return err;
 
+		env->insn_aux_data[insn_idx].seen = true;
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -3059,6 +3086,7 @@ static int do_check(struct bpf_verifier_env *env)
 					return err;
 
 				insn_idx++;
+				env->insn_aux_data[insn_idx].seen = true;
 			} else {
 				verbose("invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -3210,6 +3238,63 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
 			insn->src_reg = 0;
 }
 
+/* single env->prog->insnsi[off] instruction was replaced with the range
+ * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
+ * [0, off) and [off, end) to new locations, so the patched range stays zero
+ */
+static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
+				u32 off, u32 cnt)
+{
+	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+	int i;
+
+	if (cnt == 1)
+		return 0;
+	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
+	if (!new_data)
+		return -ENOMEM;
+	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
+	memcpy(new_data + off + cnt - 1, old_data + off,
+	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	for (i = off; i < off + cnt - 1; i++)
+		new_data[i].seen = true;
+	env->insn_aux_data = new_data;
+	vfree(old_data);
+	return 0;
+}
+
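+/*
+ * Patch the instruction at @off with @patch (@len instructions) and keep
+ * insn_aux_data in sync so later passes index it by post-patch offsets.
+ */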
+static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
+					    const struct bpf_insn *patch, u32 len)
+{
+	struct bpf_prog *new_prog;
+
+	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
+	if (!new_prog)
+		return NULL;
+	if (adjust_insn_aux_data(env, new_prog->len, off, len))
+		return NULL;
+	return new_prog;
+}
+
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	const int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (aux_data[i].seen)
+			continue;
+		memcpy(insn + i, &nop, sizeof(nop));
+	}
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -3229,10 +3314,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
 		} else if (cnt) {
-			new_prog = bpf_patch_insn_single(env->prog, 0,
-							 insn_buf, cnt);
+			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
 			if (!new_prog)
 				return -ENOMEM;
+
 			env->prog = new_prog;
 			delta += cnt - 1;
 		}
@@ -3253,7 +3338,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		else
 			continue;
 
-		if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
+		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
 			continue;
 
 		cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
@@ -3263,8 +3348,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			return -EINVAL;
 		}
 
-		new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
-						 cnt);
+		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
@@ -3373,6 +3457,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	free_states(env);
 
 	if (ret == 0)
+		sanitize_dead_code(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 915e750..823fe5a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1388,11 +1388,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
-	[CPUHP_AP_SMPCFD_DYING] = {
-		.name			= "smpcfd:dying",
-		.startup.single		= NULL,
-		.teardown.single	= smpcfd_dying_cpu,
-	},
 	/*
 	 * Handled on control processor until the plugged processor manages
 	 * this itself.
@@ -1439,6 +1434,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup.single		= NULL,
 		.teardown.single	= kmap_remove_unused_cpu,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.name			= "smpcfd:dying",
+		.startup.single		= NULL,
+		.teardown.single	= smpcfd_dying_cpu,
+	},
 	/* Entry state on starting. Interrupts enabled from here on. Transient
 	 * state for synchronization */
 	[CPUHP_AP_ONLINE] = {
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 0b89128..3990c1f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -357,7 +357,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
 			}
 			kdb_printf("\n");
 			for (i = 0; i < count; i++) {
-				if (kallsyms_symbol_next(p_tmp, i) < 0)
+				if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
 					break;
 				kdb_printf("%s ", p_tmp);
 				*(p_tmp + len) = '\0';
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 712ba4e..e144ded 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7218,25 +7218,12 @@ static void perf_log_itrace_start(struct perf_event *event)
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-				   int throttle, struct perf_sample_data *data,
-				   struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 seq;
 	int ret = 0;
-
-	/*
-	 * Non-sampling counters might still use the PMI to fold short
-	 * hardware counters, ignore those.
-	 */
-	if (unlikely(!is_sampling_event(event)))
-		return 0;
+	u64 seq;
 
 	seq = __this_cpu_read(perf_throttled_seq);
 	if (seq != hwc->interrupts_seq) {
@@ -7264,6 +7251,34 @@ static int __perf_event_overflow(struct perf_event *event,
 			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
+	return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+	return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+				   int throttle, struct perf_sample_data *data,
+				   struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	int ret = 0;
+
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
+	ret = __perf_event_account_interrupt(event, throttle);
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a9b8cf5..def4548 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -612,7 +612,7 @@ static __init int jump_label_test(void)
 
 	return 0;
 }
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
 #endif /* STATIC_KEYS_SELFTEST */
 
 #endif /* HAVE_JUMP_LABEL */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bbe783e..59fcdbe 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5100,6 +5100,14 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+
+	/*
+	 * Userspace tasks are forbidden to run on isolated CPUs,
+	 * so exclude isolated CPUs from the getaffinity mask.
+	 */
+	if (!(p->flags & PF_KTHREAD))
+		cpumask_andnot(mask, mask, cpu_isolated_mask);
+
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
@@ -6446,6 +6454,12 @@ static int init_rootdomain(struct root_domain *rd)
 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_dlo_mask;
 
+#ifdef HAVE_RT_PUSH_IPI
+	rd->rto_cpu = -1;
+	raw_spin_lock_init(&rd->rto_lock);
+	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+#endif
+
 	init_dl_bw(&rd->dl_bw);
 	if (cpudl_init(&rd->cpudl) != 0)
 		goto free_dlo_mask;
@@ -9614,11 +9628,11 @@ void sched_exit(struct task_struct *p)
 	reset_task_stats(p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-	free_task_load_ptrs(p);
 
 	enqueue_task(rq, p, 0);
 	clear_ed_task(p, rq);
 	task_rq_unlock(rq, p, &rf);
+	free_task_load_ptrs(p);
 }
 #endif /* CONFIG_SCHED_WALT */
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 08d4511..da8261d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -446,13 +446,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  *
  * This function returns true if:
  *
- *   runtime / (deadline - t) > dl_runtime / dl_period ,
+ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
  *
  * IOW we can't recycle current parameters.
  *
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
  * task with deadline equal to period this is the same of using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
  */
 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 			       struct sched_dl_entity *pi_se, u64 t)
@@ -477,7 +477,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 	 * of anything below microseconds resolution is actually fiction
 	 * (but still we want to give the user that illusion >;).
 	 */
-	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 	right = ((dl_se->deadline - t) >> DL_SCALE) *
 		(pi_se->dl_runtime >> DL_SCALE);
 
@@ -506,10 +506,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
 	}
 }
 
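+/* Absolute start of the period that follows the entity's current deadline. */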
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
 /*
  * If the entity depleted all its runtime, and if we want it to sleep
  * while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
  * and try to activate it.
  *
  * Notice that it is important for the caller to know if the timer
@@ -531,7 +536,7 @@ static int start_dl_timer(struct task_struct *p)
 	 * that it is actually coming from rq->clock and not from
 	 * hrtimer's time base reading.
 	 */
-	act = ns_to_ktime(dl_se->deadline);
+	act = ns_to_ktime(dl_next_period(dl_se));
 	now = hrtimer_cb_get_time(timer);
 	delta = ktime_to_ns(now) - rq_clock(rq);
 	act = ktime_add_ns(act, delta);
@@ -639,6 +644,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = dl_task_offline_migration(rq, p);
 		rf.cookie = lockdep_pin_lock(&rq->lock);
+		update_rq_clock(rq);
 
 		/*
 		 * Now that the task has been migrated to the new RQ and we
@@ -690,6 +696,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 	timer->function = dl_task_timer;
 }
 
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with
+ * constrained deadline (deadline < period) might be awakened after the
+ * deadline, but before the next period. In this case, replenishing the
+ * task would allow it to run for runtime / deadline. As in this case
+ * deadline < period, CBS enables a task to run for more than the
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishing timer to the beginning of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+	struct task_struct *p = dl_task_of(dl_se);
+	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+			return;
+		dl_se->dl_throttled = 1;
+	}
+}
+
 static
 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 {
@@ -925,6 +962,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
 	__dequeue_dl_entity(dl_se);
 }
 
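+/* A constrained task has a relative deadline shorter than its period. */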
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+	return dl_se->dl_deadline < dl_se->dl_period;
+}
+
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -951,6 +993,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 	/*
+	 * Check if a constrained deadline task was activated
+	 * after the deadline but before the next period.
+	 * If that is the case, the task will be throttled and
+	 * the replenishment timer will be set to the next period.
+	 */
+	if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+		dl_check_constrained_dl(&p->dl);
+
+	/*
 	 * If p is throttled, we do nothing. In fact, if it exhausted
 	 * its budget it needs a replenishment and, since it now is on
 	 * its rq, the bandwidth timer callback (which clearly has not
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 130bbb7..1ff2e5e 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6606,7 +6606,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index c30c48f..a1afd13 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, false)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1294950..645b472 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -77,10 +77,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
-static void push_irq_work_func(struct irq_work *work);
-#endif
-
 void init_rt_rq(struct rt_rq *rt_rq)
 {
 	struct rt_prio_array *array;
@@ -100,13 +96,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
-
-#ifdef HAVE_RT_PUSH_IPI
-	rt_rq->push_flags = 0;
-	rt_rq->push_cpu = nr_cpu_ids;
-	raw_spin_lock_init(&rt_rq->push_lock);
-	init_irq_work(&rt_rq->push_work, push_irq_work_func);
-#endif
 #endif /* CONFIG_SMP */
 	/* We start in dequeued state, because no RT tasks are queued */
 	rt_rq->rt_queued = 0;
@@ -2147,160 +2136,166 @@ static void push_rt_tasks(struct rq *rq)
 }
 
 #ifdef HAVE_RT_PUSH_IPI
+
 /*
- * The search for the next cpu always starts at rq->cpu and ends
- * when we reach rq->cpu again. It will never return rq->cpu.
- * This returns the next cpu to check, or nr_cpu_ids if the loop
- * is complete.
+ * When a high priority task schedules out from a CPU and a lower priority
+ * task is scheduled in, a check is made to see if there's any RT tasks
+ * on other CPUs that are waiting to run because a higher priority RT task
+ * is currently running on its CPU. In this case, the CPU with multiple RT
+ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
+ * up that may be able to run one of its non-running queued RT tasks.
  *
- * rq->rt.push_cpu holds the last cpu returned by this function,
- * or if this is the first instance, it must hold rq->cpu.
+ * All CPUs with overloaded RT tasks need to be notified as there is currently
+ * no way to know which of these CPUs has the highest priority task waiting
+ * to run. Instead of trying to take a spinlock on each of these CPUs,
+ * which has been shown to cause large latency when done on machines with many
+ * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
+ * RT tasks waiting to run.
+ *
+ * Just sending an IPI to each of the CPUs is also an issue, as on machines
+ * with a large CPU count this can cause an IPI storm on a CPU, especially
+ * if it's the only CPU with multiple RT tasks queued, and a large number
+ * of CPUs are scheduling a lower priority task at the same time.
+ *
+ * Each root domain has its own irq work function that can iterate over
+ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
+ * tasks must be checked if there's one or many CPUs that are lowering
+ * their priority, there's a single irq work iterator that will try to
+ * push off RT tasks that are waiting to run.
+ *
+ * When a CPU schedules a lower priority task, it will kick off the
+ * irq work iterator that will jump to each CPU with overloaded RT tasks.
+ * As it only takes the first CPU that schedules a lower priority task
+ * to start the process, the rto_start variable is incremented and if
+ * the atomic result is one, then that CPU will try to take the rto_lock.
+ * This prevents high contention on the lock as the process handles all
+ * CPUs scheduling lower priority tasks.
+ *
+ * All CPUs that are scheduling a lower priority task will increment the
+ * rt_loop_next variable. This will make sure that the irq work iterator
+ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
+ * priority task, even if the iterator is in the middle of a scan. Incrementing
+ * the rt_loop_next will cause the iterator to perform another scan.
+ *
  */
 static int rto_next_cpu(struct rq *rq)
 {
-	int prev_cpu = rq->rt.push_cpu;
+	struct root_domain *rd = rq->rd;
+	int next;
 	int cpu;
 
-	cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
-
 	/*
-	 * If the previous cpu is less than the rq's CPU, then it already
-	 * passed the end of the mask, and has started from the beginning.
-	 * We end if the next CPU is greater or equal to rq's CPU.
+	 * When starting the IPI RT pushing, the rto_cpu is set to -1,
+	 * rto_next_cpu() will simply return the first CPU found in
+	 * the rto_mask.
+	 *
+	 * If rto_next_cpu() is called while rto_cpu is a valid cpu, it
+	 * will return the next CPU found in the rto_mask.
+	 *
+	 * If there are no more CPUs left in the rto_mask, then a check is made
+	 * against rto_loop and rto_loop_next. rto_loop is only updated with
+	 * the rto_lock held, but any CPU may increment the rto_loop_next
+	 * without any locking.
 	 */
-	if (prev_cpu < rq->cpu) {
-		if (cpu >= rq->cpu)
-			return nr_cpu_ids;
+	for (;;) {
 
-	} else if (cpu >= nr_cpu_ids) {
+		/* When rto_cpu is -1 this acts like cpumask_first() */
+		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
+
+		rd->rto_cpu = cpu;
+
+		if (cpu < nr_cpu_ids)
+			return cpu;
+
+		rd->rto_cpu = -1;
+
 		/*
-		 * We passed the end of the mask, start at the beginning.
-		 * If the result is greater or equal to the rq's CPU, then
-		 * the loop is finished.
+		 * ACQUIRE ensures we see the @rto_mask changes
+		 * made prior to the @next value observed.
+		 *
+		 * Matches WMB in rt_set_overload().
 		 */
-		cpu = cpumask_first(rq->rd->rto_mask);
-		if (cpu >= rq->cpu)
-			return nr_cpu_ids;
-	}
-	rq->rt.push_cpu = cpu;
+		next = atomic_read_acquire(&rd->rto_loop_next);
 
-	/* Return cpu to let the caller know if the loop is finished or not */
-	return cpu;
+		if (rd->rto_loop == next)
+			break;
+
+		rd->rto_loop = next;
+	}
+
+	return -1;
 }
 
-static int find_next_push_cpu(struct rq *rq)
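+/* Returns true if this CPU won the race to start the RTO push IPI loop. */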
+static inline bool rto_start_trylock(atomic_t *v)
 {
-	struct rq *next_rq;
-	int cpu;
-
-	while (1) {
-		cpu = rto_next_cpu(rq);
-		if (cpu >= nr_cpu_ids)
-			break;
-		next_rq = cpu_rq(cpu);
-
-		/* Make sure the next rq can push to this rq */
-		if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
-			break;
-	}
-
-	return cpu;
+	return !atomic_cmpxchg_acquire(v, 0, 1);
 }
 
-#define RT_PUSH_IPI_EXECUTING		1
-#define RT_PUSH_IPI_RESTART		2
+static inline void rto_start_unlock(atomic_t *v)
+{
+	atomic_set_release(v, 0);
+}
 
 static void tell_cpu_to_push(struct rq *rq)
 {
-	int cpu;
+	int cpu = -1;
 
-	if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
-		raw_spin_lock(&rq->rt.push_lock);
-		/* Make sure it's still executing */
-		if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
-			/*
-			 * Tell the IPI to restart the loop as things have
-			 * changed since it started.
-			 */
-			rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
-			raw_spin_unlock(&rq->rt.push_lock);
-			return;
-		}
-		raw_spin_unlock(&rq->rt.push_lock);
-	}
+	/* Keep the loop going if the IPI is currently active */
+	atomic_inc(&rq->rd->rto_loop_next);
 
-	/* When here, there's no IPI going around */
-
-	rq->rt.push_cpu = rq->cpu;
-	cpu = find_next_push_cpu(rq);
-	if (cpu >= nr_cpu_ids)
+	/* Only one CPU can initiate a loop at a time */
+	if (!rto_start_trylock(&rq->rd->rto_loop_start))
 		return;
 
-	rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
+	raw_spin_lock(&rq->rd->rto_lock);
 
-	irq_work_queue_on(&rq->rt.push_work, cpu);
+	/*
+	 * The rto_cpu is updated under the lock; if it has a valid cpu
+	 * then the IPI is still running and will continue due to the
+	 * update to loop_next, and nothing needs to be done here.
+	 * Otherwise it is finishing up and an IPI needs to be sent.
+	 */
+	if (rq->rd->rto_cpu < 0)
+		cpu = rto_next_cpu(rq);
+
+	raw_spin_unlock(&rq->rd->rto_lock);
+
+	rto_start_unlock(&rq->rd->rto_loop_start);
+
+	if (cpu >= 0)
+		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
 }
 
 /* Called from hardirq context */
-static void try_to_push_tasks(void *arg)
+void rto_push_irq_work_func(struct irq_work *work)
 {
-	struct rt_rq *rt_rq = arg;
-	struct rq *rq, *src_rq;
-	int this_cpu;
+	struct rq *rq;
 	int cpu;
 
-	this_cpu = rt_rq->push_cpu;
+	rq = this_rq();
 
-	/* Paranoid check */
-	BUG_ON(this_cpu != smp_processor_id());
-
-	rq = cpu_rq(this_cpu);
-	src_rq = rq_of_rt_rq(rt_rq);
-
-again:
+	/*
+	 * We do not need to grab the lock to check for has_pushable_tasks.
+	 * When it gets updated, a check is made if a push is possible.
+	 */
 	if (has_pushable_tasks(rq)) {
 		raw_spin_lock(&rq->lock);
-		push_rt_task(rq);
+		push_rt_tasks(rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	raw_spin_lock(&rq->rd->rto_lock);
+
 	/* Pass the IPI to the next rt overloaded queue */
-	raw_spin_lock(&rt_rq->push_lock);
-	/*
-	 * If the source queue changed since the IPI went out,
-	 * we need to restart the search from that CPU again.
-	 */
-	if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
-		rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
-		rt_rq->push_cpu = src_rq->cpu;
-	}
+	cpu = rto_next_cpu(rq);
 
-	cpu = find_next_push_cpu(src_rq);
+	raw_spin_unlock(&rq->rd->rto_lock);
 
-	if (cpu >= nr_cpu_ids)
-		rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
-	raw_spin_unlock(&rt_rq->push_lock);
-
-	if (cpu >= nr_cpu_ids)
+	if (cpu < 0)
 		return;
 
-	/*
-	 * It is possible that a restart caused this CPU to be
-	 * chosen again. Don't bother with an IPI, just see if we
-	 * have more to push.
-	 */
-	if (unlikely(cpu == rq->cpu))
-		goto again;
-
 	/* Try the next RT overloaded CPU */
-	irq_work_queue_on(&rt_rq->push_work, cpu);
-}
-
-static void push_irq_work_func(struct irq_work *work)
-{
-	struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
-
-	try_to_push_tasks(rt_rq);
+	irq_work_queue_on(&rq->rd->rto_push_work, cpu);
 }
 #endif /* HAVE_RT_PUSH_IPI */
 
@@ -2310,8 +2305,9 @@ static void pull_rt_task(struct rq *this_rq)
 	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
+	int rt_overload_count = rt_overloaded(this_rq);
 
-	if (likely(!rt_overloaded(this_rq)))
+	if (likely(!rt_overload_count))
 		return;
 
 	/*
@@ -2320,6 +2316,11 @@ static void pull_rt_task(struct rq *this_rq)
 	 */
 	smp_rmb();
 
+	/* If we are the only overloaded CPU do nothing */
+	if (rt_overload_count == 1 &&
+	    cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
+		return;
+
 #ifdef HAVE_RT_PUSH_IPI
 	if (sched_feat(RT_PUSH_IPI)) {
 		tell_cpu_to_push(this_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c85928b..5508248 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -539,7 +539,7 @@ static inline int rt_bandwidth_enabled(void)
 }
 
 /* RT IPI pull logic requires IRQ_WORK */
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
 # define HAVE_RT_PUSH_IPI
 #endif
 
@@ -561,12 +561,6 @@ struct rt_rq {
 	unsigned long rt_nr_total;
 	int overloaded;
 	struct plist_head pushable_tasks;
-#ifdef HAVE_RT_PUSH_IPI
-	int push_flags;
-	int push_cpu;
-	struct irq_work push_work;
-	raw_spinlock_t push_lock;
-#endif
 #endif /* CONFIG_SMP */
 	int rt_queued;
 
@@ -657,6 +651,19 @@ struct root_domain {
 	struct dl_bw dl_bw;
 	struct cpudl cpudl;
 
+#ifdef HAVE_RT_PUSH_IPI
+	/*
+	 * For IPI pull requests, loop across the rto_mask.
+	 */
+	struct irq_work rto_push_work;
+	raw_spinlock_t rto_lock;
+	/* These are only updated and read within rto_lock */
+	int rto_loop;
+	int rto_cpu;
+	/* These atomics are updated outside of a lock */
+	atomic_t rto_loop_next;
+	atomic_t rto_loop_start;
+#endif
 	/*
 	 * The "RT overload" flag: it gets set if a CPU has more than
 	 * one runnable RT task.
@@ -673,6 +680,9 @@ struct root_domain {
 
 extern struct root_domain def_root_domain;
 
+#ifdef HAVE_RT_PUSH_IPI
+extern void rto_push_irq_work_func(struct irq_work *work);
+#endif
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index f941d92..b7da03f 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1965,6 +1965,10 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
 	p->misfit = false;
 }
 
+/*
+ * kfree() may wake up kswapd. So this function should NOT be called
+ * with any CPU's rq->lock acquired.
+ */
 void free_task_load_ptrs(struct task_struct *p)
 {
 	kfree(p->ravg.curr_window_cpu);
@@ -2915,7 +2919,7 @@ static unsigned long thermal_cap_cpu[NR_CPUS];
 
 unsigned long thermal_cap(int cpu)
 {
-	return thermal_cap_cpu[cpu] ?: cpu_rq(cpu)->cpu_capacity_orig;
+	return thermal_cap_cpu[cpu] ?: SCHED_CAPACITY_SCALE;
 }
 
 unsigned long do_thermal_cap(int cpu, unsigned long thermal_max_freq)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 29bb99c..dea7e55 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -314,7 +314,30 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &sysctl_sched_group_upmigrate_pct,
 	},
+	{
+		.procname	= "sched_boost",
+		.data		= &sysctl_sched_boost,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_boost_handler,
+		.extra1         = &zero,
+		.extra2		= &three,
+	},
 #endif
+	{
+		.procname	= "sched_upmigrate",
+		.data		= &sysctl_sched_capacity_margin,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
+	{
+		.procname	= "sched_downmigrate",
+		.data		= &sysctl_sched_capacity_margin_down,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_updown_migrate_handler,
+	},
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",
@@ -356,15 +379,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-	{
-		.procname	= "sched_boost",
-		.data		= &sysctl_sched_boost,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_boost_handler,
-		.extra1         = &zero,
-		.extra2		= &three,
-	},
 #endif
 	{
 		.procname	= "sched_initial_task_util",
@@ -389,20 +403,6 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &min_wakeup_granularity_ns,
 		.extra2		= &max_wakeup_granularity_ns,
 	},
-	{
-		.procname	= "sched_upmigrate",
-		.data		= &sysctl_sched_capacity_margin,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_updown_migrate_handler,
-	},
-	{
-		.procname	= "sched_downmigrate",
-		.data		= &sysctl_sched_capacity_margin_down,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_updown_migrate_handler,
-	},
 #ifdef CONFIG_SMP
 	{
 		.procname	= "sched_tunable_scaling",
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 33fba7f..d542c09 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -256,6 +256,7 @@ static const struct bin_table bin_net_ipv4_conf_vars_table[] = {
 	{ CTL_INT,	NET_IPV4_CONF_NOPOLICY,			"disable_policy" },
 	{ CTL_INT,	NET_IPV4_CONF_FORCE_IGMP_VERSION,	"force_igmp_version" },
 	{ CTL_INT,	NET_IPV4_CONF_PROMOTE_SECONDARIES,	"promote_secondaries" },
+	{ CTL_INT,	NET_IPV4_CONF_NF_IPV4_DEFRAG_SKIP,	"nf_ipv4_defrag_skip" },
 	{}
 };
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e3aae88..9e862ae 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3800,37 +3800,30 @@ static const struct file_operations show_traces_fops = {
 	.llseek		= seq_lseek,
 };
 
-/*
- * The tracer itself will not take this lock, but still we want
- * to provide a consistent cpumask to user-space:
- */
-static DEFINE_MUTEX(tracing_cpumask_update_lock);
-
-/*
- * Temporary storage for the character representation of the
- * CPU bitmask (and one more byte for the newline):
- */
-static char mask_str[NR_CPUS + 1];
-
 static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
 	struct trace_array *tr = file_inode(filp)->i_private;
+	char *mask_str;
 	int len;
 
-	mutex_lock(&tracing_cpumask_update_lock);
+	len = snprintf(NULL, 0, "%*pb\n",
+		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
+	mask_str = kmalloc(len, GFP_KERNEL);
+	if (!mask_str)
+		return -ENOMEM;
 
-	len = snprintf(mask_str, count, "%*pb\n",
+	len = snprintf(mask_str, len, "%*pb\n",
 		       cpumask_pr_args(tr->tracing_cpumask));
 	if (len >= count) {
 		count = -EINVAL;
 		goto out_err;
 	}
-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 
 out_err:
-	mutex_unlock(&tracing_cpumask_update_lock);
+	kfree(mask_str);
 
 	return count;
 }
@@ -3850,8 +3843,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	mutex_lock(&tracing_cpumask_update_lock);
-
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
 	for_each_tracing_cpu(cpu) {
@@ -3874,8 +3865,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
-
-	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index f3a960e..0664044 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -449,7 +449,7 @@ static int create_val_field(struct hist_trigger_data *hist_data,
 	}
 
 	field = trace_find_event_field(file->event_call, field_name);
-	if (!field) {
+	if (!field || !field->size) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -547,7 +547,7 @@ static int create_key_field(struct hist_trigger_data *hist_data,
 		}
 
 		field = trace_find_event_field(file->event_call, field_name);
-		if (!field) {
+		if (!field || !field->size) {
 			ret = -EINVAL;
 			goto out;
 		}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3630826..6b9af20 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1514,6 +1514,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	WARN_ON_ONCE(!wq);
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
 	WARN_ON_ONCE(timer_pending(timer));
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1ef0cec..dc14bea 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -313,42 +313,47 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
 
 	/* Decide how to handle the operation */
 	switch (op) {
-	case ASN1_OP_MATCH_ANY_ACT:
-	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ANY_ACT:
-	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
-		ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
-	case ASN1_OP_MATCH_ACT:
-	case ASN1_OP_MATCH_ACT_OR_SKIP:
-	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
-		ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
-		if (ret < 0)
-			return ret;
-		goto skip_data;
-
 	case ASN1_OP_MATCH:
 	case ASN1_OP_MATCH_OR_SKIP:
+	case ASN1_OP_MATCH_ACT:
+	case ASN1_OP_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_MATCH_ANY:
 	case ASN1_OP_MATCH_ANY_OR_SKIP:
+	case ASN1_OP_MATCH_ANY_ACT:
+	case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_OR_SKIP:
+	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
 	case ASN1_OP_COND_MATCH_ANY:
 	case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
-	skip_data:
+	case ASN1_OP_COND_MATCH_ANY_ACT:
+	case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
 		if (!(flags & FLAG_CONS)) {
 			if (flags & FLAG_INDEFINITE_LENGTH) {
+				size_t tmp = dp;
+
 				ret = asn1_find_indefinite_length(
-					data, datalen, &dp, &len, &errmsg);
+					data, datalen, &tmp, &len, &errmsg);
 				if (ret < 0)
 					goto error;
-			} else {
-				dp += len;
 			}
 			pr_debug("- LEAF: %zu\n", len);
 		}
+
+		if (op & ASN1_OP_MATCH__ACT) {
+			unsigned char act;
+
+			if (op & ASN1_OP_MATCH__ANY)
+				act = machine[pc + 1];
+			else
+				act = machine[pc + 2];
+			ret = actions[act](context, hdr, tag, data + dp, len);
+			if (ret < 0)
+				return ret;
+		}
+
+		if (!(flags & FLAG_CONS))
+			dp += len;
 		pc += asn1_op_lengths[op];
 		goto next_op;
 
@@ -434,6 +439,8 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
 			else
 				act = machine[pc + 1];
 			ret = actions[act](context, hdr, 0, data + tdp, len);
+			if (ret < 0)
+				return ret;
 		}
 		pc += asn1_op_lengths[op];
 		goto next_op;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index da796e2..c7c96bc 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords,
 				if (parse_lineno(last, &query->last_lineno) < 0)
 					return -EINVAL;
 
+				/* special case for last lineno not specified */
+				if (query->last_lineno == 0)
+					query->last_lineno = UINT_MAX;
+
 				if (query->last_lineno < query->first_lineno) {
 					pr_err("last-line:%d < 1st-line:%d\n",
 						query->last_lineno,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b..ca06adc 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size - 1;
-	atomic_set(&chunk->avail, size);
+	atomic_long_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (size > atomic_read(&chunk->avail))
+		if (size > atomic_long_read(&chunk->avail))
 			continue;
 
 		start_bit = 0;
@@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
 		size = nbits << order;
-		atomic_sub(size, &chunk->avail);
+		atomic_long_sub(size, &chunk->avail);
 		break;
 	}
 	rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 			BUG_ON(remain);
 			size = nbits << order;
-			atomic_add(size, &chunk->avail);
+			atomic_long_add(size, &chunk->avail);
 			rcu_read_unlock();
 			return;
 		}
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		avail += atomic_read(&chunk->avail);
+		avail += atomic_long_read(&chunk->avail);
 	rcu_read_unlock();
 	return avail;
 }
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index e24388a..468fb7c 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -26,6 +26,7 @@
  *	 however I decided to publish this code under the plain GPL.
  */
 
+#include <linux/sched.h>
 #include <linux/string.h>
 #include "mpi-internal.h"
 #include "longlong.h"
@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
 				}
 				e <<= 1;
 				c--;
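+				/*
+				 * Huge exponents keep us in this loop for a
+				 * long time; give the scheduler a chance to run.
+				 */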
+				cond_resched();
 			}
 
 			i--;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8258e9e..c234c07 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -745,20 +745,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless.  And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-				pmd, _pmd,  1))
+				pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
 
@@ -787,7 +782,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1158,7 +1153,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
@@ -1514,37 +1509,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
-	int ret = 0;
+	pmd_t entry;
+	bool preserve_write;
+	int ret;
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
-	if (ptl) {
-		pmd_t entry;
-		bool preserve_write = prot_numa && pmd_write(*pmd);
-		ret = 1;
+	if (!ptl)
+		return 0;
 
-		/*
-		 * Avoid trapping faults against the zero page. The read-only
-		 * data is likely to be read-cached on the local CPU and
-		 * local/remote hits to the zero page are not interesting.
-		 */
-		if (prot_numa && is_huge_zero_pmd(*pmd)) {
-			spin_unlock(ptl);
-			return ret;
-		}
+	preserve_write = prot_numa && pmd_write(*pmd);
+	ret = 1;
 
-		if (!prot_numa || !pmd_protnone(*pmd)) {
-			entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-			entry = pmd_modify(entry, newprot);
-			if (preserve_write)
-				entry = pmd_mkwrite(entry);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-					pmd_write(entry));
-		}
-		spin_unlock(ptl);
-	}
+	/*
+	 * Avoid trapping faults against the zero page. The read-only
+	 * data is likely to be read-cached on the local CPU and
+	 * local/remote hits to the zero page are not interesting.
+	 */
+	if (prot_numa && is_huge_zero_pmd(*pmd))
+		goto unlock;
 
+	if (prot_numa && pmd_protnone(*pmd))
+		goto unlock;
+
+	/*
+	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+	 * which is also under down_read(mmap_sem):
+	 *
+	 *	CPU0:				CPU1:
+	 *				change_huge_pmd(prot_numa=1)
+	 *				 pmdp_huge_get_and_clear_notify()
+	 * madvise_dontneed()
+	 *  zap_pmd_range()
+	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
+	 *   // skip the pmd
+	 *				 set_pmd_at();
+	 *				 // pmd is re-established
+	 *
+	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
+	 * which may break userspace.
+	 *
+	 * pmdp_invalidate() is required to make sure we don't miss
+	 * dirty/young flags set by hardware.
+	 */
+	entry = *pmd;
+	pmdp_invalidate(vma, addr, pmd);
+
+	/*
+	 * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+	 * corrupt them.
+	 */
+	if (pmd_dirty(*pmd))
+		entry = pmd_mkdirty(entry);
+	if (pmd_young(*pmd))
+		entry = pmd_mkyoung(entry);
+
+	entry = pmd_modify(entry, newprot);
+	if (preserve_write)
+		entry = pmd_mkwrite(entry);
+	ret = HPAGE_PMD_NR;
+	set_pmd_at(mm, addr, pmd, entry);
+	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+	spin_unlock(ptl);
 	return ret;
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 65c36ac..6ff65c4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3135,6 +3135,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 	}
 }
 
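+/*
+ * hugetlb VMAs may only be split on a huge page boundary; reject any
+ * address that is not aligned to the huge page size.
+ */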
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (addr & ~(huge_page_mask(hstate_vma(vma))))
+		return -EINVAL;
+	return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3151,6 +3158,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.fault = hugetlb_vm_op_fault,
 	.open = hugetlb_vm_op_open,
 	.close = hugetlb_vm_op_close,
+	.split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
diff --git a/mm/madvise.c b/mm/madvise.c
index 8b25167..59d1aae 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -230,15 +230,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
 {
 	struct file *file = vma->vm_file;
 
+	*prev = vma;
 #ifdef CONFIG_SWAP
 	if (!file) {
-		*prev = vma;
 		force_swapin_readahead(vma, start, end);
 		return 0;
 	}
 
 	if (shmem_mapping(file->f_mapping)) {
-		*prev = vma;
 		force_shm_swapin_readahead(vma, start, end,
 					file->f_mapping);
 		return 0;
@@ -253,7 +252,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
 		return 0;
 	}
 
-	*prev = vma;
 	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 	if (end > vma->vm_end)
 		end = vma->vm_end;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7e6c049..621db7f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2548,9 +2548,11 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_area_struct *new;
 	int err;
 
-	if (is_vm_hugetlb_page(vma) && (addr &
-					~(huge_page_mask(hstate_vma(vma)))))
-		return -EINVAL;
+	if (vma->vm_ops && vma->vm_ops->split) {
+		err = vma->vm_ops->split(vma, addr);
+		if (err)
+			return err;
+	}
 
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
@@ -2984,20 +2986,20 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	set_bit(MMF_OOM_SKIP, &mm->flags);
-	if (unlikely(tsk_is_oom_victim(current))) {
+	if (unlikely(mm_is_oom_victim(mm))) {
 		/*
 		 * Wait for oom_reap_task() to stop working on this
 		 * mm. Because MMF_OOM_SKIP is already set before
 		 * calling down_read(), oom_reap_task() will not run
 		 * on this "mm" post up_write().
 		 *
-		 * tsk_is_oom_victim() cannot be set from under us
-		 * either because current->mm is already set to NULL
+		 * mm_is_oom_victim() cannot be set from under us
+		 * either because victim->mm is already set to NULL
 		 * under task_lock before calling mmput and oom_mm is
-		 * set not NULL by the OOM killer only if current->mm
+		 * set not NULL by the OOM killer only if victim->mm
 		 * is found not NULL while holding the task_lock.
 		 */
+		set_bit(MMF_OOM_SKIP, &mm->flags);
 		down_write(&mm->mmap_sem);
 		up_write(&mm->mmap_sem);
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af9a8a6..6fd9773 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -677,8 +677,10 @@ static void mark_oom_victim(struct task_struct *tsk)
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 09a684a..63b19a3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2642,30 +2642,23 @@ int __isolate_free_page(struct page *page, unsigned int order)
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
 								gfp_t flags)
 {
 #ifdef CONFIG_NUMA
-	int local_nid = numa_node_id();
 	enum zone_stat_item local_stat = NUMA_LOCAL;
 
-	if (unlikely(flags & __GFP_OTHER_NODE)) {
+	if (z->node != numa_node_id())
 		local_stat = NUMA_OTHER;
-		local_nid = preferred_zone->node;
-	}
 
-	if (z->node == local_nid) {
+	if (z->node == preferred_zone->node)
 		__inc_zone_state(z, NUMA_HIT);
-		__inc_zone_state(z, local_stat);
-	} else {
+	else {
 		__inc_zone_state(z, NUMA_MISS);
 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 	}
+	__inc_zone_state(z, local_stat);
 #endif
 }
 
@@ -7383,11 +7376,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	cc.zone->cma_alloc = 1;
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated.  So, if we fall through, be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to the caller.
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret = 0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1689bb5..d3548c4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1407,7 +1407,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 	 * pools/users, we can't allow mapping in interrupt context
 	 * because it can corrupt another users mappings.
 	 */
-	WARN_ON_ONCE(in_interrupt());
+	BUG_ON(in_interrupt());
 
 	/* From now on, migration cannot move the object */
 	pin_tag(handle);
diff --git a/net/9p/client.c b/net/9p/client.c
index cf129fe..1fd6019 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	}
 again:
 	/* Wait for the response */
-	err = wait_event_interruptible(*req->wq,
-				       req->status >= REQ_STATUS_RCVD);
+	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
 
 	/*
 	 * Make sure our req is coherent with regard to updates in other
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f24b25c..f3a4efc 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 		if (err == -ENOSPC) {
 			chan->ring_bufs_avail = 0;
 			spin_unlock_irqrestore(&chan->lock, flags);
-			err = wait_event_interruptible(*chan->vc_wq,
-							chan->ring_bufs_avail);
+			err = wait_event_killable(*chan->vc_wq,
+						  chan->ring_bufs_avail);
 			if (err  == -ERESTARTSYS)
 				return err;
 
@@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 		 * Other zc request to finish here
 		 */
 		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
-			err = wait_event_interruptible(vp_wq,
+			err = wait_event_killable(vp_wq,
 			      (atomic_read(&vp_pinned) < chan->p9_max_pages));
 			if (err == -ERESTARTSYS)
 				return err;
@@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 		if (err == -ENOSPC) {
 			chan->ring_bufs_avail = 0;
 			spin_unlock_irqrestore(&chan->lock, flags);
-			err = wait_event_interruptible(*chan->vc_wq,
-						       chan->ring_bufs_avail);
+			err = wait_event_killable(*chan->vc_wq,
+						  chan->ring_bufs_avail);
 			if (err  == -ERESTARTSYS)
 				goto err_out;
 
@@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	virtqueue_kick(chan->vq);
 	spin_unlock_irqrestore(&chan->lock, flags);
 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
-	err = wait_event_interruptible(*req->wq,
-				       req->status >= REQ_STATUS_RCVD);
+	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
 	/*
 	 * Non kernel buffers are pinned, unpin them
 	 */
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index aa1df1a..82ce571 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 
 static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct nf_bridge_info *nf_bridge;
-	unsigned int mtu_reserved;
+	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+	unsigned int mtu, mtu_reserved;
 
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
+	mtu = skb->dev->mtu;
 
-	if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+		mtu = nf_bridge->frag_max_size;
+
+	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
 		nf_bridge_info_free(skb);
 		return br_dev_queue_push_xmit(net, sk, skb);
 	}
 
-	nf_bridge = nf_bridge_info_get(skb);
-
 	/* This is wrong! We should preserve the original fragment
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 292e33b..5f3a627 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -34,7 +34,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
 		return -ENOTSUPP;
 	}
 
-	WARN_ON(!key->len);
+	if (!key->len)
+		return -EINVAL;
+
 	key->key = kmemdup(buf, key->len, GFP_NOIO);
 	if (!key->key) {
 		ret = -ENOMEM;
diff --git a/net/core/dev.c b/net/core/dev.c
index 5685744..e0217f8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1306,6 +1306,7 @@ void netdev_notify_peers(struct net_device *dev)
 {
 	rtnl_lock();
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(netdev_notify_peers);
@@ -4171,6 +4172,9 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
 	return 0;
 }
 
+int (*gsb_nw_stack_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(gsb_nw_stack_recv);
+
 int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
 EXPORT_SYMBOL(athrs_fast_nat_recv);
 
@@ -4185,6 +4189,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
+	int (*gsb_ns_recv)(struct sk_buff *skb);
 	int (*fast_recv)(struct sk_buff *skb);
 	int (*embms_recv)(struct sk_buff *skb);
 
@@ -4246,6 +4251,13 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 			goto out;
 	}
 #endif
+	gsb_ns_recv = rcu_dereference(gsb_nw_stack_recv);
+	if (gsb_ns_recv) {
+		if (gsb_ns_recv(skb)) {
+			ret = NET_RX_SUCCESS;
+			goto out;
+		}
+	}
 	fast_recv = rcu_dereference(athrs_fast_nat_recv);
 	if (fast_recv) {
 		if (fast_recv(skb)) {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0df2aa6..a7f05f0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -369,14 +369,16 @@ static struct ctl_table net_core_table[] = {
 		.data		= &sysctl_net_busy_poll,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 	{
 		.procname	= "busy_read",
 		.data		= &sysctl_net_busy_read,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
 	},
 #endif
 #ifdef CONFIG_NET_SCHED
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 39e7e2b..62522b8 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -57,10 +57,16 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 		if (state == DCCP_TIME_WAIT)
 			timeo = DCCP_TIMEWAIT_LEN;
 
+		/* tw_timer is pinned, so we need to make sure BHs are disabled
+		 * in the following section, otherwise the timer handler could
+		 * run before we complete the initialization.
+		 */
+		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
 		/* Linkage updates. */
 		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		inet_twsk_put(tw);
+		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
 		 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 062a67c..db54461 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2228,6 +2228,8 @@ static struct devinet_sysctl_table {
 					      "route_localnet"),
 		DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
 					      "drop_unicast_in_l2_multicast"),
+		DEVINET_SYSCTL_RW_ENTRY(NF_IPV4_DEFRAG_SKIP,
+					"nf_ipv4_defrag_skip"),
 	},
 };
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 691146a..42a19fb 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -768,7 +768,7 @@ static bool icmp_tag_validation(int proto)
 }
 
 /*
- *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and
+ *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
  *	ICMP_PARAMETERPROB.
  */
 
@@ -796,7 +796,8 @@ static bool icmp_unreach(struct sk_buff *skb)
 	if (iph->ihl < 5) /* Mangled header, drop. */
 		goto out_err;
 
-	if (icmph->type == ICMP_DEST_UNREACH) {
+	switch (icmph->type) {
+	case ICMP_DEST_UNREACH:
 		switch (icmph->code & 15) {
 		case ICMP_NET_UNREACH:
 		case ICMP_HOST_UNREACH:
@@ -832,8 +833,16 @@ static bool icmp_unreach(struct sk_buff *skb)
 		}
 		if (icmph->code > NR_ICMP_UNREACH)
 			goto out;
-	} else if (icmph->type == ICMP_PARAMETERPROB)
+		break;
+	case ICMP_PARAMETERPROB:
 		info = ntohl(icmph->un.gateway) >> 24;
+		break;
+	case ICMP_TIME_EXCEEDED:
+		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
+		if (icmph->code == ICMP_EXC_FRAGTIME)
+			goto out;
+		break;
+	}
 
 	/*
 	 *	Throw it at our lower layers
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 453db95..4bf3b8a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
 	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
 	net = container_of(qp->q.net, struct net, ipv4.frags);
 
+	rcu_read_lock();
 	spin_lock(&qp->q.lock);
 
 	if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
 	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
 	if (!inet_frag_evicting(&qp->q)) {
-		struct sk_buff *head = qp->q.fragments;
+		struct sk_buff *clone, *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
 
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
 		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 			goto out;
 
-		rcu_read_lock();
 		head->dev = dev_get_by_index_rcu(net, qp->iif);
 		if (!head->dev)
-			goto out_rcu_unlock;
+			goto out;
+
 
 		/* skb has no dst, perform route lookup again */
 		iph = ip_hdr(head);
 		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
 					   iph->tos, head->dev);
 		if (err)
-			goto out_rcu_unlock;
+			goto out;
 
 		/* Only an end host needs to send an ICMP
 		 * "Fragment Reassembly Timeout" message, per RFC792.
 		 */
 		if (frag_expire_skip_icmp(qp->user) &&
 		    (skb_rtable(head)->rt_type != RTN_LOCAL))
-			goto out_rcu_unlock;
+			goto out;
+
+		clone = skb_clone(head, GFP_ATOMIC);
 
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
-		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
-		rcu_read_unlock();
+		if (clone) {
+			spin_unlock(&qp->q.lock);
+			icmp_send(clone, ICMP_TIME_EXCEEDED,
+				  ICMP_EXC_FRAGTIME, 0);
+			consume_skb(clone);
+			goto out_rcu_unlock;
+		}
 	}
 out:
 	spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+	rcu_read_unlock();
 	ipq_put(qp);
 }
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d37bdc..551dd39 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -819,6 +819,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 	{
 		struct ip_mreqn mreq;
 		struct net_device *dev = NULL;
+		int midx;
 
 		if (sk->sk_type == SOCK_STREAM)
 			goto e_inval;
@@ -863,11 +864,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+
+		midx = l3mdev_master_ifindex(dev);
+
 		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
-		    mreq.imr_ifindex != sk->sk_bound_dev_if)
+		    mreq.imr_ifindex != sk->sk_bound_dev_if &&
+		    (!midx || midx != sk->sk_bound_dev_if))
 			break;
 
 		inet->mc_index = mreq.imr_ifindex;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 071a785..b23464d 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
 	while ((d = next)) {
 		next = d->next;
 		dev = d->dev;
-		if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+		if (d != ic_dev && !netdev_uses_dsa(dev)) {
 			pr_debug("IP-Config: Downing %s\n", dev->name);
 			dev_change_flags(dev, d->flags);
 		}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 713c09a..0c9ded2 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -158,6 +158,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr))
 		return NF_ACCEPT;
+
+	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+		return NF_ACCEPT;
+
 	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 }
 
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index d88da36..93224b2 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -11,6 +11,7 @@
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/inetdevice.h>
 #include <net/route.h>
 #include <net/ip.h>
 
@@ -78,8 +79,13 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
 #endif
 	/* Gather fragments. */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		enum ip_defrag_users user =
-			nf_ct_defrag_user(state->hook, skb);
+		enum ip_defrag_users user;
+
+		if (skb->dev &&
+		    IN_DEV_NF_IPV4_DEFRAG_SKIP(__in_dev_get_rcu(skb->dev)))
+			return NF_ACCEPT;
+
+		user = nf_ct_defrag_user(state->hook, skb);
 
 		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
 			return NF_STOLEN;
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03..6f5e8d0 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
 	/* maniptype == SRC for postrouting. */
 	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
 
-	/* We never see fragments: conntrack defrags on pre-routing
-	 * and local-out, and nf_nat_out protects post-routing.
-	 */
-	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
 	ct = nf_ct_get(skb, &ctinfo);
 	/* Can't track?  It's not due to stress, or conntrack would
 	 * have dropped it.  Hence it's the user's responsibilty to
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 5a8f7c3..53e49f5 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
 	.timeout	= 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-	.me			= THIS_MODULE,
-	.help			= help,
-	.expect_policy		= &snmp_exp_policy,
-	.name			= "snmp",
-	.tuple.src.l3num	= AF_INET,
-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
-	.tuple.dst.protonum	= IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 	.me			= THIS_MODULE,
 	.help			= help,
@@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-	int ret = 0;
-
 	BUG_ON(nf_nat_snmp_hook != NULL);
 	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-	ret = nf_conntrack_helper_register(&snmp_trap_helper);
-	if (ret < 0) {
-		nf_conntrack_helper_unregister(&snmp_helper);
-		return ret;
-	}
-	return ret;
+	return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd632e6..03728c6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -633,9 +633,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
 	struct fnhe_hash_bucket *hash;
 	struct fib_nh_exception *fnhe;
 	struct rtable *rt;
+	u32 genid, hval;
 	unsigned int i;
 	int depth;
-	u32 hval = fnhe_hashfun(daddr);
+
+	genid = fnhe_genid(dev_net(nh->nh_dev));
+	hval = fnhe_hashfun(daddr);
 
 	spin_lock_bh(&fnhe_lock);
 
@@ -658,12 +661,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
 	}
 
 	if (fnhe) {
+		if (fnhe->fnhe_genid != genid)
+			fnhe->fnhe_genid = genid;
 		if (gw)
 			fnhe->fnhe_gw = gw;
-		if (pmtu) {
+		if (pmtu)
 			fnhe->fnhe_pmtu = pmtu;
-			fnhe->fnhe_expires = max(1UL, expires);
-		}
+		fnhe->fnhe_expires = max(1UL, expires);
 		/* Update all cached dsts too */
 		rt = rcu_dereference(fnhe->fnhe_rth_input);
 		if (rt)
@@ -682,7 +686,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
 			fnhe->fnhe_next = hash->chain;
 			rcu_assign_pointer(hash->chain, fnhe);
 		}
-		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
+		fnhe->fnhe_genid = genid;
 		fnhe->fnhe_daddr = daddr;
 		fnhe->fnhe_gw = gw;
 		fnhe->fnhe_pmtu = pmtu;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ec9e58b..b6a7f91 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5082,7 +5082,7 @@ static void tcp_check_space(struct sock *sk)
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
 		/* pairs with tcp_poll() */
-		smp_mb__after_atomic();
+		smp_mb();
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
 			tcp_new_space(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 64e1ba4..830a564 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -328,10 +328,16 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 				timeo = TCP_TIMEWAIT_LEN;
 		}
 
+		/* tw_timer is pinned, so we need to make sure BHs are disabled
+		 * in the following section, otherwise the timer handler could
+		 * run before we complete the initialization.
+		 */
+		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
 		/* Linkage updates. */
 		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		inet_twsk_put(tw);
+		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
 		 * socket up.  We've got bigger problems than
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 4c4bac1..3ecb61e 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 {
-	return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
+	return  min(tp->snd_ssthresh, tp->snd_cwnd);
 }
 
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2abaa2e..140d05f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -290,10 +290,10 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
 	.keep_addr_on_down	= 0,
 };
 
-/* Check if a valid qdisc is available */
-static inline bool addrconf_qdisc_ok(const struct net_device *dev)
+/* Check if link is ready: is it up and is a valid qdisc available */
+static inline bool addrconf_link_ready(const struct net_device *dev)
 {
-	return !qdisc_tx_is_noop(dev);
+	return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
 }
 
 static void addrconf_del_rs_timer(struct inet6_dev *idev)
@@ -438,7 +438,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 
 	ndev->token = in6addr_any;
 
-	if (netif_running(dev) && addrconf_qdisc_ok(dev))
+	if (netif_running(dev) && addrconf_link_ready(dev))
 		ndev->if_flags |= IF_READY;
 
 	ipv6_mc_init_dev(ndev);
@@ -3408,7 +3408,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			/* restore routes for permanent addresses */
 			addrconf_permanent_addr(dev);
 
-			if (!addrconf_qdisc_ok(dev)) {
+			if (!addrconf_link_ready(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
 					dev->name);
@@ -3423,7 +3423,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 				run_pending = 1;
 			}
 		} else if (event == NETDEV_CHANGE) {
-			if (!addrconf_qdisc_ok(dev)) {
+			if (!addrconf_link_ready(dev)) {
 				/* device is still not ready. */
 				break;
 			}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0281645..f51f94b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -926,12 +926,12 @@ static int __init inet6_init(void)
 	err = register_pernet_subsys(&inet6_net_ops);
 	if (err)
 		goto register_pernet_fail;
-	err = icmpv6_init();
-	if (err)
-		goto icmp_fail;
 	err = ip6_mr_init();
 	if (err)
 		goto ipmr_fail;
+	err = icmpv6_init();
+	if (err)
+		goto icmp_fail;
 	err = ndisc_init();
 	if (err)
 		goto ndisc_fail;
@@ -1061,10 +1061,10 @@ static int __init inet6_init(void)
 	ndisc_cleanup();
 ndisc_fail:
 	ip6_mr_cleanup();
-ipmr_fail:
-	icmpv6_cleanup();
 icmp_fail:
 	unregister_pernet_subsys(&inet6_net_ops);
+ipmr_fail:
+	icmpv6_cleanup();
 register_pernet_fail:
 	sock_unregister(PF_INET6);
 	rtnl_unregister_all(PF_INET6);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 65a58fe..a7d0c01 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -461,7 +461,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
 				      tpi->proto);
 	if (tunnel) {
-		ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
+		ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
 
 		return PACKET_RCVD;
 	}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index da64b20..afc30a0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
 	int err;
 
+	dev->rtnl_link_ops = &vti6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &vti6_link_ops;
 
 	dev_hold(dev);
 	vti6_tnl_link(ip6n, t);
@@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	if (!skb->ignore_df && skb->len > mtu) {
 		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
 
-		if (skb->protocol == htons(ETH_P_IPV6))
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		else
+		} else {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 				  htonl(mtu));
+		}
 
 		return -EMSGSIZE;
 	}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 636ec56..38bee17 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -585,16 +585,24 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 
 		if (val) {
 			struct net_device *dev;
+			int midx;
 
-			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
-				goto e_inval;
+			rcu_read_lock();
 
-			dev = dev_get_by_index(net, val);
+			dev = dev_get_by_index_rcu(net, val);
 			if (!dev) {
+				rcu_read_unlock();
 				retv = -ENODEV;
 				break;
 			}
-			dev_put(dev);
+			midx = l3mdev_master_ifindex_rcu(dev);
+
+			rcu_read_unlock();
+
+			if (sk->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if != val &&
+			    (!midx || midx != sk->sk_bound_dev_if))
+				goto e_inval;
 		}
 		np->mcast_oif = val;
 		retv = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5acd855..f7e685f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3470,7 +3470,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-	 } else if (event == NETDEV_UNREGISTER) {
+	 } else if (event == NETDEV_UNREGISTER &&
+		    dev->reg_state != NETREG_UNREGISTERED) {
+		/* NETDEV_UNREGISTER could be fired multiple times by
+		 * netdev_wait_allrefs(). Make sure we only call this once.
+		 */
 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 40d7405..db6d437 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1085,6 +1085,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
 	ipip6_tunnel_link(sitn, t);
 	t->parms.iph.ttl = p->iph.ttl;
 	t->parms.iph.tos = p->iph.tos;
+	t->parms.iph.frag_off = p->iph.frag_off;
 	if (t->parms.link != p->link) {
 		t->parms.link = p->link;
 		ipip6_tunnel_bind_dev(t->dev);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 7eb0e8f..22785dc 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1624,60 +1624,35 @@ static struct proto kcm_proto = {
 };
 
 /* Clone a kcm socket. */
-static int kcm_clone(struct socket *osock, struct kcm_clone *info,
-		     struct socket **newsockp)
+static struct file *kcm_clone(struct socket *osock)
 {
 	struct socket *newsock;
 	struct sock *newsk;
-	struct file *newfile;
-	int err, newfd;
+	struct file *file;
 
-	err = -ENFILE;
 	newsock = sock_alloc();
 	if (!newsock)
-		goto out;
+		return ERR_PTR(-ENFILE);
 
 	newsock->type = osock->type;
 	newsock->ops = osock->ops;
 
 	__module_get(newsock->ops->owner);
 
-	newfd = get_unused_fd_flags(0);
-	if (unlikely(newfd < 0)) {
-		err = newfd;
-		goto out_fd_fail;
-	}
-
-	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
-	if (unlikely(IS_ERR(newfile))) {
-		err = PTR_ERR(newfile);
-		goto out_sock_alloc_fail;
-	}
-
 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
 			 &kcm_proto, true);
 	if (!newsk) {
-		err = -ENOMEM;
-		goto out_sk_alloc_fail;
+		sock_release(newsock);
+		return ERR_PTR(-ENOMEM);
 	}
-
 	sock_init_data(newsock, newsk);
 	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
 
-	fd_install(newfd, newfile);
-	*newsockp = newsock;
-	info->fd = newfd;
+	file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
+	if (IS_ERR(file))
+		sock_release(newsock);
 
-	return 0;
-
-out_sk_alloc_fail:
-	fput(newfile);
-out_sock_alloc_fail:
-	put_unused_fd(newfd);
-out_fd_fail:
-	sock_release(newsock);
-out:
-	return err;
+	return file;
 }
 
 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -1707,21 +1682,25 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	}
 	case SIOCKCMCLONE: {
 		struct kcm_clone info;
-		struct socket *newsock = NULL;
+		struct file *file;
 
-		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-			return -EFAULT;
+		info.fd = get_unused_fd_flags(0);
+		if (unlikely(info.fd < 0))
+			return info.fd;
 
-		err = kcm_clone(sock, &info, &newsock);
-
-		if (!err) {
-			if (copy_to_user((void __user *)arg, &info,
-					 sizeof(info))) {
-				err = -EFAULT;
-				sys_close(info.fd);
-			}
+		file = kcm_clone(sock);
+		if (IS_ERR(file)) {
+			put_unused_fd(info.fd);
+			return PTR_ERR(file);
 		}
-
+		if (copy_to_user((void __user *)arg, &info,
+				 sizeof(info))) {
+			put_unused_fd(info.fd);
+			fput(file);
+			return -EFAULT;
+		}
+		fd_install(info.fd, file);
+		err = 0;
 		break;
 	}
 	default:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b06acd0..cfc4dd8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1944,7 +1944,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
 
 	rcu_read_lock_bh();
 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-		(void)l2tp_tunnel_delete(tunnel);
+		l2tp_tunnel_delete(tunnel);
 	}
 	rcu_read_unlock_bh();
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 3468d56..9d77a54 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -48,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
 	return (struct l2tp_ip_sock *)sk;
 }
 
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+					  __be32 raddr, int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
@@ -62,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+		    (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;
@@ -72,15 +74,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
 	return sk;
 }
 
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -186,8 +179,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
-					   tunnel_id);
+		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+					   inet_iif(skb), tunnel_id);
 		if (!sk) {
 			read_unlock_bh(&l2tp_ip_lock);
 			goto discard;
@@ -289,7 +282,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		inet->inet_saddr = 0;  /* Use device */
 
 	write_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
 				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip_lock);
 		ret = -EADDRINUSE;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 96efe47..86ad51a 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
+					   const struct in6_addr *raddr,
 					   int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
 		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
 		if (l2tp == NULL)
@@ -73,6 +75,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+		    (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;
@@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 	return sk;
 }
 
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
-						struct in6_addr *laddr,
-						int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -200,8 +192,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
-					    tunnel_id);
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+					    inet6_iif(skb), tunnel_id);
 		if (!sk) {
 			read_unlock_bh(&l2tp_ip6_lock);
 			goto discard;
@@ -339,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	rcu_read_unlock();
 
 	write_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
 				   addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip6_lock);
 		err = -EADDRINUSE;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 1ccd310..ee03bc8 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -287,7 +287,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
 	l2tp_tunnel_notify(&l2tp_nl_family, info,
 			   tunnel, L2TP_CMD_TUNNEL_DELETE);
 
-	(void) l2tp_tunnel_delete(tunnel);
+	l2tp_tunnel_delete(tunnel);
 
 out:
 	return ret;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6..a0d901d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
 		    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
 			continue;
 
-		if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
-			continue;
-
 		max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
 	}
 	rcu_read_unlock();
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 34c2add..03dbc6b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -681,7 +681,6 @@ struct ieee80211_if_mesh {
 	const struct ieee80211_mesh_sync_ops *sync_ops;
 	s64 sync_offset_clockdrift_max;
 	spinlock_t sync_offset_lock;
-	bool adjusting_tbtt;
 	/* mesh power save */
 	enum nl80211_mesh_power_mode nonpeer_pm;
 	int ps_peers_light_sleep;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 50e1b7f..b4b3fe0 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,10 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
 	/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
 	*pos |= ifmsh->ps_peers_deep_sleep ?
 			IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
-	*pos++ |= ifmsh->adjusting_tbtt ?
-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
-	*pos++ = 0x00;
-
 	return 0;
 }
 
@@ -850,7 +846,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
 	ifmsh->mesh_cc_id = 0;	/* Disabled */
 	/* register sync ops from extensible synchronization framework */
 	ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
-	ifmsh->adjusting_tbtt = false;
 	ifmsh->sync_offset_clockdrift_max = 0;
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_mesh_root_setup(ifmsh);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7fcdcf6..fcba70e5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
 
 	/* Userspace handles station allocation */
 	if (sdata->u.mesh.user_mpm ||
-	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
-		cfg80211_notify_new_peer_candidate(sdata->dev, addr,
-						   elems->ie_start,
-						   elems->total_len,
-						   GFP_KERNEL);
-	else
+	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+		if (mesh_peer_accepts_plinks(elems) &&
+		    mesh_plink_availables(sdata))
+			cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+							   elems->ie_start,
+							   elems->total_len,
+							   GFP_KERNEL);
+	} else
 		sta = __mesh_sta_info_alloc(sdata, addr);
 
 	return sta;
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index faca22c..75608c0 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 	 */
 
 	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
-		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
 		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
 			  sta->sta.addr);
 		goto no_sync;
@@ -172,11 +171,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
 					 struct beacon_data *beacon)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	u8 cap;
 
 	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
 	WARN_ON(!rcu_read_lock_held());
-	cap = beacon->meshconf->meshconf_cap;
 
 	spin_lock_bh(&ifmsh->sync_offset_lock);
 
@@ -190,21 +187,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
 			  "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
 			  ifmsh->sync_offset_clockdrift_max);
 		set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
-
-		ifmsh->adjusting_tbtt = true;
 	} else {
 		msync_dbg(sdata,
 			  "TBTT : max clockdrift=%lld; too small to adjust\n",
 			  (long long)ifmsh->sync_offset_clockdrift_max);
 		ifmsh->sync_offset_clockdrift_max = 0;
-
-		ifmsh->adjusting_tbtt = false;
 	}
 	spin_unlock_bh(&ifmsh->sync_offset_lock);
-
-	beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
-			~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
 }
 
 static const struct sync_method sync_methods[] = {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 274c564..1ffd1e1 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1244,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
 static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
 					  struct ieee80211_vif *vif,
-					  struct ieee80211_sta *pubsta,
+					  struct sta_info *sta,
 					  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1258,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
 	if (!ieee80211_is_data(hdr->frame_control))
 		return NULL;
 
-	if (pubsta) {
+	if (sta) {
 		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 
-		txq = pubsta->txq[tid];
+		if (!sta->uploaded)
+			return NULL;
+
+		txq = sta->sta.txq[tid];
 	} else if (vif) {
 		txq = vif->txq;
 	}
@@ -1499,23 +1502,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
 	struct fq *fq = &local->fq;
 	struct ieee80211_vif *vif;
 	struct txq_info *txqi;
-	struct ieee80211_sta *pubsta;
 
 	if (!local->ops->wake_tx_queue ||
 	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
 		return false;
 
-	if (sta && sta->uploaded)
-		pubsta = &sta->sta;
-	else
-		pubsta = NULL;
-
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		sdata = container_of(sdata->bss,
 				     struct ieee80211_sub_if_data, u.ap);
 
 	vif = &sdata->vif;
-	txqi = ieee80211_get_txq(local, vif, pubsta, skb);
+	txqi = ieee80211_get_txq(local, vif, sta, skb);
 
 	if (!txqi)
 		return false;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1309e2c..c5a5a69 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -937,6 +937,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
 {
 	struct mpls_route __rcu **platform_label;
 	struct net *net = dev_net(dev);
+	unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
+	unsigned int alive;
 	unsigned index;
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -946,9 +948,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
 		if (!rt)
 			continue;
 
+		alive = 0;
 		change_nexthops(rt) {
 			if (rtnl_dereference(nh->nh_dev) != dev)
-				continue;
+				goto next;
+
 			switch (event) {
 			case NETDEV_DOWN:
 			case NETDEV_UNREGISTER:
@@ -956,13 +960,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
 				/* fall through */
 			case NETDEV_CHANGE:
 				nh->nh_flags |= RTNH_F_LINKDOWN;
-				if (event != NETDEV_UNREGISTER)
-					ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
 				break;
 			}
 			if (event == NETDEV_UNREGISTER)
 				RCU_INIT_POINTER(nh->nh_dev, NULL);
+next:
+			if (!(nh->nh_flags & nh_flags))
+				alive++;
 		} endfor_nexthops(rt);
+
+		WRITE_ONCE(rt->rt_nhn_alive, alive);
 	}
 }
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 004af03..d869ea5 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -364,6 +364,11 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
 		ret = nf_queue(skb, state, &entry, verdict);
 		if (ret == 1 && entry)
 			goto next_hook;
+	} else {
+		/* Implicit handling for NF_STOLEN, as well as any other
+		 * non conventional verdicts.
+		 */
+		ret = 0;
 	}
 	return ret;
 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index a6e44ef..2155c24 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2040,12 +2040,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 		seq_puts(seq,
 			 "  -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n");
 	} else {
+		struct net *net = seq_file_net(seq);
+		struct netns_ipvs *ipvs = net_ipvs(net);
 		const struct ip_vs_service *svc = v;
 		const struct ip_vs_iter *iter = seq->private;
 		const struct ip_vs_dest *dest;
 		struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
 		char *sched_name = sched ? sched->name : "none";
 
+		if (svc->ipvs != ipvs)
+			return 0;
 		if (iter->table == ip_vs_svc_table) {
 #ifdef CONFIG_IP_VS_IPV6
 			if (svc->af == AF_INET6)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 778fcdb..fa3ef25 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2068,7 +2068,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 * is called on error from nf_tables_newrule().
 	 */
 	expr = nft_expr_first(rule);
-	while (expr->ops && expr != nft_expr_last(rule)) {
+	while (expr != nft_expr_last(rule) && expr->ops) {
 		nf_tables_expr_destroy(ctx, expr);
 		expr = nft_expr_next(expr);
 	}
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index b1fcfa0..28d0653 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
 
+struct nfnl_cthelper {
+	struct list_head		list;
+	struct nf_conntrack_helper	helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
 static int
 nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
 			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -205,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 		     struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_helper *helper;
+	struct nfnl_cthelper *nfcth;
 	int ret;
 
 	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
 		return -EINVAL;
 
-	helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
-	if (helper == NULL)
+	nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+	if (nfcth == NULL)
 		return -ENOMEM;
+	helper = &nfcth->helper;
 
 	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
 	if (ret < 0)
-		goto err;
+		goto err1;
 
 	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
 	helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -247,12 +256,98 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 
 	ret = nf_conntrack_helper_register(helper);
 	if (ret < 0)
-		goto err;
+		goto err2;
+
+	list_add_tail(&nfcth->list, &nfnl_cthelper_list);
+	return 0;
+err2:
+	kfree(helper->expect_policy);
+err1:
+	kfree(nfcth);
+	return ret;
+}
+
+static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+				struct nf_conntrack_expect_policy *new_policy,
+				const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+			       nfnl_cthelper_expect_pol);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_NAME] ||
+	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
+	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+		return -EINVAL;
+
+	if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+		return -EBUSY;
+
+	new_policy->max_expected =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+	new_policy->timeout =
+		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
 
 	return 0;
-err:
-	kfree(helper);
-	return ret;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+					   struct nf_conntrack_helper *helper)
+{
+	struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+	struct nf_conntrack_expect_policy *policy;
+	int i, err;
+
+	/* Check first that all policy attributes are well-formed, so we don't
+	 * leave things in inconsistent state on errors.
+	 */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+		if (!tb[NFCTH_POLICY_SET + i])
+			return -EINVAL;
+
+		err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+						      &new_policy[i],
+						      tb[NFCTH_POLICY_SET + i]);
+		if (err < 0)
+			return err;
+	}
+	/* Now we can safely update them. */
+	for (i = 0; i < helper->expect_class_max + 1; i++) {
+		policy = (struct nf_conntrack_expect_policy *)
+				&helper->expect_policy[i];
+		policy->max_expected = new_policy->max_expected;
+		policy->timeout	= new_policy->timeout;
+	}
+
+	return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+				       const struct nlattr *attr)
+{
+	struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+	unsigned int class_max;
+	int err;
+
+	err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+			       nfnl_cthelper_expect_policy_set);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFCTH_POLICY_SET_NUM])
+		return -EINVAL;
+
+	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+	if (helper->expect_class_max + 1 != class_max)
+		return -EBUSY;
+
+	return nfnl_cthelper_update_policy_all(tb, helper);
 }
 
 static int
@@ -265,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
 		return -EBUSY;
 
 	if (tb[NFCTH_POLICY]) {
-		ret = nfnl_cthelper_parse_expect_policy(helper,
-							tb[NFCTH_POLICY]);
+		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
 		if (ret < 0)
 			return ret;
 	}
@@ -295,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 	const char *helper_name;
 	struct nf_conntrack_helper *cur, *helper = NULL;
 	struct nf_conntrack_tuple tuple;
-	int ret = 0, i;
+	struct nfnl_cthelper *nlcth;
+	int ret = 0;
 
 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
 		return -EINVAL;
@@ -306,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 	if (ret < 0)
 		return ret;
 
-	rcu_read_lock();
-	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+		if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (strncmp(cur->name, helper_name,
-					NF_CT_HELPER_NAME_LEN) != 0)
-				continue;
+		if ((tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			if ((tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
 
-			if (nlh->nlmsg_flags & NLM_F_EXCL) {
-				ret = -EEXIST;
-				goto err;
-			}
-			helper = cur;
-			break;
-		}
+		helper = cur;
+		break;
 	}
-	rcu_read_unlock();
 
 	if (helper == NULL)
 		ret = nfnl_cthelper_create(tb, &tuple);
@@ -338,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
 		ret = nfnl_cthelper_update(tb, helper);
 
 	return ret;
-err:
-	rcu_read_unlock();
-	return ret;
 }
 
 static int
@@ -504,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
 			     struct sk_buff *skb, const struct nlmsghdr *nlh,
 			     const struct nlattr * const tb[])
 {
-	int ret = -ENOENT, i;
+	int ret = -ENOENT;
 	struct nf_conntrack_helper *cur;
 	struct sk_buff *skb2;
 	char *helper_name = NULL;
 	struct nf_conntrack_tuple tuple;
+	struct nfnl_cthelper *nlcth;
 	bool tuple_set = false;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -529,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
-
-			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-			if (skb2 == NULL) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-						nlh->nlmsg_seq,
-						NFNL_MSG_TYPE(nlh->nlmsg_type),
-						NFNL_MSG_CTHELPER_NEW, cur);
-			if (ret <= 0) {
-				kfree_skb(skb2);
-				break;
-			}
-
-			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-						MSG_DONTWAIT);
-			if (ret > 0)
-				ret = 0;
-
-			/* this avoids a loop in nfnetlink. */
-			return ret == -EAGAIN ? -ENOBUFS : ret;
+		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		if (skb2 == NULL) {
+			ret = -ENOMEM;
+			break;
 		}
+
+		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+					      nlh->nlmsg_seq,
+					      NFNL_MSG_TYPE(nlh->nlmsg_type),
+					      NFNL_MSG_CTHELPER_NEW, cur);
+		if (ret <= 0) {
+			kfree_skb(skb2);
+			break;
+		}
+
+		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+				      MSG_DONTWAIT);
+		if (ret > 0)
+			ret = 0;
+
+		/* this avoids a loop in nfnetlink. */
+		return ret == -EAGAIN ? -ENOBUFS : ret;
 	}
 	return ret;
 }
@@ -578,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 {
 	char *helper_name = NULL;
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
 	struct nf_conntrack_tuple tuple;
 	bool tuple_set = false, found = false;
-	int i, j = 0, ret;
+	struct nfnl_cthelper *nlcth, *n;
+	int j = 0, ret;
 
 	if (tb[NFCTH_NAME])
 		helper_name = nla_data(tb[NFCTH_NAME]);
@@ -594,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 		tuple_set = true;
 	}
 
-	for (i = 0; i < nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-								hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
+		j++;
 
-			j++;
+		if (helper_name &&
+		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+			continue;
 
-			if (helper_name && strncmp(cur->name, helper_name,
-						NF_CT_HELPER_NAME_LEN) != 0) {
-				continue;
-			}
-			if (tuple_set &&
-			    (tuple.src.l3num != cur->tuple.src.l3num ||
-			     tuple.dst.protonum != cur->tuple.dst.protonum))
-				continue;
+		if (tuple_set &&
+		    (tuple.src.l3num != cur->tuple.src.l3num ||
+		     tuple.dst.protonum != cur->tuple.dst.protonum))
+			continue;
 
-			found = true;
-			nf_conntrack_helper_unregister(cur);
-		}
+		found = true;
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+
+		list_del(&nlcth->list);
+		kfree(nlcth);
 	}
+
 	/* Make sure we return success if we flush and there is no helpers */
 	return (found || j == 0) ? 0 : -ENOENT;
 }
@@ -664,20 +741,16 @@ static int __init nfnl_cthelper_init(void)
 static void __exit nfnl_cthelper_exit(void)
 {
 	struct nf_conntrack_helper *cur;
-	struct hlist_node *tmp;
-	int i;
+	struct nfnl_cthelper *nlcth, *n;
 
 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-	for (i=0; i<nf_ct_helper_hsize; i++) {
-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-									hnode) {
-			/* skip non-userspace conntrack helpers. */
-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-				continue;
+	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+		cur = &nlcth->helper;
 
-			nf_conntrack_helper_unregister(cur);
-		}
+		nf_conntrack_helper_unregister(cur);
+		kfree(cur->expect_policy);
+		kfree(nlcth);
 	}
 }
 
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index af832c5..5efb402 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (!skb) {
 		skb_tx_error(entskb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 
 	nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	if (!nlh) {
 		skb_tx_error(entskb);
 		kfree_skb(skb);
-		return NULL;
+		goto nlmsg_failure;
 	}
 	nfmsg = nlmsg_data(nlh);
 	nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	}
 
 	nlh->nlmsg_len = skb->len;
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return skb;
 
 nla_put_failure:
 	skb_tx_error(entskb);
 	kfree_skb(skb);
 	net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+	if (seclen)
+		security_release_secctx(secdata, seclen);
 	return NULL;
 }
 
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 393d359..ef4768a 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
 	if (priv->queues_total > 1) {
 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-			int cpu = smp_processor_id();
+			int cpu = raw_smp_processor_id();
 
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c9fac08..1ff497b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+	"nlk_cb_mutex-ROUTE",
+	"nlk_cb_mutex-1",
+	"nlk_cb_mutex-USERSOCK",
+	"nlk_cb_mutex-FIREWALL",
+	"nlk_cb_mutex-SOCK_DIAG",
+	"nlk_cb_mutex-NFLOG",
+	"nlk_cb_mutex-XFRM",
+	"nlk_cb_mutex-SELINUX",
+	"nlk_cb_mutex-ISCSI",
+	"nlk_cb_mutex-AUDIT",
+	"nlk_cb_mutex-FIB_LOOKUP",
+	"nlk_cb_mutex-CONNECTOR",
+	"nlk_cb_mutex-NETFILTER",
+	"nlk_cb_mutex-IP6_FW",
+	"nlk_cb_mutex-DNRTMSG",
+	"nlk_cb_mutex-KOBJECT_UEVENT",
+	"nlk_cb_mutex-GENERIC",
+	"nlk_cb_mutex-17",
+	"nlk_cb_mutex-SCSITRANSPORT",
+	"nlk_cb_mutex-ECRYPTFS",
+	"nlk_cb_mutex-RDMA",
+	"nlk_cb_mutex-CRYPTO",
+	"nlk_cb_mutex-SMC",
+	"nlk_cb_mutex-23",
+	"nlk_cb_mutex-24",
+	"nlk_cb_mutex-25",
+	"nlk_cb_mutex-26",
+	"nlk_cb_mutex-27",
+	"nlk_cb_mutex-28",
+	"nlk_cb_mutex-29",
+	"nlk_cb_mutex-30",
+	"nlk_cb_mutex-31",
+	"nlk_cb_mutex-MAX_LINKS"
+};
+
 static int netlink_dump(struct sock *sk);
 static void netlink_skb_destructor(struct sk_buff *skb);
 
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
 	} else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
+		lockdep_set_class_and_name(nlk->cb_mutex,
+					   nlk_cb_mutex_keys + protocol,
+					   nlk_cb_mutex_key_strings[protocol]);
 	}
 	init_waitqueue_head(&nlk->wait);
 
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 5cf33df..c699d64 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 err_free_dev:
 	kfree(dev);
 
-	return ERR_PTR(rc);
+	return NULL;
 }
 EXPORT_SYMBOL(nfc_allocate_device);
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e7f6657..267db0d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1661,7 +1661,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		atomic_long_set(&rollover->num, 0);
 		atomic_long_set(&rollover->num_huge, 0);
 		atomic_long_set(&rollover->num_failed, 0);
-		po->rollover = rollover;
 	}
 
 	match = NULL;
@@ -1706,6 +1705,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
+			po->rollover = rollover;
+			rollover = NULL;
 			atomic_inc(&match->sk_ref);
 			__fanout_link(sk, po);
 			err = 0;
@@ -1719,10 +1720,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 	}
 
 out:
-	if (err && rollover) {
-		kfree_rcu(rollover, rcu);
-		po->rollover = NULL;
-	}
+	kfree(rollover);
 	mutex_unlock(&fanout_mutex);
 	return err;
 }
@@ -1746,11 +1744,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
 			list_del(&f->list);
 		else
 			f = NULL;
-
-		if (po->rollover) {
-			kfree_rcu(po->rollover, rcu);
-			po->rollover = NULL;
-		}
 	}
 	mutex_unlock(&fanout_mutex);
 
@@ -3039,6 +3032,7 @@ static int packet_release(struct socket *sock)
 	synchronize_net();
 
 	if (f) {
+		kfree(po->rollover);
 		fanout_release_data(f);
 		kfree(f);
 	}
@@ -3107,6 +3101,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 	if (need_rehook) {
 		if (po->running) {
 			rcu_read_unlock();
+			/* prevents packet_notifier() from calling
+			 * register_prot_hook()
+			 */
+			po->num = 0;
 			__unregister_prot_hook(sk, true);
 			rcu_read_lock();
 			dev_curr = po->prot_hook.dev;
@@ -3115,6 +3113,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 								 dev->ifindex);
 		}
 
+		BUG_ON(po->running);
 		po->num = proto;
 		po->prot_hook.type = proto;
 
@@ -3853,7 +3852,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	void *data = &val;
 	union tpacket_stats_u st;
 	struct tpacket_rollover_stats rstats;
-	struct packet_rollover *rollover;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3932,18 +3930,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		       0);
 		break;
 	case PACKET_ROLLOVER_STATS:
-		rcu_read_lock();
-		rollover = rcu_dereference(po->rollover);
-		if (rollover) {
-			rstats.tp_all = atomic_long_read(&rollover->num);
-			rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-			rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-			data = &rstats;
-			lv = sizeof(rstats);
-		}
-		rcu_read_unlock();
-		if (!rollover)
+		if (!po->rollover)
 			return -EINVAL;
+		rstats.tp_all = atomic_long_read(&po->rollover->num);
+		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+		data = &rstats;
+		lv = sizeof(rstats);
 		break;
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 9ee4631..d55bfc3 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -92,7 +92,6 @@ struct packet_fanout {
 
 struct packet_rollover {
 	int			sock;
-	struct rcu_head		rcu;
 	atomic_long_t		num;
 	atomic_long_t		num_huge;
 	atomic_long_t		num_failed;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c985ecb..ae5ac17 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
 	const int pkt_len = 20;
 	struct qrtr_hdr *hdr;
 	struct sk_buff *skb;
-	u32 *buf;
+	__le32 *buf;
 
 	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
 	if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
 	hdr->dst_node_id = cpu_to_le32(dst_node);
 	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
-	buf = (u32 *)skb_put(skb, pkt_len);
+	buf = (__le32 *)skb_put(skb, pkt_len);
 	memset(buf, 0, pkt_len);
 	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
 	buf[1] = cpu_to_le32(src_node);
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index d921adc..66b3d62 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -104,14 +104,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
 	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
 	struct ib_send_wr *failed_wr;
 	struct ib_reg_wr reg_wr;
-	int ret;
+	int ret, off = 0;
 
 	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
 		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		cpu_relax();
 	}
 
-	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
+	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+				&off, PAGE_SIZE);
 	if (unlikely(ret != ibmr->sg_len))
 		return ret < 0 ? ret : -EINVAL;
 
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8d3a851..de8496e 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -40,7 +40,6 @@
 /*
  * XXX
  *  - build with sparse
- *  - should we limit the size of a mr region?  let transport return failure?
  *  - should we detect duplicate keys on a socket?  hmm.
  *  - an rdma is an mlock, apply rlimit?
  */
@@ -184,7 +183,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 	long i;
 	int ret;
 
-	if (rs->rs_bound_addr == 0) {
+	if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
 		ret = -ENOTCONN; /* XXX not a great errno */
 		goto out;
 	}
@@ -200,6 +199,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 		goto out;
 	}
 
+	/* Restrict the size of the mr irrespective of the underlying transport.
+	 * To account for unaligned mr regions, subtract one from nr_pages.
+	 */
+	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
 		args->vec.addr, args->vec.bytes, nr_pages);
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index f107a96..30a51fe 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,6 +50,9 @@ void rdsdebug(char *fmt, ...)
 #define RDS_FRAG_SHIFT	12
 #define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
 
+/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
+#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
+
 #define RDS_CONG_MAP_BYTES	(65536 / 8)
 #define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
diff --git a/net/rds/send.c b/net/rds/send.c
index f28651b..ad247dc 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -946,6 +946,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 			if (!ret)
 				*allocated_mr = 1;
+			else if (ret == -ENODEV)
+				/* Accommodate the get_mr() case which can fail
+				 * if connection isn't established yet.
+				 */
+				ret = -EAGAIN;
 			break;
 		case RDS_CMSG_ATOMIC_CSWP:
 		case RDS_CMSG_ATOMIC_FADD:
@@ -988,6 +993,26 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
 	return hash;
 }
 
+static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
+{
+	struct rds_rdma_args *args;
+	struct cmsghdr *cmsg;
+
+	for_each_cmsghdr(cmsg, msg) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+
+		if (cmsg->cmsg_level != SOL_RDS)
+			continue;
+
+		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+			args = CMSG_DATA(cmsg);
+			*rdma_bytes += args->remote_vec.bytes;
+		}
+	}
+	return 0;
+}
+
 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 {
 	struct sock *sk = sock->sk;
@@ -1002,6 +1027,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 	long timeo = sock_sndtimeo(sk, nonblock);
 	struct rds_conn_path *cpath;
+	size_t total_payload_len = payload_len, rdma_payload_len = 0;
 
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -1034,6 +1060,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	}
 	release_sock(sk);
 
+	ret = rds_rdma_bytes(msg, &rdma_payload_len);
+	if (ret)
+		goto out;
+
+	total_payload_len += rdma_payload_len;
+	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	if (payload_len > rds_sk_sndbuf(rs)) {
 		ret = -EMSGSIZE;
 		goto out;
@@ -1083,8 +1119,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
 	/* Parse any control messages the user may have included. */
 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
-	if (ret)
+	if (ret) {
+		/* Trigger connection so that it's ready for the next retry */
+		if (ret == -EAGAIN)
+			rds_conn_connect_if_down(conn);
 		goto out;
+	}
 
 	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
 		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 20e2923..78f976d3 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -478,9 +478,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
 	 * we do need to clean up the listen socket here.
 	 */
 	if (rtn->rds_tcp_listen_sock) {
-		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		struct socket *lsock = rtn->rds_tcp_listen_sock;
+
 		rtn->rds_tcp_listen_sock = NULL;
-		flush_work(&rtn->rds_tcp_accept_w);
+		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	}
 }
 
@@ -517,10 +518,10 @@ static void rds_tcp_kill_sock(struct net *net)
 	struct rds_tcp_connection *tc, *_tc;
 	LIST_HEAD(tmp_list);
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
 	rtn->rds_tcp_listen_sock = NULL;
-	flush_work(&rtn->rds_tcp_accept_w);
+	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
@@ -540,8 +541,12 @@ static void rds_tcp_kill_sock(struct net *net)
 void *rds_tcp_listen_sock_def_readable(struct net *net)
 {
 	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+	struct socket *lsock = rtn->rds_tcp_listen_sock;
 
-	return rtn->rds_tcp_listen_sock->sk->sk_user_data;
+	if (!lsock)
+		return NULL;
+
+	return lsock->sk->sk_user_data;
 }
 
 static int rds_tcp_dev_event(struct notifier_block *this,
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 9a1cc89..56ea662 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
 struct socket *rds_tcp_listen_init(struct net *);
-void rds_tcp_listen_stop(struct socket *);
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
 void rds_tcp_listen_data_ready(struct sock *sk);
 int rds_tcp_accept_one(struct socket *sock);
 int rds_tcp_keepalive(struct socket *sock);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 525b624..185a56b 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -227,6 +227,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 	 * before it has been accepted and the accepter has set up their
 	 * data_ready.. we only want to queue listen work for our listening
 	 * socket
+	 *
+	 * (*ready)() may be null if we are racing with netns delete, and
+	 * the listen socket is being torn down.
 	 */
 	if (sk->sk_state == TCP_LISTEN)
 		rds_tcp_accept_work(sk);
@@ -235,7 +238,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
 
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
-	ready(sk);
+	if (ready)
+		ready(sk);
 }
 
 struct socket *rds_tcp_listen_init(struct net *net)
@@ -275,7 +279,7 @@ struct socket *rds_tcp_listen_init(struct net *net)
 	return NULL;
 }
 
-void rds_tcp_listen_stop(struct socket *sock)
+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
 {
 	struct sock *sk;
 
@@ -296,5 +300,6 @@ void rds_tcp_listen_stop(struct socket *sock)
 
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
+	flush_work(acceptor);
 	sock_release(sock);
 }
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index 669a890..cc377bb 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -192,10 +192,6 @@ static void rmnet_map_flush_packet_work(struct work_struct *work)
 			memset(&config->agg_time, 0, sizeof(struct timespec));
 		}
 		config->agg_state = RMNET_MAP_AGG_IDLE;
-	} else {
-		/* How did we get here? */
-		LOGE("Ran queued command when state %s",
-		     "is idle. State machine likely broken");
 	}
 
 	spin_unlock_irqrestore(&config->agg_lock, flags);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7..b099b64 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 		rxrpc_conn_retransmit_call(conn, skb);
 		return 0;
 
+	case RXRPC_PACKET_TYPE_BUSY:
+		/* Just ignore BUSY packets for now. */
+		return 0;
+
 	case RXRPC_PACKET_TYPE_ABORT:
 		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
 				  &wtmp, sizeof(wtmp)) < 0)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 44fb8d8..1060d14 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -649,6 +649,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_peer *peer;
 	unsigned int mtu;
+	bool wake = false;
 	u32 rwind = ntohl(ackinfo->rwind);
 
 	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
@@ -656,9 +657,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
 	       rwind, ntohl(ackinfo->jumbo_max));
 
-	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
-	call->tx_winsize = rwind;
+	if (call->tx_winsize != rwind) {
+		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+		if (rwind > call->tx_winsize)
+			wake = true;
+		call->tx_winsize = rwind;
+	}
+
 	if (call->cong_ssthresh > rwind)
 		call->cong_ssthresh = rwind;
 
@@ -672,6 +678,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 		spin_unlock_bh(&peer->lock);
 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
 	}
+
+	if (wake)
+		wake_up(&call->waitq);
 }
 
 /*
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1308bbf..b56d579 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -200,9 +200,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
 
 	if (p->set_tc_index) {
+		int wlen = skb_network_offset(skb);
+
 		switch (tc_skb_protocol(skb)) {
 		case htons(ETH_P_IP):
-			if (skb_cow_head(skb, sizeof(struct iphdr)))
+			wlen += sizeof(struct iphdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -210,7 +214,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			break;
 
 		case htons(ETH_P_IPV6):
-			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+			wlen += sizeof(struct ipv6hdr);
+			if (!pskb_may_pull(skb, wlen) ||
+			    skb_try_make_writable(skb, wlen))
 				goto drop;
 
 			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 95d7b15..e371a0d 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
 /* Lookup timer debug name. */
 const char *sctp_tname(const sctp_subtype_t id)
 {
-	if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
+	if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
 		return sctp_timer_tbl[id.timeout];
 	return "unknown_timer";
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 5825853..0994ce4 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 }
 
 static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
-				    struct sctp_sndrcvinfo *sinfo,
-				    struct list_head *queue, int msg_len)
+				    struct sctp_sndrcvinfo *sinfo, int msg_len)
 {
+	struct sctp_outq *q = &asoc->outqueue;
 	struct sctp_chunk *chk, *temp;
 
-	list_for_each_entry_safe(chk, temp, queue, list) {
+	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
 		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
 			continue;
 
 		list_del_init(&chk->list);
+		q->out_qlen -= chk->skb->len;
 		asoc->sent_cnt_removable--;
 		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
 			return;
 	}
 
-	sctp_prsctp_prune_unsent(asoc, sinfo,
-				 &asoc->outqueue.out_chunk_list,
-				 msg_len);
+	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
 }
 
 /* Mark all the eligible packets on a transport for retransmission.  */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c062cea..c2ab864 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -82,8 +82,8 @@
 /* Forward declarations for internal helper functions. */
 static int sctp_writeable(struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
-				size_t msg_len);
+static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+				size_t msg_len, struct sock **orig_sk);
 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
 static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1957,9 +1957,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	if (!sctp_wspace(asoc)) {
-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
-		if (err)
+		/* sk can be changed by peel off when waiting for buf. */
+		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
+		if (err) {
+			if (err == -ESRCH) {
+				/* asoc is already dead. */
+				new_asoc = NULL;
+				err = -EPIPE;
+			}
 			goto out_free;
+		}
 	}
 
 	/* If an address is passed with the sendto/sendmsg call, it is used
@@ -4771,12 +4778,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
 	if (!asoc)
 		return -EINVAL;
 
-	/* If there is a thread waiting on more sndbuf space for
-	 * sending on this asoc, it cannot be peeled.
-	 */
-	if (waitqueue_active(&asoc->wait))
-		return -EBUSY;
-
 	/* An association cannot be branched off from an already peeled-off
 	 * socket, nor is this supported for tcp style sockets.
 	 */
@@ -7440,7 +7441,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
 
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
-				size_t msg_len)
+				size_t msg_len, struct sock **orig_sk)
 {
 	struct sock *sk = asoc->base.sk;
 	int err = 0;
@@ -7457,10 +7458,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 	for (;;) {
 		prepare_to_wait_exclusive(&asoc->wait, &wait,
 					  TASK_INTERRUPTIBLE);
+		if (asoc->base.dead)
+			goto do_dead;
 		if (!*timeo_p)
 			goto do_nonblock;
-		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
-		    asoc->base.dead)
+		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
 			goto do_error;
 		if (signal_pending(current))
 			goto do_interrupted;
@@ -7473,11 +7475,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
 		lock_sock(sk);
+		if (sk != asoc->base.sk) {
+			release_sock(sk);
+			sk = asoc->base.sk;
+			lock_sock(sk);
+		}
 
 		*timeo_p = current_timeo;
 	}
 
 out:
+	*orig_sk = sk;
 	finish_wait(&asoc->wait, &wait);
 
 	/* Release the association's refcnt.  */
@@ -7485,6 +7493,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 
 	return err;
 
+do_dead:
+	err = -ESRCH;
+	goto out;
+
 do_error:
 	err = -EPIPE;
 	goto out;
diff --git a/net/socket.c b/net/socket.c
index a4fb472..fc0b609 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1720,6 +1720,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 	/* We assume all kernel code knows the size of sockaddr_storage */
 	msg.msg_namelen = 0;
 	msg.msg_iocb = NULL;
+	msg.msg_flags = 0;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 	err = sock_recvmsg(sock, &msg, flags);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 5db68b3..600eacc 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 
 static void rpc_set_active(struct rpc_task *task)
 {
-	trace_rpc_task_begin(task->tk_client, task, NULL);
-
 	rpc_task_set_debuginfo(task);
 	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
+	trace_rpc_task_begin(task->tk_client, task, NULL);
 }
 
 /*
diff --git a/net/tipc/server.c b/net/tipc/server.c
index f89c0c2..f4c1b18 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,7 +86,6 @@ struct outqueue_entry {
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
@@ -104,7 +103,6 @@ static void tipc_conn_kref_release(struct kref *kref)
 		}
 		saddr->scope = -TIPC_NODE_SCOPE;
 		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
 
@@ -194,19 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_sock_release(struct tipc_conn *con)
+static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
 
-	if (con->conid)
-		s->tipc_conn_release(con->conid, con->usr_data);
-
-	tipc_unregister_callbacks(con);
-}
-
-static void tipc_close_conn(struct tipc_conn *con)
-{
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+		tipc_unregister_callbacks(con);
+
+		if (con->conid)
+			s->tipc_conn_release(con->conid, con->usr_data);
 
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
@@ -319,6 +313,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
 	newcon->usr_data = s->tipc_conn_new(newcon->conid);
 	if (!newcon->usr_data) {
 		sock_release(newsock);
+		conn_put(newcon);
 		return -ENOMEM;
 	}
 
@@ -625,14 +620,12 @@ int tipc_server_start(struct tipc_server *s)
 void tipc_server_stop(struct tipc_server *s)
 {
 	struct tipc_conn *con;
-	int total = 0;
 	int id;
 
 	spin_lock_bh(&s->idr_lock);
-	for (id = 0; total < s->idr_in_use; id++) {
+	for (id = 0; s->idr_in_use; id++) {
 		con = idr_find(&s->conn_idr, id);
 		if (con) {
-			total++;
 			spin_unlock_bh(&s->idr_lock);
 			tipc_close_conn(con);
 			spin_lock_bh(&s->idr_lock);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65..271cd66 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
+	struct tipc_subscriber *subscriber = sub->subscriber;
+
+	spin_lock_bh(&subscriber->lock);
+	tipc_nametbl_unsubscribe(sub);
+	spin_unlock_bh(&subscriber->lock);
 
 	/* Notify subscriber of timeout */
 	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
 	struct tipc_subscriber *subscriber = sub->subscriber;
 
 	spin_lock_bh(&subscriber->lock);
-	tipc_nametbl_unsubscribe(sub);
 	list_del(&sub->subscrp_list);
 	atomic_dec(&tn->subscription_count);
 	spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
 		if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
 			continue;
 
+		tipc_nametbl_unsubscribe(sub);
 		tipc_subscrp_get(sub);
 		spin_unlock_bh(&subscriber->lock);
 		tipc_subscrp_delete(sub);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b58dc95..107375d 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -371,10 +371,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
 			goto rcu_out;
 	}
 
-	tipc_rcv(sock_net(sk), skb, b);
-	rcu_read_unlock();
-	return 0;
-
 rcu_out:
 	rcu_read_unlock();
 out:
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 8a398b3..ee12e17 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1101,10 +1101,19 @@ static const struct proto_ops vsock_dgram_ops = {
 	.sendpage = sock_no_sendpage,
 };
 
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+	if (!transport->cancel_pkt)
+		return -EOPNOTSUPP;
+
+	return transport->cancel_pkt(vsk);
+}
+
 static void vsock_connect_timeout(struct work_struct *work)
 {
 	struct sock *sk;
 	struct vsock_sock *vsk;
+	int cancel = 0;
 
 	vsk = container_of(work, struct vsock_sock, dwork.work);
 	sk = sk_vsock(vsk);
@@ -1115,8 +1124,11 @@ static void vsock_connect_timeout(struct work_struct *work)
 		sk->sk_state = SS_UNCONNECTED;
 		sk->sk_err = ETIMEDOUT;
 		sk->sk_error_report(sk);
+		cancel = 1;
 	}
 	release_sock(sk);
+	if (cancel)
+		vsock_transport_cancel_pkt(vsk);
 
 	sock_put(sk);
 }
@@ -1223,11 +1235,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
 			err = sock_intr_errno(timeout);
 			sk->sk_state = SS_UNCONNECTED;
 			sock->state = SS_UNCONNECTED;
+			vsock_transport_cancel_pkt(vsk);
 			goto out_wait;
 		} else if (timeout == 0) {
 			err = -ETIMEDOUT;
 			sk->sk_state = SS_UNCONNECTED;
 			sock->state = SS_UNCONNECTED;
+			vsock_transport_cancel_pkt(vsk);
 			goto out_wait;
 		}
 
@@ -1524,8 +1538,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 	long timeout;
 	int err;
 	struct vsock_transport_send_notify_data send_data;
-
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	sk = sock->sk;
 	vsk = vsock_sk(sk);
@@ -1568,11 +1581,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (err < 0)
 		goto out;
 
-
 	while (total_written < len) {
 		ssize_t written;
 
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		add_wait_queue(sk_sleep(sk), &wait);
 		while (vsock_stream_has_space(vsk) == 0 &&
 		       sk->sk_err == 0 &&
 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1581,33 +1593,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* Don't wait for non-blocking sockets. */
 			if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			err = transport->notify_send_pre_block(vsk, &send_data);
 			if (err < 0) {
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
 
 			release_sock(sk);
-			timeout = schedule_timeout(timeout);
+			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 			lock_sock(sk);
 			if (signal_pending(current)) {
 				err = sock_intr_errno(timeout);
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			} else if (timeout == 0) {
 				err = -EAGAIN;
-				finish_wait(sk_sleep(sk), &wait);
+				remove_wait_queue(sk_sleep(sk), &wait);
 				goto out_err;
 			}
-
-			prepare_to_wait(sk_sleep(sk), &wait,
-					TASK_INTERRUPTIBLE);
 		}
-		finish_wait(sk_sleep(sk), &wait);
+		remove_wait_queue(sk_sleep(sk), &wait);
 
 		/* These checks occur both as part of and after the loop
 		 * conditional since we need to check before and after
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 62c056e..9c07c76 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -57,6 +57,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
 	pkt->len		= len;
 	pkt->hdr.len		= cpu_to_le32(len);
 	pkt->reply		= info->reply;
+	pkt->vsk		= info->vsk;
 
 	if (info->msg && len > 0) {
 		pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
 		.type = type,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_REQUEST,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
 			  VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
 			 (mode & SEND_SHUTDOWN ?
 			  VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
 		.msg = msg,
 		.pkt_len = len,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
 		.op = VIRTIO_VSOCK_OP_RST,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
 		.reply = !!pkt,
+		.vsk = vsk,
 	};
 
 	/* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
 		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
 		.remote_port = le32_to_cpu(pkt->hdr.src_port),
 		.reply = true,
+		.vsk = vsk,
 	};
 
 	return virtio_transport_send_pkt_info(vsk, &info);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 77fbfbd..178acf9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1393,6 +1393,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
 		newp->xfrm_nr = old->xfrm_nr;
 		newp->index = old->index;
 		newp->type = old->type;
+		newp->family = old->family;
 		memcpy(newp->xfrm_vec, old->xfrm_vec,
 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
diff --git a/scripts/coccicheck b/scripts/coccicheck
index ec487b8..c36b04b 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -29,12 +29,6 @@
 	VERBOSE=0
 fi
 
-if [ -z "$J" ]; then
-	NPROC=$(getconf _NPROCESSORS_ONLN)
-else
-	NPROC="$J"
-fi
-
 FLAGS="--very-quiet"
 
 # You can use SPFLAGS to append extra arguments to coccicheck or override any
@@ -69,6 +63,9 @@
     # Take only the last argument, which is the C file to test
     shift $(( $# - 1 ))
     OPTIONS="$COCCIINCLUDE $1"
+
+    # No need to parallelize Coccinelle since this mode takes one input file.
+    NPROC=1
 else
     ONLINE=0
     if [ "$KBUILD_EXTMOD" = "" ] ; then
@@ -76,6 +73,12 @@
     else
         OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
     fi
+
+    if [ -z "$J" ]; then
+        NPROC=$(getconf _NPROCESSORS_ONLN)
+    else
+        NPROC="$J"
+    fi
 fi
 
 if [ "$KBUILD_EXTMOD" != "" ] ; then
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index 73a2c7d..53234e8 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -19,4 +19,6 @@
 
 	. = ALIGN(8);
 	.init_array		0 : { *(SORT(.init_array.*)) *(.init_array) }
+
+	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
 }
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 71b4a8a..7badec3 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -39,10 +39,9 @@
 	false; \
 fi ; \
 $(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
 tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
-	$(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+	--transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 0e87629..2b3def1 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
 			ima_hash_algo = HASH_ALGO_SHA1;
 		else if (strncmp(str, "md5", 3) == 0)
 			ima_hash_algo = HASH_ALGO_MD5;
+		else
+			return 1;
 		goto out;
 	}
 
@@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
 			break;
 		}
 	}
+	if (i == HASH_ALGO__LAST)
+		return 1;
 out:
 	hash_setup_done = 1;
 	return 1;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 5030fcf..cb7f8f7 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -250,11 +250,12 @@ static int construct_key(struct key *key, const void *callout_info,
  * The keyring selected is returned with an extra reference upon it which the
  * caller must release.
  */
-static void construct_get_dest_keyring(struct key **_dest_keyring)
+static int construct_get_dest_keyring(struct key **_dest_keyring)
 {
 	struct request_key_auth *rka;
 	const struct cred *cred = current_cred();
 	struct key *dest_keyring = *_dest_keyring, *authkey;
+	int ret;
 
 	kenter("%p", dest_keyring);
 
@@ -263,6 +264,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
 		/* the caller supplied one */
 		key_get(dest_keyring);
 	} else {
+		bool do_perm_check = true;
+
 		/* use a default keyring; falling through the cases until we
 		 * find one that we actually have */
 		switch (cred->jit_keyring) {
@@ -277,8 +280,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
 					dest_keyring =
 						key_get(rka->dest_keyring);
 				up_read(&authkey->sem);
-				if (dest_keyring)
+				if (dest_keyring) {
+					do_perm_check = false;
 					break;
+				}
 			}
 
 		case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -313,11 +318,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
 		default:
 			BUG();
 		}
+
+		/*
+		 * Require Write permission on the keyring.  This is essential
+		 * because the default keyring may be the session keyring, and
+		 * joining a keyring only requires Search permission.
+		 *
+		 * However, this check is skipped for the "requestor keyring" so
+		 * that /sbin/request-key can itself use request_key() to add
+		 * keys to the original requestor's destination keyring.
+		 */
+		if (dest_keyring && do_perm_check) {
+			ret = key_permission(make_key_ref(dest_keyring, 1),
+					     KEY_NEED_WRITE);
+			if (ret) {
+				key_put(dest_keyring);
+				return ret;
+			}
+		}
 	}
 
 	*_dest_keyring = dest_keyring;
 	kleave(" [dk %d]", key_serial(dest_keyring));
-	return;
+	return 0;
 }
 
 /*
@@ -443,11 +466,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 	if (ctx->index_key.type == &key_type_keyring)
 		return ERR_PTR(-EPERM);
 
-	user = key_user_lookup(current_fsuid());
-	if (!user)
-		return ERR_PTR(-ENOMEM);
+	ret = construct_get_dest_keyring(&dest_keyring);
+	if (ret)
+		goto error;
 
-	construct_get_dest_keyring(&dest_keyring);
+	user = key_user_lookup(current_fsuid());
+	if (!user) {
+		ret = -ENOMEM;
+		goto error_put_dest_keyring;
+	}
 
 	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
 	key_user_put(user);
@@ -462,7 +489,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 	} else if (ret == -EINPROGRESS) {
 		ret = 0;
 	} else {
-		goto couldnt_alloc_key;
+		goto error_put_dest_keyring;
 	}
 
 	key_put(dest_keyring);
@@ -472,8 +499,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 construction_failed:
 	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
 	key_put(key);
-couldnt_alloc_key:
+error_put_dest_keyring:
 	key_put(dest_keyring);
+error:
 	kleave(" = %d", ret);
 	return ERR_PTR(ret);
 }
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e26ecb0..e53e076 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -98,7 +98,7 @@
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
 
 #ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-int selinux_enforcing;
+int selinux_enforcing __aligned(0x1000) __attribute__((section(".bss_rtic")));
 
 static int __init enforcing_setup(char *str)
 {
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 4fc68b1..48f6aee 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -149,7 +149,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
 				err = -ENXIO;
 				goto _error;
 			}
+			mutex_lock(&pcm->open_mutex);
 			err = snd_pcm_info_user(substream, info);
+			mutex_unlock(&pcm->open_mutex);
 		_error:
 			mutex_unlock(&register_mutex);
 			return err;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index f8d0bd8..2dfe772 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -267,8 +267,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
 				runtime->rate);
 		*audio_tstamp = ns_to_timespec(audio_nsecs);
 	}
-	runtime->status->audio_tstamp = *audio_tstamp;
-	runtime->status->tstamp = *curr_tstamp;
+	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+		runtime->status->audio_tstamp = *audio_tstamp;
+		runtime->status->tstamp = *curr_tstamp;
+	}
 
 	/*
 	 * re-take a driver timestamp to let apps detect if the reference tstamp
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b450a27..16f8124 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
 	return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+				     struct snd_rawmidi_info *info)
 {
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_str *pstr;
 	struct snd_rawmidi_substream *substream;
 
-	mutex_lock(&register_mutex);
 	rmidi = snd_rawmidi_search(card, info->device);
-	mutex_unlock(&register_mutex);
 	if (!rmidi)
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
 	}
 	return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+	int ret;
+
+	mutex_lock(&register_mutex);
+	ret = __snd_rawmidi_info_select(card, info);
+	mutex_unlock(&register_mutex);
+	return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 37d9cfb..b80985f 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
 	unsigned long freq;
 
 	t = tmr->timeri->timer;
-	if (snd_BUG_ON(!t))
+	if (!t)
 		return -EINVAL;
 
 	freq = tmr->preferred_resolution;
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 59127b6..e00f7e3 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
 	struct snd_timer *t;
 
 	tu = file->private_data;
-	if (snd_BUG_ON(!tu->timeri))
-		return -ENXIO;
+	if (!tu->timeri)
+		return -EBADFD;
 	t = tu->timeri->timer;
-	if (snd_BUG_ON(!t))
-		return -ENXIO;
+	if (!t)
+		return -EBADFD;
 	memset(&info, 0, sizeof(info));
 	info.card = t->card ? t->card->number : -1;
 	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
@@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
 	struct snd_timer_status32 status;
 	
 	tu = file->private_data;
-	if (snd_BUG_ON(!tu->timeri))
-		return -ENXIO;
+	if (!tu->timeri)
+		return -EBADFD;
 	memset(&status, 0, sizeof(status));
 	status.tstamp.tv_sec = tu->tstamp.tv_sec;
 	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index 81acc20..f21633c 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	memset(pcm_chmap, 0, sizeof(pcm_chmap));
 	chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
 
-	for (i = 0; i < sizeof(chmap); i++)
+	for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
 		ucontrol->value.integer.value[i] = pcm_chmap[i];
 
 	return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 5cb7e04..293f3f2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2305,6 +2305,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* AMD Hudson */
 	{ PCI_DEVICE(0x1022, 0x780d),
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+	/* AMD Raven */
+	{ PCI_DEVICE(0x1022, 0x15e3),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
 	/* ATI HDMI */
 	{ PCI_DEVICE(0x1002, 0x0002),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f2e4e99..2c3065c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -261,6 +261,7 @@ enum {
 	CXT_FIXUP_HP_530,
 	CXT_FIXUP_CAP_MIX_AMP_5047,
 	CXT_FIXUP_MUTE_LED_EAPD,
+	CXT_FIXUP_HP_DOCK,
 	CXT_FIXUP_HP_SPECTRE,
 	CXT_FIXUP_HP_GATE_MIC,
 };
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt_fixup_mute_led_eapd,
 	},
+	[CXT_FIXUP_HP_DOCK] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x16, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ }
+		}
+	},
 	[CXT_FIXUP_HP_SPECTRE] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -872,6 +882,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
 	{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
 	{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
 	{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+	{ .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 80c40a1..ba40596 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4419,7 +4419,7 @@ static void alc_no_shutup(struct hda_codec *codec)
 static void alc_fixup_no_shutup(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
-	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+	if (action == HDA_FIXUP_ACT_PROBE) {
 		struct alc_spec *spec = codec->spec;
 		spec->shutup = alc_no_shutup;
 	}
@@ -4854,6 +4854,7 @@ enum {
 	ALC286_FIXUP_HP_GPIO_LED,
 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
 	ALC280_FIXUP_HP_DOCK_PINS,
+	ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
 	ALC280_FIXUP_HP_9480M,
 	ALC288_FIXUP_DELL_HEADSET_MODE,
 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5394,6 +5395,16 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC280_FIXUP_HP_GPIO4
 	},
+	[ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1b, 0x21011020 }, /* line-out */
+			{ 0x18, 0x2181103f }, /* line-in */
+			{ },
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+	},
 	[ALC280_FIXUP_HP_9480M] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc280_fixup_hp_9480m,
@@ -5646,7 +5657,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
-	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5812,6 +5823,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -6272,7 +6284,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0703:
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
-		alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
 		break;
 
 	}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 3bdd819..757af79 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1365,7 +1365,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 	const struct wmfw_region *region;
 	const struct wm_adsp_region *mem;
 	const char *region_name;
-	char *file, *text;
+	char *file, *text = NULL;
 	struct wm_adsp_buf *buf;
 	unsigned int reg;
 	int regions = 0;
@@ -1526,10 +1526,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 			 regions, le32_to_cpu(region->len), offset,
 			 region_name);
 
+		if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+		    firmware->size) {
+			adsp_err(dsp,
+				 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+				 file, regions, region_name,
+				 le32_to_cpu(region->len), firmware->size);
+			ret = -EINVAL;
+			goto out_fw;
+		}
+
 		if (text) {
 			memcpy(text, region->data, le32_to_cpu(region->len));
 			adsp_info(dsp, "%s: %s\n", file, text);
 			kfree(text);
+			text = NULL;
 		}
 
 		if (reg) {
@@ -1574,6 +1585,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 	regmap_async_complete(regmap);
 	wm_adsp_buf_free(&buf_list);
 	release_firmware(firmware);
+	kfree(text);
 out:
 	kfree(file);
 
@@ -2054,6 +2066,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 		}
 
 		if (reg) {
+			if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+			    firmware->size) {
+				adsp_err(dsp,
+					 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+					 file, blocks, region_name,
+					 le32_to_cpu(blk->len),
+					 firmware->size);
+				ret = -EINVAL;
+				goto out_fw;
+			}
+
 			buf = wm_adsp_buf_alloc(blk->data,
 						le32_to_cpu(blk->len),
 						&buf_list);
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index c1610a0..3cf522d 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -166,9 +166,11 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(prl->dev);
 	reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
 	reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
 	img_prl_out_writel(prl, reg, IMG_PRL_OUT_CTL);
+	pm_runtime_put(prl->dev);
 
 	return 0;
 }
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index ea162fb..d5adc04 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -295,6 +295,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
 	struct uuid_module *module;
 	struct firmware stripped_fw;
 	unsigned int safe_file;
+	int ret = 0;
 
 	/* Get the FW pointer to derive ADSP header */
 	stripped_fw.data = fw->data;
@@ -343,8 +344,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
 
 	for (i = 0; i < num_entry; i++, mod_entry++) {
 		module = kzalloc(sizeof(*module), GFP_KERNEL);
-		if (!module)
-			return -ENOMEM;
+		if (!module) {
+			ret = -ENOMEM;
+			goto free_uuid_list;
+		}
 
 		uuid_bin = (uuid_le *)mod_entry->uuid.id;
 		memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
@@ -355,8 +358,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
 		size = sizeof(int) * mod_entry->instance_max_count;
 		module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
 		if (!module->instance_id) {
-			kfree(module);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto free_uuid_list;
 		}
 
 		list_add_tail(&module->list, &skl->uuid_list);
@@ -367,6 +370,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
 	}
 
 	return 0;
+
+free_uuid_list:
+	skl_freeup_uuid_list(skl);
+	return ret;
 }
 
 void skl_freeup_uuid_list(struct skl_sst *ctx)
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index abb5eaa..7d92a24 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
 	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
 	struct device *dev = rsnd_priv_to_dev(priv);
 	u32 data;
+	u32 path[] = {
+		[1] = 1 << 0,
+		[5] = 1 << 8,
+		[6] = 1 << 12,
+		[9] = 1 << 15,
+	};
 
 	if (!mix && !dvc)
 		return 0;
 
+	if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
+		return -ENXIO;
+
 	if (mix) {
 		struct rsnd_dai *rdai;
 		struct rsnd_mod *src;
 		struct rsnd_dai_stream *tio;
 		int i;
-		u32 path[] = {
-			[0] = 0,
-			[1] = 1 << 0,
-			[2] = 0,
-			[3] = 0,
-			[4] = 0,
-			[5] = 1 << 8
-		};
 
 		/*
 		 * it is assuming that integrater is well understanding about
@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
 	} else {
 		struct rsnd_mod *src = rsnd_io_to_mod_src(io);
 
-		u32 path[] = {
-			[0] = 0x30000,
-			[1] = 0x30001,
-			[2] = 0x40000,
-			[3] = 0x10000,
-			[4] = 0x20000,
-			[5] = 0x40100
+		u8 cmd_case[] = {
+			[0] = 0x3,
+			[1] = 0x3,
+			[2] = 0x4,
+			[3] = 0x1,
+			[4] = 0x2,
+			[5] = 0x4,
+			[6] = 0x1,
+			[9] = 0x2,
 		};
 
-		data = path[rsnd_mod_id(src)];
+		data = path[rsnd_mod_id(src)] |
+			cmd_case[rsnd_mod_id(src)] << 16;
 	}
 
 	dev_dbg(dev, "ctu/mix path = 0x%08x", data);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f181410..91b444d 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -978,10 +978,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
 		return -ENOMEM;
 
 	ret = snd_ctl_add(card, kctrl);
-	if (ret < 0) {
-		snd_ctl_free_one(kctrl);
+	if (ret < 0)
 		return ret;
-	}
 
 	cfg->update = update;
 	cfg->card = card;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 6bc93cb..edeb74a 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -361,6 +361,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
 	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
 }
 
+static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
+{
+	struct rsnd_mod *mod = rsnd_mod_get(dma);
+	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	volatile void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
+	u32 val = ioread32(addr);
+
+	val &= ~mask;
+	val |= (data & mask);
+
+	iowrite32(val, addr);
+}
+
 static int rsnd_dmapp_stop(struct rsnd_mod *mod,
 			   struct rsnd_dai_stream *io,
 			   struct rsnd_priv *priv)
@@ -368,10 +382,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
 	int i;
 
-	rsnd_dmapp_write(dma, 0, PDMACHCR);
+	rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);
 
 	for (i = 0; i < 1024; i++) {
-		if (0 == rsnd_dmapp_read(dma, PDMACHCR))
+		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
 			return 0;
 		udelay(1);
 	}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6cb6db0..560cf4b 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -172,10 +172,15 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io)
 {
 	struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
 	struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+	u32 mods;
 
-	return rsnd_ssi_multi_slaves_runtime(io) |
-		1 << rsnd_mod_id(ssi_mod) |
-		1 << rsnd_mod_id(ssi_parent_mod);
+	mods = rsnd_ssi_multi_slaves_runtime(io) |
+		1 << rsnd_mod_id(ssi_mod);
+
+	if (ssi_parent_mod)
+		mods |= 1 << rsnd_mod_id(ssi_parent_mod);
+
+	return mods;
 }
 
 u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 6f9b388..3f95d6b 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -44,7 +44,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
 	mask1 = (1 << 4) | (1 << 20);	/* mask sync bit */
 	mask2 = (1 << 4);		/* mask sync bit */
 	val1  = val2  = 0;
-	if (rsnd_ssi_is_pin_sharing(io)) {
+	if (id == 8) {
+		/*
+		 * SSI8 pin is shared with SSI7, nothing to do.
+		 */
+	} else if (rsnd_ssi_is_pin_sharing(io)) {
 		int shift = -1;
 
 		switch (id) {
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 0e1c3ee..9735b4c 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -364,6 +364,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
 	struct uniperif *reader = priv->dai_data.uni;
 	int ret;
 
+	reader->substream = substream;
+
 	if (!UNIPERIF_TYPE_IS_TDM(reader))
 		return 0;
 
@@ -393,6 +395,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
 		/* Stop the reader */
 		uni_reader_stop(reader);
 	}
+	reader->substream = NULL;
 }
 
 static const struct snd_soc_dai_ops uni_reader_dai_ops = {
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 8238180..09c6e29 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_SOURCE))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
 			return cs;
 	}
 
@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_SELECTOR))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+			if (cs->bLength < 5 + cs->bNrInPins)
+				return NULL;
 			return cs;
+		}
 	}
 
 	return NULL;
@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
 					     ctrl_iface->extralen,
 					     cs, UAC2_CLOCK_MULTIPLIER))) {
-		if (cs->bClockID == clock_id)
+		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
 			return cs;
 	}
 
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 3501ff9..98f879f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -216,6 +216,11 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
 				    int index, char *buf, int maxlen)
 {
 	int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
+
+	if (len < 0)
+		return 0;
+
+	buf[len] = 0;
 	return len;
 }
 
@@ -1607,6 +1612,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
 	__u8 *bmaControls;
 
 	if (state->mixer->protocol == UAC_VERSION_1) {
+		if (hdr->bLength < 7) {
+			usb_audio_err(state->chip,
+				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+				      unitid);
+			return -EINVAL;
+		}
 		csize = hdr->bControlSize;
 		if (!csize) {
 			usb_audio_dbg(state->chip,
@@ -1624,6 +1635,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
 		}
 	} else if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_feature_unit_descriptor *ftr = _ftr;
+		if (hdr->bLength < 6) {
+			usb_audio_err(state->chip,
+				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+				      unitid);
+			return -EINVAL;
+		}
 		csize = 4;
 		channels = (hdr->bLength - 6) / 4 - 1;
 		bmaControls = ftr->bmaControls;
@@ -2344,7 +2361,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	const struct usbmix_name_map *map;
 	char **namelist;
 
-	if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+	if (desc->bLength < 5 || !desc->bNrInPins ||
+	    desc->bLength < 5 + desc->bNrInPins) {
 		usb_audio_err(state->chip,
 			"invalid SELECTOR UNIT descriptor %d\n", unitid);
 		return -EINVAL;
@@ -2414,19 +2432,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	kctl->private_value = (unsigned long)namelist;
 	kctl->private_free = usb_mixer_selector_elem_free;
 
-	nameid = uac_selector_unit_iSelector(desc);
+	/* check the static mapping table first */
 	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-	if (len)
-		;
-	else if (nameid)
-		snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-					 sizeof(kctl->id.name));
-	else {
-		len = get_term_name(state, &state->oterm,
+	if (!len) {
+		/* no mapping ? */
+		/* if iSelector is given, use it */
+		nameid = uac_selector_unit_iSelector(desc);
+		if (nameid)
+			len = snd_usb_copy_string_desc(state, nameid,
+						       kctl->id.name,
+						       sizeof(kctl->id.name));
+		/* ... or pick up the terminal name next */
+		if (!len)
+			len = get_term_name(state, &state->oterm,
 				    kctl->id.name, sizeof(kctl->id.name), 0);
+		/* ... or use the fixed string "USB" as the last resort */
 		if (!len)
 			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+		/* and add the proper suffix */
 		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
 			append_ctl_name(kctl, " Clock Source");
 		else if ((state->oterm.type & 0xff00) == 0x0100)
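Two related fixes here: snd_usb_copy_string_desc() now turns a negative usb_string() result into 0 and always NUL-terminates the buffer, and the selector naming becomes an explicit fallback chain (static map, then iSelector, then the output terminal name, then the literal "USB"). The standalone sketch below mirrors the copy helper's contract and the last-resort fallback; fetch_string() and copy_string_desc() are hypothetical stand-ins, not the driver's API.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for usb_string(): returns the string length on
 * success or a negative error code, and does not NUL-terminate for us. */
static int fetch_string(int index, char *buf, int maxlen)
{
	const char *name = "Internal Clock";
	int len = (int)strlen(name);

	if (index != 1)
		return -1;		/* simulate a missing string descriptor */
	if (len > maxlen)
		len = maxlen;
	memcpy(buf, name, len);
	return len;
}

/* Always leaves the result usable: 0 is returned on failure, and the copied
 * string is NUL-terminated even when it filled the whole buffer. */
static int copy_string_desc(int index, char *buf, int maxlen)
{
	int len = fetch_string(index, buf, maxlen - 1);

	if (len < 0)
		return 0;
	buf[len] = 0;
	return len;
}

int main(void)
{
	char name[16];

	if (!copy_string_desc(2, name, sizeof(name)))	/* index 2 fails... */
		strcpy(name, "USB");			/* ...so fall back to a fixed label */
	printf("%s\n", name);
	return 0;
}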
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7613b9e..1cd7f8b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1170,10 +1170,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+	case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
 		return true;
 	}
 	return false;
@@ -1206,7 +1207,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
 			break;
 		}
 		mdelay(20);
-	} else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
 		/* Vendor mode switch cmd is required. */
 		switch (fmt->altsetting) {
 		case 3: /* DSD mode (DSD_U32) requested */
@@ -1376,7 +1377,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 	}
 
 	/* TEAC devices with USB DAC functionality */
-	if (is_teac_50X_dac(chip->usb_id)) {
+	if (is_teac_dsd_dac(chip->usb_id)) {
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bc7adb8..60a94b3 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
 	for (;;) {
 		readp = &record[records_read];
 		records_read += fread(readp, sizeof(struct kvp_record),
-					ENTRIES_PER_BLOCK * num_blocks,
-					filep);
+				ENTRIES_PER_BLOCK * num_blocks - records_read,
+				filep);
 
 		if (ferror(filep)) {
-			syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
+			syslog(LOG_ERR,
+				"Failed to read file, pool: %d; error: %d %s",
+				 pool, errno, strerror(errno));
+			kvp_release_lock(pool);
 			exit(EXIT_FAILURE);
 		}
 
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
 
 			if (record == NULL) {
 				syslog(LOG_ERR, "malloc failed");
+				kvp_release_lock(pool);
 				exit(EXIT_FAILURE);
 			}
 			continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
 	fclose(filep);
 	kvp_release_lock(pool);
 }
+
 static int kvp_file_init(void)
 {
 	int  fd;
-	FILE *filep;
-	size_t records_read;
 	char *fname;
-	struct kvp_record *record;
-	struct kvp_record *readp;
-	int num_blocks;
 	int i;
 	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
 
 	for (i = 0; i < KVP_POOL_COUNT; i++) {
 		fname = kvp_file_info[i].fname;
-		records_read = 0;
-		num_blocks = 1;
 		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
 		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
 		if (fd == -1)
 			return 1;
 
-
-		filep = fopen(fname, "re");
-		if (!filep) {
-			close(fd);
-			return 1;
-		}
-
-		record = malloc(alloc_unit * num_blocks);
-		if (record == NULL) {
-			fclose(filep);
-			close(fd);
-			return 1;
-		}
-		for (;;) {
-			readp = &record[records_read];
-			records_read += fread(readp, sizeof(struct kvp_record),
-					ENTRIES_PER_BLOCK,
-					filep);
-
-			if (ferror(filep)) {
-				syslog(LOG_ERR, "Failed to read file, pool: %d",
-				       i);
-				exit(EXIT_FAILURE);
-			}
-
-			if (!feof(filep)) {
-				/*
-				 * We have more data to read.
-				 */
-				num_blocks++;
-				record = realloc(record, alloc_unit *
-						num_blocks);
-				if (record == NULL) {
-					fclose(filep);
-					close(fd);
-					return 1;
-				}
-				continue;
-			}
-			break;
-		}
 		kvp_file_info[i].fd = fd;
-		kvp_file_info[i].num_blocks = num_blocks;
-		kvp_file_info[i].records = record;
-		kvp_file_info[i].num_records = records_read;
-		fclose(filep);
-
+		kvp_file_info[i].num_blocks = 1;
+		kvp_file_info[i].records = malloc(alloc_unit);
+		if (kvp_file_info[i].records == NULL)
+			return 1;
+		kvp_file_info[i].num_records = 0;
+		kvp_update_mem_state(i);
 	}
 
 	return 0;
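The kvp changes do three things: the fread() count becomes the remaining capacity (ENTRIES_PER_BLOCK * num_blocks - records_read), so a partially filled, reallocated buffer can no longer be overrun; the pool lock is released before every exit(); and kvp_file_init() stops duplicating the read loop, instead seeding an empty pool and calling kvp_update_mem_state(). A standalone sketch of the corrected accumulate-and-grow loop follows; read_all_records() and struct record are illustrative names.

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_BLOCK 8

struct record { char key[32]; char value[32]; };

/* Read every record from fp, growing the buffer one block at a time.
 * Returns the number of records read, or -1 on error. */
static long read_all_records(FILE *fp, struct record **out)
{
	size_t num_blocks = 1, records_read = 0;
	struct record *buf, *bigger;

	buf = malloc(sizeof(*buf) * ENTRIES_PER_BLOCK);
	if (!buf)
		return -1;

	for (;;) {
		/* Ask only for what still fits behind the records already
		 * read; requesting a whole block's worth every pass (the old
		 * behaviour) could write past the end of the buffer. */
		records_read += fread(&buf[records_read], sizeof(*buf),
				      ENTRIES_PER_BLOCK * num_blocks - records_read,
				      fp);

		if (ferror(fp)) {
			free(buf);
			return -1;
		}
		if (feof(fp))
			break;

		num_blocks++;
		bigger = realloc(buf, sizeof(*buf) * ENTRIES_PER_BLOCK * num_blocks);
		if (!bigger) {
			free(buf);
			return -1;
		}
		buf = bigger;
	}

	*out = buf;
	return (long)records_read;
}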
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 51334ed..f306a76 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -14,6 +14,10 @@
 # define POISON_POINTER_DELTA 0
 #endif
 
+#ifdef __cplusplus
+#define LIST_POISON1  NULL
+#define LIST_POISON2  NULL
+#else
 /*
  * These are non-NULL pointers that will result in page faults
  * under normal circumstances, used to verify that nobody uses
@@ -21,6 +25,7 @@
  */
 #define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
 #define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
+#endif
 
 /********** include/linux/timer.h **********/
 /*
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 28d1605..b60a6fd 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
 	snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
 		 d, d, perf, vcnt, v);
 
-	return system(cmd);
+	return system(cmd) ? TEST_FAIL : TEST_OK;
 }
 
 int test__attr(int subtest __maybe_unused)
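system() returns a wait status, not a perf test code, so passing it straight through could collide with the harness's TEST_* values; collapsing any non-zero status to TEST_FAIL keeps the meaning unambiguous. A tiny sketch of the mapping (the TEST_* values below are assumed for illustration):

#include <stdlib.h>

enum { TEST_OK = 0, TEST_FAIL = -1 };	/* assumed values, for illustration only */

static int run_checked(const char *cmd)
{
	/* Any non-zero wait status (exec failure, signal, non-zero exit)
	 * collapses to TEST_FAIL instead of leaking through unchanged. */
	return system(cmd) ? TEST_FAIL : TEST_OK;
}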
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index f7b35e1..f199d5b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 	/* Last entry */
 	if (curr->end == curr->start)
-		curr->end = roundup(curr->start, 4096);
+		curr->end = roundup(curr->start, 4096) + 4096;
 }
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
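The last symbol in the table has no successor to bound it, so its end address is synthesized. roundup() alone is a no-op whenever the start is already page aligned, which left that symbol with zero length; adding one extra page guarantees a non-empty range. A short worked example of the arithmetic:

#include <assert.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long start = 0x401000;				/* already 4 KiB aligned */

	assert(ROUNDUP(start, 4096UL) == start);		/* old code: end == start, empty symbol */
	assert(ROUNDUP(start, 4096UL) + 4096 == 0x402000);	/* fixed: at least one page long */
	return 0;
}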
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820..66d31de 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
 
 	rc = run_test(test_function, name);
 
-	if (rc == MAGIC_SKIP_RETURN_VALUE)
+	if (rc == MAGIC_SKIP_RETURN_VALUE) {
 		test_skip(name);
-	else
+		/* so that a skipped test is not marked as failed */
+		rc = 0;
+	} else
 		test_finish(name, rc);
 
 	return rc;
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index bbab7f4..d116a19 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,5 +1,9 @@
 # Makefile for vm selftests
 
+ifndef OUTPUT
+  OUTPUT := $(shell pwd)
+endif
+
 CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 BINARIES = compaction_test
 BINARIES += hugepage-mmap
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 9b4610c..f249e04 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -245,7 +245,7 @@ void do_unexpected_base(void)
 		long ret;
 		asm volatile ("int $0x80"
 			      : "=a" (ret) : "a" (243), "b" (low_desc)
-			      : "flags");
+			      : "r8", "r9", "r10", "r11");
 		memcpy(&desc, low_desc, sizeof(desc));
 		munmap(low_desc, sizeof(desc));
 
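The clobber-list changes in this and the following selftests share one cause: on a 64-bit kernel, the int $0x80 compat entry does not preserve r8-r11, so inline assembly issuing it must name those registers as clobbers ("flags" alone says nothing about them). The selftests below wrap this in an INT80_CLOBBERS macro; here is a minimal self-contained sketch of the same pattern (the syscall number 20 is __NR_getpid in the 32-bit table, used purely as an example):

#include <stdio.h>

#ifdef __x86_64__
/* int $0x80 takes the 32-bit entry path, which clobbers r8-r11. */
# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
#else
# define INT80_CLOBBERS
#endif

static long getpid_via_int80(void)
{
	long ret;

	asm volatile ("int $0x80"
		      : "=a" (ret)
		      : "a" (20L)	/* __NR_getpid in the 32-bit syscall table */
		      : INT80_CLOBBERS);
	return ret;
}

int main(void)
{
	printf("pid via int $0x80: %ld\n", getpid_via_int80());
	return 0;
}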
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index e717fed..ac1a7a3 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -45,6 +45,12 @@
 #define AR_DB			(1 << 22)
 #define AR_G			(1 << 23)
 
+#ifdef __x86_64__
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
+#else
+# define INT80_CLOBBERS
+#endif
+
 static int nerrs;
 
 /* Points to an array of 1024 ints, each holding its own index. */
@@ -360,9 +366,24 @@ static void do_simple_tests(void)
 	install_invalid(&desc, false);
 
 	desc.seg_not_present = 0;
-	desc.read_exec_only = 0;
 	desc.seg_32bit = 1;
+	desc.read_exec_only = 0;
+	desc.limit = 0xfffff;
+
 	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
+
+	desc.limit_in_pages = 1;
+
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
+	desc.contents = 1;
+	desc.read_exec_only = 0;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+
+	desc.limit = 0;
 	install_invalid(&desc, true);
 }
 
@@ -634,7 +655,7 @@ static int invoke_set_thread_area(void)
 	asm volatile ("int $0x80"
 		      : "=a" (ret), "+m" (low_user_desc) :
 			"a" (243), "b" (low_user_desc)
-		      : "flags");
+		      : INT80_CLOBBERS);
 	return ret;
 }
 
@@ -703,7 +724,7 @@ static void test_gdt_invalidation(void)
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 	if (sel != 0) {
 		result = "FAIL";
@@ -734,7 +755,7 @@ static void test_gdt_invalidation(void)
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 	if (sel != 0) {
 		result = "FAIL";
@@ -767,7 +788,7 @@ static void test_gdt_invalidation(void)
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 #ifdef __x86_64__
 	syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
@@ -820,7 +841,7 @@ static void test_gdt_invalidation(void)
 			"+a" (eax)
 		      : "m" (low_user_desc_clear),
 			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
-		      : "flags");
+		      : INT80_CLOBBERS);
 
 #ifdef __x86_64__
 	syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
index 093c190..28b3c7c 100644
--- a/tools/testing/selftests/x86/mpx-hw.h
+++ b/tools/testing/selftests/x86/mpx-hw.h
@@ -51,14 +51,14 @@
 struct mpx_bd_entry {
 	union {
 		char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
-		void *contents[1];
+		void *contents[0];
 	};
 } __attribute__((packed));
 
 struct mpx_bt_entry {
 	union {
 		char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
-		unsigned long contents[1];
+		unsigned long contents[0];
 	};
 } __attribute__((packed));
 
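The mpx-hw.h change swaps the one-element contents[1] overlays for GNU zero-length arrays. The byte array already fixes the entry size; a zero-length member adds nothing to sizeof and, unlike a [1] array, does not make compilers flag the existing contents[i] accesses beyond element 0 as out of bounds, which appears to be the motivation here. A small sketch of the overlay idiom with an illustrative size:

#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE_BYTES 32		/* illustrative size */

struct entry {
	union {
		char x[ENTRY_SIZE_BYTES];
		unsigned long contents[0];	/* GNU zero-length array: typed view of x[] */
	};
} __attribute__((packed));

int main(void)
{
	struct entry e;

	memset(e.x, 0, sizeof(e.x));
	e.contents[1] = 0xdead;		/* a view into x[]; a [1]-sized array here
					 * would be diagnosed as out of bounds */
	printf("%zu\n", sizeof(e));	/* still ENTRY_SIZE_BYTES */
	return 0;
}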
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index b037ce9c..eaea924 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
 	asm volatile ("int $0x80"
 		      : "+a" (args->nr),
 			"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
-			"+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
+			"+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
+			: : "r8", "r9", "r10", "r11");
 	args->arg5 = bp;
 #else
 	sys32_helper(args, int80_and_ret);
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 50c2635..a48da95 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
 #ifdef __x86_64__
 # define REG_IP REG_RIP
 # define WIDTH "q"
+# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
 #else
 # define REG_IP REG_EIP
 # define WIDTH "l"
+# define INT80_CLOBBERS
 #endif
 
 static unsigned long get_eflags(void)
@@ -140,7 +142,8 @@ int main()
 
 	printf("[RUN]\tSet TF and check int80\n");
 	set_eflags(get_eflags() | X86_EFLAGS_TF);
-	asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
+	asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
+			: INT80_CLOBBERS);
 	check_result();
 
 	/*
diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
index 66f8bf0..45eaa70 100644
--- a/tools/usb/usbip/Makefile.am
+++ b/tools/usb/usbip/Makefile.am
@@ -1,6 +1,7 @@
 SUBDIRS := libsrc src
 includedir = @includedir@/usbip
 include_HEADERS := $(addprefix libsrc/, \
-		     usbip_common.h vhci_driver.h usbip_host_driver.h)
+		     usbip_common.h vhci_driver.h usbip_host_driver.h \
+		     list.h sysfs_utils.h usbip_host_common.h)
 
 dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 27a1f63..7b49a13 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -89,9 +89,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-	vcpu->arch.timer_cpu.armed = false;
-
-	WARN_ON(!kvm_timer_should_fire(vcpu));
 
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index c8aeb7b..9502124 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -77,11 +77,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index f138ed2..a26c677 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
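A small idiom change above: kcalloc(nr, sizeof(*entries), ...) ties the element size to the pointer's own type, so the allocation cannot silently fall out of sync if entries is ever retyped. A minimal illustration with hypothetical names:

#include <stdlib.h>

struct routing_entry { int gsi; int type; };

static struct routing_entry *alloc_entries(size_t nr)
{
	/* sizeof(*entries) tracks the variable's type automatically;
	 * sizeof(struct routing_entry) would need editing by hand if
	 * the pointer were ever retyped. */
	struct routing_entry *entries = calloc(nr, sizeof(*entries));

	return entries;
}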
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4660a7d..31f5625 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -322,6 +322,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	int ret = 0;
 	u32 *intids;
 	int nr_irqs, i;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
 	if (nr_irqs < 0)
@@ -329,7 +330,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
-					     struct vgic_its *its,
-					     gpa_t addr, unsigned int len)
-{
-	u32 reg = 0;
-
-	mutex_lock(&its->cmd_lock);
-	if (its->creadr == its->cwriter)
-		reg |= GITS_CTLR_QUIESCENT;
-	if (its->enabled)
-		reg |= GITS_CTLR_ENABLE;
-	mutex_unlock(&its->cmd_lock);
-
-	return reg;
-}
-
-static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
-				     gpa_t addr, unsigned int len,
-				     unsigned long val)
-{
-	its->enabled = !!(val & GITS_CTLR_ENABLE);
-}
-
 static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 					      struct vgic_its *its,
 					      gpa_t addr, unsigned int len)
@@ -687,6 +664,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
@@ -1160,33 +1139,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
 #define ITS_CMD_SIZE			32
 #define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
 
-/*
- * By writing to CWRITER the guest announces new commands to be processed.
- * To avoid any races in the first place, we take the its_cmd lock, which
- * protects our ring buffer variables, so that there is only one user
- * per ITS handling commands at a given time.
- */
-static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
-					gpa_t addr, unsigned int len,
-					unsigned long val)
+/* Must be called with the cmd_lock held. */
+static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 {
 	gpa_t cbaser;
 	u64 cmd_buf[4];
-	u32 reg;
 
-	if (!its)
+	/* Commands are only processed when the ITS is enabled. */
+	if (!its->enabled)
 		return;
 
-	mutex_lock(&its->cmd_lock);
-
-	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
-	reg = ITS_CMD_OFFSET(reg);
-	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
-		mutex_unlock(&its->cmd_lock);
-		return;
-	}
-
-	its->cwriter = reg;
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
@@ -1206,6 +1168,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
 		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
 			its->creadr = 0;
 	}
+}
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+					gpa_t addr, unsigned int len,
+					unsigned long val)
+{
+	u64 reg;
+
+	if (!its)
+		return;
+
+	mutex_lock(&its->cmd_lock);
+
+	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+	reg = ITS_CMD_OFFSET(reg);
+	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+		mutex_unlock(&its->cmd_lock);
+		return;
+	}
+	its->cwriter = reg;
+
+	vgic_its_process_commands(kvm, its);
 
 	mutex_unlock(&its->cmd_lock);
 }
@@ -1286,6 +1276,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
 	*regptr = reg;
 }
 
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+					     struct vgic_its *its,
+					     gpa_t addr, unsigned int len)
+{
+	u32 reg = 0;
+
+	mutex_lock(&its->cmd_lock);
+	if (its->creadr == its->cwriter)
+		reg |= GITS_CTLR_QUIESCENT;
+	if (its->enabled)
+		reg |= GITS_CTLR_ENABLE;
+	mutex_unlock(&its->cmd_lock);
+
+	return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+				     gpa_t addr, unsigned int len,
+				     unsigned long val)
+{
+	mutex_lock(&its->cmd_lock);
+
+	its->enabled = !!(val & GITS_CTLR_ENABLE);
+
+	/*
+	 * Try to process any pending commands. This function bails out early
+	 * if the ITS is disabled or no commands have been queued.
+	 */
+	vgic_its_process_commands(kvm, its);
+
+	mutex_unlock(&its->cmd_lock);
+}
+
 #define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
 {								\
 	.reg_offset = off,					\
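This hunk restructures ITS command handling: the drain loop moves into vgic_its_process_commands(), which documents that cmd_lock must be held and bails out when the ITS is disabled, and it is now called from two writers, the CWRITER handler (after the write pointer is updated) and the relocated CTLR handler (so commands queued while the ITS was disabled run as soon as the guest enables it). The kzalloc() in vgic_its_alloc_collection() also gains a missing NULL check. A condensed, self-contained sketch of the "locked helper shared by several register writers" shape, using hypothetical state and pthread locking in place of the kernel mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct its_state {
	pthread_mutex_t cmd_lock;
	bool enabled;
	uint64_t creadr, cwriter;	/* ring-buffer read/write offsets */
};

static void handle_one_command(struct its_state *its) { its->creadr = its->cwriter; }

/* Must be called with cmd_lock held. Bails out early when the ITS is
 * disabled or the command ring is empty. */
static void process_commands(struct its_state *its)
{
	if (!its->enabled)
		return;
	while (its->cwriter != its->creadr)
		handle_one_command(its);
}

/* Guest updates the write pointer: advance it, then drain under the lock. */
static void write_cwriter(struct its_state *its, uint64_t val)
{
	pthread_mutex_lock(&its->cmd_lock);
	its->cwriter = val;
	process_commands(its);
	pthread_mutex_unlock(&its->cmd_lock);
}

/* Guest flips the enable bit: commands queued while disabled run now. */
static void write_ctlr(struct its_state *its, bool enable)
{
	pthread_mutex_lock(&its->cmd_lock);
	its->enabled = enable;
	process_commands(its);
	pthread_mutex_unlock(&its->cmd_lock);
}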
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f4c6d4f..1b20768 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -125,6 +125,11 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -361,6 +366,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1052,7 +1060,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	 * changes) is disallowed above, so any other attribute changes getting
 	 * here can be skipped.
 	 */
-	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+	if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		return r;
 	}
@@ -3896,7 +3904,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	if (!vcpu_align)
 		vcpu_align = __alignof__(struct kvm_vcpu);
 	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
-					   0, NULL);
+					   SLAB_ACCOUNT, NULL);
 	if (!kvm_vcpu_cache) {
 		r = -ENOMEM;
 		goto out_free_3;
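Two independent changes close out kvm_main.c: an empty __weak kvm_arch_mmu_notifier_invalidate_range() lets the generic invalidate_range_start path call an architecture hook that only some architectures define, and the vCPU slab cache is created with SLAB_ACCOUNT so vCPU allocations are charged to the owning cgroup. Below is a minimal userspace sketch of the weak-default / strong-override linkage pattern; the function names are illustrative, not KVM's.

#include <stdio.h>

/* Generic code provides a no-op default... */
__attribute__((weak)) void arch_invalidate_range(unsigned long start,
						 unsigned long end)
{
	(void)start;
	(void)end;
}

/* ...and an architecture that needs the hook supplies a strong definition
 * in its own object file, which the linker picks over the weak one:
 *
 *	void arch_invalidate_range(unsigned long start, unsigned long end)
 *	{
 *		flush_arch_specific_state(start, end);
 *	}
 */

void notifier_invalidate(unsigned long start, unsigned long end)
{
	/* Always safe to call: resolves to the no-op unless overridden. */
	arch_invalidate_range(start, end);
	printf("invalidated [%#lx, %#lx)\n", start, end);
}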