Merge "adreno_tz: Remove unnecessary devfreq NULL check in tz_handler()" into msm-4.9
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index d9995f1..a25a99e 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -11,24 +11,56 @@
 The kernel configures the translation tables so that translations made
 via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
 the virtual address ignored by the translation hardware. This frees up
-this byte for application use, with the following caveats:
+this byte for application use.
 
-	(1) The kernel requires that all user addresses passed to EL1
-	    are tagged with tag 0x00. This means that any syscall
-	    parameters containing user virtual addresses *must* have
-	    their top byte cleared before trapping to the kernel.
 
-	(2) Non-zero tags are not preserved when delivering signals.
-	    This means that signal handlers in applications making use
-	    of tags cannot rely on the tag information for user virtual
-	    addresses being maintained for fields inside siginfo_t.
-	    One exception to this rule is for signals raised in response
-	    to watchpoint debug exceptions, where the tag information
-	    will be preserved.
+Passing tagged addresses to the kernel
+--------------------------------------
 
-	(3) Special care should be taken when using tagged pointers,
-	    since it is likely that C compilers will not hazard two
-	    virtual addresses differing only in the upper byte.
+All interpretation of userspace memory addresses by the kernel assumes
+an address tag of 0x00.
+
+This includes, but is not limited to, addresses found in:
+
+ - pointer arguments to system calls, including pointers in structures
+   passed to system calls,
+
+ - the stack pointer (sp), e.g. when interpreting it to deliver a
+   signal,
+
+ - the frame pointer (x29) and frame records, e.g. when interpreting
+   them to generate a backtrace or call graph.
+
+Using non-zero address tags in any of these locations may result in an
+error code being returned, a (fatal) signal being raised, or other modes
+of failure.
+
+For these reasons, passing non-zero address tags to the kernel via
+system calls is forbidden, and using a non-zero address tag for sp is
+strongly discouraged.
+
+Programs maintaining a frame pointer and frame records that use non-zero
+address tags may suffer impaired or inaccurate debug and profiling
+visibility.
+
+
+Preserving tags
+---------------
+
+Non-zero tags are not preserved when delivering signals. This means that
+signal handlers in applications making use of tags cannot rely on the
+tag information for user virtual addresses being maintained for fields
+inside siginfo_t. One exception to this rule is for signals raised in
+response to watchpoint debug exceptions, where the tag information will
+be preserved.
 
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
+
+
+Other considerations
+--------------------
+
+Special care should be taken when using tagged pointers, since it is
+likely that C compilers will not hazard two virtual addresses differing
+only in the upper byte.
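
To illustrate the rule above, a minimal userspace sketch in C (illustrative
only, not part of this patch; untag_ptr() is a hypothetical helper) that
clears the top byte of a tagged pointer before handing it to a system call:

	#include <stdint.h>
	#include <unistd.h>

	/* Clear the top byte (bits 63:56) so the kernel sees tag 0x00. */
	static inline void *untag_ptr(void *tagged)
	{
		return (void *)((uintptr_t)tagged & 0x00ffffffffffffffULL);
	}

	ssize_t tagged_read(int fd, void *tagged_buf, size_t len)
	{
		/* Only untagged user virtual addresses may be passed to syscalls. */
		return read(fd, untag_ptr(tagged_buf), len);
	}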
diff --git a/Documentation/devicetree/bindings/arm/msm/board-id.txt b/Documentation/devicetree/bindings/arm/msm/board-id.txt
new file mode 100644
index 0000000..e07a1c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/board-id.txt
@@ -0,0 +1,64 @@
+* BOARD-ID
+
+The qcom,board-id entry specifies the MSM platform and subtype revision.
+It can optionally be an array of these to indicate multiple hardware variants using
+the same device tree.  It is expected that the bootloader will use this
+information at boot-up to decide which device tree to use when given multiple
+device trees, some of which may not be compatible with the actual hardware.  It
+is the bootloader's responsibility to pass the correct device tree to the kernel.
+
+Legacy format:
+
+It is expected that the qcom,board-id entry be at the top level of the device
+tree structure.  The format of the entry is:
+
+   qcom,board-id = <platform_id, subtype_id> [, <p2, s2> ...]
+
+where platform_id and subtype_id are the numeric values for the platform and
+subtype of the current hardware.
+
+The "subtype_id" cell is a 32-bit integer whose bit values are defined as follows:
+    bits 31-20 = Reserved bits
+    bits 19-16 = Boot Device Type.
+		 MSM:
+		     0: default (eMMC)
+		     2: EMMC_SDC1
+		     4: BOOT_UFS
+		 MDM:
+		     0: default (NAND)
+		     3: EMMC_SDC1
+    bits 15-8  = DDR Size. For devices with a DDR size of 512MB the value is
+		 0x1; the default value is 0x0
+    bits 7-0   = Platform Subtype
+
+In the event that a given device tree is applicable to all hardware versions
+matching a given Platform Type / Subtype ID, the major/minor platform version
+fields in the board_id property shall both be specified as 0xff.
+
+Modern format:
+The cell layout of the qcom,board-id property is as follows:
+
+   qcom,board-id = <board_id, reserved>
+
+where board_id is a 32-bit integer whose bit values are defined as follows:
+    bits 31-24 = Platform Subtype ID
+    bits 23-16 = Platform Version (Major)
+    bits 15-8  = Platform Version (Minor)
+    bits  7-0  = Platform Type ID
+
+and the 'reserved' cell is a 32-bit integer whose bit values are defined as follows:
+    bits 31-13 = Reserved Bits
+    bits 12-11 = Panel Detection. 00 - limit to HD, 01 - limit to 720p,
+		 10 - limit to qHD, 11 - limit to FWVGA
+    bits 10-8  = DDR Size. For devices with a DDR size of 512MB the value is
+		 0x1; the default value is 0x0
+    bits 7-0   = Platform Subtype
+
+In the event that a given device tree is applicable to all hardware versions
+matching a given Platform Type / Subtype ID, the major/minor platform version
+fields in the board_id property shall both be specified as 0xff.
+
+Example:
+   qcom,board-id = <15 0>;
+   qcom,board-id = <0x01040708, 0>;
+   qcom,board-id = <0x01ffff08, 0>;
+   qcom,board-id = <8, 0x100>;
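
As a worked illustration of the modern cell layout (a sketch, not part of this
binding), a bootloader-side helper in C might pack and unpack the board_id
cell like this:

	#include <stdint.h>

	struct board_id_fields {
		uint8_t platform_subtype;	/* bits 31-24 */
		uint8_t version_major;		/* bits 23-16 */
		uint8_t version_minor;		/* bits 15-8  */
		uint8_t platform_type;		/* bits 7-0   */
	};

	static inline uint32_t board_id_pack(const struct board_id_fields *f)
	{
		return ((uint32_t)f->platform_subtype << 24) |
		       ((uint32_t)f->version_major << 16) |
		       ((uint32_t)f->version_minor << 8) |
		       (uint32_t)f->platform_type;
	}

	static inline struct board_id_fields board_id_unpack(uint32_t board_id)
	{
		struct board_id_fields f = {
			.platform_subtype = (board_id >> 24) & 0xff,
			.version_major    = (board_id >> 16) & 0xff,
			.version_minor    = (board_id >> 8) & 0xff,
			.platform_type    = board_id & 0xff,
		};
		return f;
	}

Under this reading, <0x01040708, 0> above decodes to subtype 0x01, major
version 0x04, minor version 0x07 and platform type 0x08, while <0x01ffff08, 0>
uses 0xff in both version fields as the "any hardware version" wildcard.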
diff --git a/Documentation/devicetree/bindings/arm/msm/msm-id.txt b/Documentation/devicetree/bindings/arm/msm/msm-id.txt
new file mode 100644
index 0000000..c243154
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm-id.txt
@@ -0,0 +1,33 @@
+* MSM-ID
+
+The qcom,msm-id entry specifies the MSM chipset, platform, hardware revision
+and optional manufactured foundry.  It can optionally be an array of these to
+indicate multiple hardware variants using the same device tree.  It is expected that
+the bootloader will use this information at boot-up to decide which device tree
+to use when given multiple device trees, some of which may not be compatible
+with the actual hardware.  It is the bootloader's responsibility to pass the
+correct device tree to the kernel.
+
+Format:
+
+It is expected that the qcom,msm-id entry be at the top level of the device
+tree structure.  The format can take one of the two forms below:
+
+   qcom,msm-id = <chipset_foundry_id, platform_id, rev_id> [, <c2, p2, r2> ...]
+   qcom,msm-id = <chipset_foundry_id, rev_id> [, <c2, r2> ...]
+
+If the second format is used, the qcom,board-id property must also be defined.
+
+The "chipset_foundry_id" consists of three fields as below:
+
+   bits 0-15  = The unique MSM chipset id.
+   bits 16-23 = The optional foundry id. If the bootloader doesn't find a
+		device tree whose foundry-id exactly matches the hardware, it
+		chooses the device tree with foundry-id = 0.
+   bits 24-31 = Reserved.
+
+Example:
+   qcom,msm-id = <0x1007e 15 0>;
+
+   qcom,board-id= <15 2>;
+   qcom,msm-id = <0x1007e 0>;
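
For clarity (a sketch following the bit layout above, not part of this
binding), the chipset_foundry_id cell could be decomposed in C as:

	#include <stdint.h>

	static inline uint16_t msm_chipset_id(uint32_t chipset_foundry_id)
	{
		return chipset_foundry_id & 0xffff;		/* bits 0-15  */
	}

	static inline uint8_t msm_foundry_id(uint32_t chipset_foundry_id)
	{
		return (chipset_foundry_id >> 16) & 0xff;	/* bits 16-23 */
	}

For the example above, 0x1007e yields chipset id 0x7e with foundry id 0x1;
when no device tree has an exactly matching foundry id, the bootloader falls
back to the one with foundry id 0.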
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index bf93a2a..6451b34 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -89,8 +89,8 @@
 - SDM845
   compatible = "qcom,sdm845"
 
-- SDM830
-  compatible = "qcom,sdm830"
+- SDM670
+  compatible = "qcom,sdm670"
 
 - MSM8952
   compatible = "qcom,msm8952"
@@ -267,10 +267,9 @@
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-qrd"
-compatible = "qcom,sdm830-sim"
-compatible = "qcom,sdm830-rumi"
-compatible = "qcom,sdm830-cdp"
-compatible = "qcom,sdm830-mtp"
+compatible = "qcom,sdm670-rumi"
+compatible = "qcom,sdm670-cdp"
+compatible = "qcom,sdm670-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
new file mode 100644
index 0000000..a415c8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
@@ -0,0 +1,31 @@
+Qualcomm Technologies Inc. memory dump driver
+
+The QTI memory dump driver allows various client subsystems to register and
+allocate their respective dump regions. At the time of a deadlock or CPU hang,
+these dump regions are captured to give a snapshot of the system at the time
+of the crash.
+
+Required properties:
+
+-compatible: "qcom,mem-dump"
+-memory-region: phandle to the CMA region. The size of the CMA region
+		should be greater than the sum of the sizes of all child
+		nodes, to account for padding.
+
+If any child nodes exist, the following properties are required:
+
+-qcom,dump-size: The size of memory that needs to be allocated for the
+		 particular node.
+-qcom,dump-id: The ID within the data dump table where this entry needs
+	       to be added.
+
+Example:
+
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xEC>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index c766df8..b0eed20 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -297,6 +297,10 @@
 - qcom,sde-downscaling-prefill-lines:	A u32 value indicates the latency of downscaling in lines.
 - qcom,sde-max-per-pipe-bw-kbps:	Array of u32 value indicates the max per pipe bandwidth in Kbps.
 - qcom,sde-amortizable-threshold:	This value indicates the min for traffic shaping in lines.
+- qcom,sde-vbif-qos-rt-remap:	This array is used to program vbif qos remapper register
+				priority for realtime clients.
+- qcom,sde-vbif-qos-nrt-remap:	This array is used to program vbif qos remapper register
+				priority for non-realtime clients.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -500,6 +504,9 @@
         2400000 2400000 2400000 2400000>;
     qcom,sde-amortizable-threshold = <11>;
 
+    qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+    qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
         qcom,sde-vig-qseed-off = <0x200>;
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dp.txt b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt
new file mode 100644
index 0000000..790da12
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dp.txt
@@ -0,0 +1,146 @@
+Qualcomm Technologies, Inc.
+sde-dp is the master Display Port device which supports DP host controllers
+that are compatible with the VESA Display Port interface specification.
+DP Controller: Required properties:
+- compatible:           Should be "qcom,dp-display".
+- reg:                  Base address and length of DP hardware's memory mapped regions.
+- reg-names:            A list of strings that name the list of regs. "dp_ctrl" - DP controller memory region.
+			"dp_phy" - DP PHY memory region.
+			"dp_ln_tx0" - USB3 DP PHY combo TX-0 lane memory region.
+			"dp_ln_tx1" - USB3 DP PHY combo TX-1 lane memory region.
+			"dp_mmss_cc" - Display Clock Control memory region.
+			"qfprom_physical" - QFPROM Phys memory region.
+			"dp_pll" - USB3 DP combo PLL memory region.
+			"hdcp_physical" - DP HDCP memory region.
+- cell-index:           Specifies the controller instance.
+- clocks:               Clocks required for Display Port operation.
+- clock-names:          Names of the clocks corresponding to handles. Following clocks are required:
+			"core_aux_clk", "core_usb_ref_clk_src","core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk", "ctrl_link_iface_clk", "ctrl_crypto_clk",
+			"ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent".
+- gdsc-supply:		phandle to gdsc regulator node.
+- vdda-1p2-supply:		phandle to vdda 1.2V regulator node.
+- vdda-0p9-supply:		phandle to vdda 0.9V regulator node.
+- interrupt-parent:	phandle to the interrupt parent device node.
+- interrupts:		The interrupt signal from the DP block.
+- qcom,aux-en-gpio:			Specifies the aux-channel enable gpio.
+- qcom,aux-sel-gpio:		Specifies the aux-channel select gpio.
+- qcom,usbplug-cc-gpio:		Specifies the usbplug orientation gpio.
+- qcom,aux-cfg-settings:	An array that specifies the DP AUX configuration settings.
+- qcom,max-pclk-frequency-khz:	An integer specifying the max. pixel clock in KHz supported by Display Port.
+- qcom,dp-usbpd-detection:	Phandle for the PMI regulator node for USB PHY PD detection.
+- qcom,<type>-supply-entries:		A node that lists the elements of the supply used by a particular "type" of DP module. The module "types"
+					can be "core", "ctrl", and "phy". Within the same type,
+					there can be more than one instance of this binding,
+					in which case the entry would be appended with the
+					supply entry index.
+					e.g. qcom,ctrl-supply-entry@0
+					-- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+					-- qcom,supply-min-voltage: minimum voltage level (uV)
+					-- qcom,supply-max-voltage: maximum voltage level (uV)
+					-- qcom,supply-enable-load: load drawn (uA) from enabled supply
+					-- qcom,supply-disable-load: load drawn (uA) from disabled supply
+					-- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+					-- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+					-- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+					-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+- pinctrl-names:	List of names to assign mdss pin states defined in pinctrl device node
+					Refer to pinctrl-bindings.txt
+- pinctrl-<0..n>:	Lists phandles each pointing to the pin configuration node within a pin
+					controller. These pin configurations are installed in the pinctrl
+					device node. Refer to pinctrl-bindings.txt
+
+Example:
+	sde_dp: qcom,dp_display@0{
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+
+		reg =	<0xae90000 0xa84>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea030 0x10>,
+			<0x88e8000 0x621c>,
+			<0x0aee1000 0x034>;
+		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"usb3_dp_com", "hdcp_physical";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk",
+			"ctrl_link_iface_clk", "ctrl_crypto_clk",
+			"ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+
+		qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+
+		qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+		qcom,max-pclk-frequency-khz = <593470>;
+		pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+		pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+		pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+		qcom,aux-en-gpio = <&tlmm 43 0>;
+		qcom,aux-sel-gpio = <&tlmm 51 0>;
+		qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
new file mode 100644
index 0000000..8598d0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -0,0 +1,63 @@
+QTI PDC interrupt controller
+
+PDC is QTI's platform parent interrupt controller that serves as a wakeup source.
+
+Newer QTI SOCs are replacing MPM (MSM sleep Power Manager) with PDC (Power
+Domain Controller) to manage subsystem wakeups and resources during sleep.
+This driver marks the wakeup interrupts in APSS PDC such that it monitors the
+interrupts when the system is asleep, wakes up the APSS when one of these
+interrupts occurs and replays it to the subsystem interrupt controller after it
+becomes operational.
+
+The earlier MPM architecture used an arch extension of the GIC interrupt
+controller to mark enabled wake-up interrupts and monitor them when the
+system goes to sleep. Since the arch extensions are no longer available
+on newer kernel versions, this driver is implemented as a hierarchical irq
+domain.  The GIC is the parent interrupt controller at the highest level;
+the platform interrupt controller PDC is next in the hierarchy, followed by
+others.  This driver only configures the interrupts; it does not handle them.
+
+PDC interrupt configuration involves programming two sets of registers:
+IRQ_ENABLE_BANK    - Enable the irq
+IRQ_i_CFG          - Configure the interrupt i
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Should contain "qcom,pdc-<target>"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Specifies the base physical address for PDC hardware
+			block for DRV2.
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: Specifies the number of cells needed to encode an interrupt source.
+			Value must be 3.
+			The encoding of these cells are same as described in
+			Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+
+- interrupt-parent:
+	Usage: required
+	Value type: <phandle>
+	Definition: Specifies the interrupt parent necessary for hierarchical domain to operate.
+
+- interrupt-controller:
+	Usage: required
+	Value type: <bool>
+	Definition: Identifies the node as an interrupt controller.
+
+Example:
+
+pdcgic: interrupt-controller@b220000 {
+	compatible = "qcom,pdc-sdm845";
+	reg = <0xb220000 0x30000>;
+	#interrupt-cells = <3>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 2d971b7a..375eaf2 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -86,6 +86,11 @@
 		  useful if the upstream hardware is capable of switching
 		  between multiple domains within a single context bank.
 
+- qcom,use-3-lvl-tables:
+		  Some hardware configurations may not be optimized for using
+		  a four level page table configuration. Set to use a three
+		  level page table instead.
+
 - clocks        : List of clocks to be used during SMMU register access. See
                   Documentation/devicetree/bindings/clock/clock-bindings.txt
                   for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
index da54fb1..176f9e1 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
@@ -169,8 +169,15 @@
                           sleep configuration defined for each pin or pin group.
 - qcom,hw-strobe-gpio	: phandle to specify GPIO for hardware strobing. This is used when there is no
 			  pinctrl support or PMIC GPIOs are used.
-- qcom,hw-strobe-sel	: Boolean property to enable hardware strobe. If not defined, software strobe
-			  will be used.
+- qcom,strobe-sel	: Property to select strobe type. If not defined,
+			  software strobe will be used. Allowed options are:
+			  0 - SW strobe
+			  1 - HW strobe
+			  2 - LPG strobe
+			  LPG strobe is supported only for LED3.
+			  If LPG strobe is specified, then strobe control is
+			  configured for active high and level triggered. Also
+			  qcom,hw-strobe-option should be set to 1 or 2.
 - qcom,hw-strobe-edge-trigger	: Boolean property to select trigger type. If defined, hw-strobe is set to
 				  be edge triggered. Otherwise, it is level triggered.
 - qcom,hw-strobe-active-low	: Boolean property to select strobe signal polarity. If defined, hw-strobe
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 0295e1b..937ccb9 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -81,6 +81,32 @@
 				limits.
 - qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
 				  priority for rotator clients.
+- qcom,mdss-rot-cdp-setting:	Integer array of size two, indicating whether
+				client driven prefetch (CDP) is available.
+				Index 0 indicates whether CDP is enabled for
+				read operations and index 1 whether it is
+				enabled for write operations.
+- qcom,mdss-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the rotator sspp and writeback
+				client.
+- qcom,mdss-rot-danger-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the danger lut settings for
+				the rotator sspp and writeback client.
+- qcom,mdss-rot-safe-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the safe lut settings for the
+				rotator sspp and writeback client.
+- qcom,mdss-inline-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-danger-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the danger lut
+				settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-safe-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the safe lut
+				settings for the inline rotator sspp and
+				writeback client.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
@@ -146,9 +172,19 @@
 		/* VBIF QoS remapper settings*/
 		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
 
+		qcom,mdss-rot-cdp-setting = <1 1>;
+
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0>;
+
+		qcom,mdss-inline-rot-qos-lut = <0x0 0x0 0x00112233 0x44556677>;
+		qcom,mdss-inline-rot-danger-lut = <0x0 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0 0x0000ff00>;
+
 		qcom,mdss-sbuf-headroom = <20>;
 		cache-slice-names = "rotator";
 		cache-slices = <&llcc 4>;
diff --git a/Documentation/devicetree/bindings/nfc/nq-nci.txt b/Documentation/devicetree/bindings/nfc/nq-nci.txt
new file mode 100644
index 0000000..b85e070
--- /dev/null
+++ b/Documentation/devicetree/bindings/nfc/nq-nci.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc NQxxxx NFC NCI device
+
+The Near Field Communication (NFC) device is based on the NFC Controller Interface (NCI).
+
+Required properties:
+
+- compatible: "qcom,nq-nci"
+- reg: NCI i2c slave address.
+- qcom,nq-ven: specific gpio for hardware reset.
+- qcom,nq-irq: specific gpio for read interrupt.
+- qcom,nq-firm: gpio for firmware download
+- qcom,nq-clkreq: gpio for clock
+- interrupt-parent: Should be phandle for the interrupt controller
+                    that services interrupts for this device.
+- interrupts: NFC read interrupt, gpio-clk-req interrupt
+
+
+Recommended properties:
+
+- interrupt-names: names of interrupts, should include "nfc_irq", used for reference
+
+
+Optional properties:
+
+- pinctrl-names, pinctrl-0, pinctrl-1: references to the pinctrl settings
+- clocks, clock-names: must contain the NQxxxx's core clock.
+- qcom,nq-esepwr: gpio to control power of secure element
+
+Example:
+
+	nq-nci@2b {
+		compatible = "qcom,nq-nci";
+		reg = <0x2b>;
+		qcom,nq-irq = <&tlmm 29 0x00>;
+		qcom,nq-ven = <&tlmm 30 0x00>;
+		qcom,nq-firm = <&tlmm 93 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		qcom,clk-src = "BBCLK2";
+		interrupt-parent = <&tlmm>;
+		interrupts = <29 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active","nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		qcom,clk-gpio = <&pm8916_gpios 2 0>;
+		clocks = <&clock_rpm clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
index d7edafc..4a69e03 100644
--- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -67,6 +67,7 @@
 - qcom,complete-ramdump: Boolean. If set, complete ramdump i.e. region between start address of
 			first segment to end address of last segment will be collected without
 			leaving any hole in between.
+- qcom,ignore-ssr-failure: Boolean. If set, SSR failures are not considered fatal.
 
 Example:
 	qcom,venus@fdce0000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
similarity index 95%
rename from Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
index 0fe8a1b..0eb1043f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. SDM830 TLMM block
+Qualcomm Technologies, Inc. SDM670 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-SDM830 platform.
+SDM670 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,sdm830-pinctrl"
+	Definition: must be "qcom,sdm670-pinctrl"
 
 - reg:
 	Usage: required
@@ -135,9 +135,9 @@
 
 Example:
 
-	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03800000 0xc00000>;
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,sdm670-pinctrl";
+		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
 		#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index 5529e308..92ef23c 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -22,8 +22,7 @@
   Definition: String which indicates the charging mode. Can be one of the
 	      following:
               Standalone/Parallel Master	- "qcom,smb138x-charger"
-	      smb138x Parallel Slave		- "qcom,smb138x-parallel-slave"
-	      smb1355 Parallel Slave		- "qcom,smb1355-parallel-slave",
+	      Parallel Slave			- "qcom,smb138x-parallel-slave"
 
 - qcom,pmic-revid
   Usage:      required
@@ -36,8 +35,7 @@
   Usage:      optional
   Value type: <u32>
   Definition: Specifies parallel charging mode. If not specified, MID-MID
-	      option is selected by default. Note that smb1355 can only
-	      run in MID-MID configuration.
+	      option is selected by default.
 
 - qcom,suspend-input
   Usage:      optional
@@ -127,7 +125,7 @@
 =======
 
 smb138x_charger: qcom,smb138x-charger {
-	compatible = "qcom,smb138x-charger";
+	compatible = "qcom,qpnp-smb138x-charger";
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/Documentation/devicetree/bindings/qbt1000/qbt1000.txt b/Documentation/devicetree/bindings/qbt1000/qbt1000.txt
new file mode 100644
index 0000000..c9861e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/qbt1000/qbt1000.txt
@@ -0,0 +1,54 @@
+Qualcomm Technologies, Inc. QBT1000 Specific Bindings
+
+QBT is a fingerprint sensor ASIC capable of performing fingerprint image scans
+and detecting finger presence on the sensor using programmable firmware.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qbt1000".
+
+- clock-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: List of clock names that need to be voted on/off.
+
+- clocks
+  Usage:      required
+  Value type: <prop_encoded-array>
+  Definition: Property pair that represents the clock controller and the clock
+		id. This in combination with the clock-name is used to obtain
+		the handle for the clock that needs to be voted on/off.
+
+- clock-frequency
+  Usage:      required
+  Value type: <u32>
+  Definition: Frequency of clock in Hz.
+
+- qcom,ipc-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for IPC.
+
+- qcom,finger-detect-gpio
+  Usage:      required
+  Value type: <phandle>
+  Definition: phandle for GPIO to be used for finger detect.
+
+=======
+Example
+=======
+
+qcom,qbt1000 {
+	compatible = "qcom,qbt1000";
+	clock-names = "core", "iface";
+	clocks = <&clock_gcc clk_gcc_blsp2_qup6_spi_apps_clk>,
+		<&clock_gcc clk_gcc_blsp2_ahb_clk>;
+	clock-frequency = <15000000>;
+	qcom,ipc-gpio = <&tlmm 121 0>;
+	qcom,finger-detect-gpio = <&pmcobalt_gpios 2 0>;
+};
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c..db86cda 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -308,6 +308,12 @@
 .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    :export:
 
+Explicit Fencing Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+   :doc: explicit fencing properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Makefile b/Makefile
index f47cd95..df4d437 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 28
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ffb93f49..4f95577 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1188,8 +1188,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
 	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
 		return -EFAULT;
 
-	err = 0;
-	err |= put_user(status, ustatus);
+	err = put_user(status, ustatus);
+	if (ret < 0)
+		return err ? err : ret;
+
 	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
 	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
 	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index c51fc65..5a53fcf 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
 			};
 
 			adc0: adc@f8018000 {
+				atmel,adc-vref = <3300>;
+				atmel,adc-channels-used = <0xfe>;
 				pinctrl-0 = <
 					&pinctrl_adc0_adtrg
-					&pinctrl_adc0_ad0
 					&pinctrl_adc0_ad1
 					&pinctrl_adc0_ad2
 					&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
 					&pinctrl_adc0_ad5
 					&pinctrl_adc0_ad6
 					&pinctrl_adc0_ad7
-					&pinctrl_adc0_ad8
-					&pinctrl_adc0_ad9
 					>;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5bb8fd5..d71da30 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -12,23 +12,6 @@
 	model = "Freescale i.MX6 SoloX SDB RevB Board";
 };
 
-&cpu0 {
-	operating-points = <
-		/* kHz    uV */
-		996000  1250000
-		792000  1175000
-		396000  1175000
-		198000  1175000
-		>;
-	fsl,soc-operating-points = <
-		/* ARM kHz      SOC uV */
-		996000	1250000
-		792000	1175000
-		396000	1175000
-		198000  1175000
-	>;
-};
-
 &i2c1 {
 	clock-frequency = <100000>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index ba1da74..961adc9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -142,6 +142,7 @@
 		compatible = "qcom,dummycc";
 		clock-output-names = "gcc_clocks";
 		#clock-cells = <1>;
+		#reset-cells = <1>;
 	};
 
 	clock_cpu: qcom,clock-a7@17810008 {
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 4e361a8..b4bfa55 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -569,6 +569,7 @@
 			regulator-name = "+3VS,vdd_pnl";
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3300000>;
+			regulator-boot-on;
 			gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
 			enable-active-high;
 		};
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f..e74ab0f 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 464748b..ed23196 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -18,13 +18,18 @@ enum {
 };
 #endif
 
+struct mod_plt_sec {
+	struct elf32_shdr	*plt;
+	int			plt_count;
+};
+
 struct mod_arch_specific {
 #ifdef CONFIG_ARM_UNWIND
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr   *plt;
-	int		    plt_count;
+	struct mod_plt_sec	core;
+	struct mod_plt_sec	init;
 #endif
 };
 
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3a5cba9..3d0c2e4 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -31,9 +31,17 @@ struct plt_entries {
 	u32	lit[PLT_ENT_COUNT];
 };
 
+static bool in_init(const struct module *mod, unsigned long loc)
+{
+	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
+}
+
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
-	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+							  &mod->arch.init;
+
+	struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
 	int idx = 0;
 
 	/*
@@ -41,9 +49,9 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (mod->arch.plt_count > 0) {
-		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
-		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+	if (pltsec->plt_count > 0) {
+		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
+		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
 
 		if (plt->lit[idx] == val)
 			return (u32)&plt->ldr[idx];
@@ -53,8 +61,8 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 			plt++;
 	}
 
-	mod->arch.plt_count++;
-	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+	pltsec->plt_count++;
+	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
 
 	if (!idx)
 		/* Populate a new set of entries */
@@ -129,7 +137,7 @@ static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
 
 /* Count how many PLT entries we may need */
 static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
-			       const Elf32_Rel *rel, int num)
+			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
 {
 	unsigned int ret = 0;
 	const Elf32_Sym *s;
@@ -144,13 +152,17 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
 		case R_ARM_THM_JUMP24:
 			/*
 			 * We only have to consider branch targets that resolve
-			 * to undefined symbols. This is not simply a heuristic,
-			 * it is a fundamental limitation, since the PLT itself
-			 * is part of the module, and needs to be within range
-			 * as well, so modules can never grow beyond that limit.
+			 * to symbols that are defined in a different section.
+			 * This is not simply a heuristic, it is a fundamental
+			 * limitation, since there is no guaranteed way to emit
+			 * PLT entries sufficiently close to the branch if the
+			 * section size exceeds the range of a branch
+			 * instruction. So ignore relocations against defined
+			 * symbols if they live in the same section as the
+			 * relocation target.
 			 */
 			s = syms + ELF32_R_SYM(rel[i].r_info);
-			if (s->st_shndx != SHN_UNDEF)
+			if (s->st_shndx == dstidx)
 				break;
 
 			/*
@@ -161,7 +173,12 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
 			 * So we need to support them, but there is no need to
 			 * take them into consideration when trying to optimize
 			 * this code. So let's only check for duplicates when
-			 * the addend is zero.
+			 * the addend is zero. (Note that calls into the core
+			 * module via init PLT entries could involve section
+			 * relative symbol references with non-zero addends, for
+			 * which we may end up emitting duplicates, but the init
+			 * PLT is released along with the rest of the .init
+			 * region as soon as module loading completes.)
 			 */
 			if (!is_zero_addend_relocation(base, rel + i) ||
 			    !duplicate_rel(base, rel, i))
@@ -174,7 +191,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long plts = 0;
+	unsigned long core_plts = 0;
+	unsigned long init_plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
 	Elf32_Sym *syms = NULL;
 
@@ -184,13 +202,15 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	 */
 	for (s = sechdrs; s < sechdrs_end; ++s) {
 		if (strcmp(".plt", secstrings + s->sh_name) == 0)
-			mod->arch.plt = s;
+			mod->arch.core.plt = s;
+		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
+			mod->arch.init.plt = s;
 		else if (s->sh_type == SHT_SYMTAB)
 			syms = (Elf32_Sym *)s->sh_addr;
 	}
 
-	if (!mod->arch.plt) {
-		pr_err("%s: module PLT section missing\n", mod->name);
+	if (!mod->arch.core.plt || !mod->arch.init.plt) {
+		pr_err("%s: module PLT section(s) missing\n", mod->name);
 		return -ENOEXEC;
 	}
 	if (!syms) {
@@ -213,16 +233,29 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		/* sort by type and symbol index */
 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
 
-		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
+		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+			core_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
+		else
+			init_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
 	}
 
-	mod->arch.plt->sh_type = SHT_NOBITS;
-	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
-					  sizeof(struct plt_entries));
-	mod->arch.plt_count = 0;
+	mod->arch.core.plt->sh_type = SHT_NOBITS;
+	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.core.plt_count = 0;
 
-	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
+	mod->arch.init.plt->sh_type = SHT_NOBITS;
+	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.init.plt_count = 0;
+
+	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
+		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
 	return 0;
 }
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 05881e2..eacb5c6 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,3 +1,4 @@
 SECTIONS {
 	.plt : { BYTE(0) }
+	.init.plt : { BYTE(0) }
 }
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3e5e419..c3ed6bd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -93,12 +93,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -514,12 +508,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -533,9 +522,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -546,12 +564,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -565,9 +578,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d19..066b6d4 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -83,9 +83,9 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb5..8679405 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7..624a510 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index c2b1315..a08d7a9 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	int ret = 1;
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
+	int ret = 1;
 
 	switch (psci_fn) {
 	case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	case PSCI_0_2_FN_CPU_ON:
 	case PSCI_0_2_FN64_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 8dea616..5049777 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -147,10 +147,10 @@
 
 	@ Configure caches (if implemented)
 	teq     r8, #0
-	stmneia	r12, {r0-r6, lr}	@ v7m_invalidate_l1 touches r0-r6
+	stmneia	sp, {r0-r6, lr}		@ v7m_invalidate_l1 touches r0-r6
 	blne	v7m_invalidate_l1
 	teq     r8, #0			@ re-evalutae condition
-	ldmneia	r12, {r0-r6, lr}
+	ldmneia	sp, {r0-r6, lr}
 
 	@ Configure the System Control Register to ensure 8-byte stack alignment
 	@ Note the STKALIGN bit is either RW or RAO.
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 445aeb6..dae2f9f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -128,13 +128,13 @@
 	  This enables support for the SDM845 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
-config ARCH_SDM830
-	bool "Enable Support for Qualcomm Technologies Inc. SDM830"
+config ARCH_SDM670
+	bool "Enable Support for Qualcomm Technologies Inc. SDM670"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the SDM830 chipset. If you do not
+	  This enables support for the SDM670 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
 config ARCH_ROCKCHIP
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db..509a2ed 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -772,6 +772,7 @@
 			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
+			reset-names = "reset";
 			bus-width = <0x8>;
 			vmmc-supply = <&ldo19>;
 			pinctrl-names = "default";
@@ -795,6 +796,7 @@
 			clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
+			reset-names = "reset";
 			vqmmc-supply = <&ldo7>;
 			vmmc-supply = <&ldo10>;
 			bus-width = <0x4>;
@@ -813,6 +815,7 @@
 			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
+			reset-names = "reset";
 			bus-width = <0x4>;
 			broken-cd;
 			pinctrl-names = "default", "idle";
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index cd13516..7ad029a 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -23,10 +23,9 @@
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
 endif
 
-dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
-	sdm830-rumi.dtb \
-	sdm830-mtp.dtb \
-	sdm830-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
+	sdm670-mtp.dtb \
+	sdm670-cdp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 6a3e8b4..ea89751 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -20,6 +20,7 @@
 		reg = <0x5040000 0x10000>;
 		#iommu-cells = <1>;
 		qcom,dynamic;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <2>;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
@@ -63,6 +64,7 @@
 		reg-names = "base", "tcu-base";
 		#iommu-cells = <2>;
 		qcom,skip-init;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <1>;
 		#size-cells = <1>;
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-cdp.dts
index 4b3fa93..7e5947b 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
@@ -10,14 +10,14 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
similarity index 78%
rename from arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index c7bbef0..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-cdp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-mtp.dts
index 4b3fa93..1de40b7 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
@@ -10,14 +10,14 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 MTP";
+	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
+	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
similarity index 78%
copy from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index b2d607d..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 4b3fa93..09ce9d2 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
+		compatible = "qcom,sdm670-pinctrl";
 		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/sdm830-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
similarity index 65%
rename from arch/arm64/boot/dts/qcom/sdm830-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index 2485051..6201488 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,12 +14,16 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdm830.dtsi"
-#include "sdm830-rumi.dtsi"
-
+#include "sdm670.dtsi"
+#include "sdm670-rumi.dtsi"
 / {
-	model = "Qualcomm Technologies, Inc. SDM830 RUMI";
-	compatible = "qcom,sdm830-rumi", "qcom,sdm830", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM670 RUMI";
+	compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
+&soc {
+	wdog: qcom,wdt@17980000{
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index b2d607d..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
new file mode 100644
index 0000000..2cbb990
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -0,0 +1,547 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670";
+	compatible = "qcom,sdm670";
+	qcom,msm-id = <336 0x0>;
+	interrupt-parent = <&intc>;
+
+	aliases { };
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+				L3_0: l3-cache {
+					compatible = "arm,arch-cache";
+					cache-size = <0x100000>;
+					cache-level = <3>;
+				};
+			};
+			L1_I_0: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_0: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU1: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_100>;
+			L2_100: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_100: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_100: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_200>;
+			L2_200: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_200: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_200: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_300>;
+			L2_300: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_300: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_300: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_400>;
+			L2_400: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_400: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_400: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU5: cpu@500 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x500>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_500>;
+			L2_500: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_500: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_500: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU6: cpu@600 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x600>;
+			enable-method = "psci";
+			efficiency = <1740>;
+			cache-size = <0x10000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_600>;
+			L2_600: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x40000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_600: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_600: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+		};
+
+		CPU7: cpu@700 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x700>;
+			enable-method = "psci";
+			efficiency = <1740>;
+			cache-size = <0x10000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_700>;
+			L2_700: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x40000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_700: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_700: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+		};
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+
+				core1 {
+					cpu = <&CPU1>;
+				};
+
+				core2 {
+					cpu = <&CPU2>;
+				};
+
+				core3 {
+					cpu = <&CPU3>;
+				};
+
+				core4 {
+					cpu = <&CPU4>;
+				};
+
+				core5 {
+					cpu = <&CPU5>;
+				};
+			};
+			cluster1 {
+				core0 {
+					cpu = <&CPU6>;
+				};
+
+				core1 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	soc: soc { };
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+	};
+};
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	intc: interrupt-controller@17a00000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x20000>;
+		reg = <0x17a00000 0x10000>,     /* GICD */
+		      <0x17a60000 0x100000>;    /* GICR * 8 */
+		interrupts = <1 9 4>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 1 0xf08>,
+			     <1 2 0xf08>,
+			     <1 3 0xf08>,
+			     <1 0 0xf08>;
+		clock-frequency = <19200000>;
+	};
+
+	timer@0x17c90000{
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0x17c90000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@0x17ca0000 {
+			frame-number = <0>;
+			interrupts = <0 7 0x4>,
+				     <0 6 0x4>;
+			reg = <0x17ca0000 0x1000>,
+			      <0x17cb0000 0x1000>;
+		};
+
+		frame@17cc0000 {
+			frame-number = <1>;
+			interrupts = <0 8 0x4>;
+			reg = <0x17cc0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cd0000 {
+			frame-number = <2>;
+			interrupts = <0 9 0x4>;
+			reg = <0x17cd0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17ce0000 {
+			frame-number = <3>;
+			interrupts = <0 10 0x4>;
+			reg = <0x17ce0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cf0000 {
+			frame-number = <4>;
+			interrupts = <0 11 0x4>;
+			reg = <0x17cf0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d00000 {
+			frame-number = <5>;
+			interrupts = <0 12 0x4>;
+			reg = <0x17d00000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d10000 {
+			frame-number = <6>;
+			interrupts = <0 13 0x4>;
+			reg = <0x17d10000 0x1000>;
+			status = "disabled";
+		};
+	};
+
+	restart@10ac000 {
+		compatible = "qcom,pshold";
+		reg = <0xC264000 0x4>,
+		      <0x1fd3000 0x4>;
+		reg-names = "pshold-base", "tcsr-boot-misc-detect";
+	};
+
+	clock_cpucc: qcom,cpucc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "cpucc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	wdog: qcom,wdt@17980000{
+		compatible = "qcom,msm-watchdog";
+		reg = <0x17980000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+	};
+
+	qcom,msm-imem@146bf000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x146bf000 0x1000>;
+		ranges = <0x0 0x146bf000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		kaslr_offset@6d0 {
+			compatible = "qcom,msm-imem-kaslr_offset";
+			reg = <0x6d0 12>;
+		};
+	};
+
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l1_i_cache0 {
+			qcom,dump-node = <&L1_I_0>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache1 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache2 {
+			qcom,dump-node = <&L1_I_200>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache3 {
+			qcom,dump-node = <&L1_I_300>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_400>;
+			qcom,dump-id = <0x64>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_500>;
+			qcom,dump-id = <0x65>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_600>;
+			qcom,dump-id = <0x66>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_700>;
+			qcom,dump-id = <0x67>;
+		};
+		qcom,l1_d_cache0 {
+			qcom,dump-node = <&L1_D_0>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache1 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache2 {
+			qcom,dump-node = <&L1_D_200>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache3 {
+			qcom,dump-node = <&L1_D_300>;
+			qcom,dump-id = <0x83>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_400>;
+			qcom,dump-id = <0x84>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_500>;
+			qcom,dump-id = <0x85>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_600>;
+			qcom,dump-id = <0x86>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_700>;
+			qcom,dump-id = <0x87>;
+		};
+	};
+
+	kryo3xx-erp {
+		compatible = "arm,arm64-kryo3xx-cpu-erp";
+		interrupts = <1 6 4>,
+			     <1 7 4>,
+			     <0 34 4>,
+			     <0 35 4>;
+
+		interrupt-names = "l1-l2-faultirq",
+				  "l1-l2-errirq",
+				  "l3-scu-errirq",
+				  "l3-scu-faultirq";
+	};
+
+	qcom,chd_sliver {
+		compatible = "qcom,core-hang-detect";
+		label = "silver";
+		qcom,threshold-arr = <0x17e00058 0x17e10058 0x17e20058
+					0x17e30058 0x17e40058 0x17e50058>;
+		qcom,config-arr = <0x17e00060 0x17e10060 0x17e20060
+					0x17e30060 0x17e40060 0x17e50060>;
+	};
+
+	qcom,chd_gold {
+		compatible = "qcom,core-hang-detect";
+		label = "gold";
+		qcom,threshold-arr = <0x17e60058 0x17e70058>;
+		qcom,config-arr = <0x17e60060 0x17e70060>;
+	};
+
+	qcom,ghd {
+		compatible = "qcom,gladiator-hang-detect-v2";
+		qcom,threshold-arr = <0x1799041c 0x17990420>;
+		qcom,config-reg = <0x17990434>;
+	};
+
+	qcom,msm-gladiator-v3@17900000 {
+		compatible = "qcom,msm-gladiator-v3";
+		reg = <0x17900000 0xd080>;
+		reg-names = "gladiator_base";
+		interrupts = <0 17 0>;
+	};
+
+	dcc: dcc_v2@10a2000 {
+		compatible = "qcom,dcc_v2";
+		reg = <0x10a2000 0x1000>,
+		      <0x10ae000 0x2000>;
+		reg-names = "dcc-base", "dcc-ram-base";
+	};
+
+};
+
+#include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm830-cdp.dts b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
deleted file mode 100644
index dab4a9d..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "sdm830.dtsi"
-#include "sdm830-cdp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdm830-cdp", "qcom,sdm830", "qcom,cdp";
-	qcom,board-id = <1 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dts b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
deleted file mode 100644
index 5da16e6..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "sdm830.dtsi"
-#include "sdm830-mtp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdm830-mtp", "qcom,sdm830", "qcom,mtp";
-	qcom,board-id = <8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
deleted file mode 100644
index 2bc5f3f..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845-rumi.dtsi file.
- */
-
- #include "sdm845-rumi.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830-sim.dts b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
deleted file mode 100644
index 57cd155..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-sim.dts
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-/memreserve/ 0x90000000 0x00000100;
-
-#include "sdm830.dtsi"
-#include "sdm830-sim.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM830 SIM";
-	compatible = "qcom,sdm830-sim", "qcom,sdm830", "qcom,sim";
-	qcom,board-id = <16 0>;
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
deleted file mode 100644
index 85e8075..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845-sim.dtsi file.
- */
-
- #include "sdm845-sim.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830.dtsi b/arch/arm64/boot/dts/qcom/sdm830.dtsi
deleted file mode 100644
index 81ae913..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830.dtsi
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845.dtsi file.
- */
-
- #include "sdm845.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM830";
-	compatible = "qcom,sdm830";
-	qcom,msm-id = <328 0x0>;
-
-};
-
-&soc {
-	qcom,llcc@1300000 {
-		status = "disabled";
-	};
-
-	qcom,spss@1880000 {
-		status = "disabled";
-	};
-
-	qcom,glink-mailbox-xprt-spss@1885008 {
-		status = "disabled";
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 122299c..e431d3a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <1 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
@@ -35,7 +35,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 55e615c..46b7226 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <8 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
@@ -35,7 +35,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index fcc09a0..628b6cc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -169,6 +169,14 @@
 	qocm,wcd-dsp-glink {
 		compatible = "qcom,wcd-dsp-glink";
 	};
+
+	qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
 };
 
 &slim_aud {
@@ -229,5 +237,13 @@
 		qcom,cdc-mad-dmic-rate = <600000>;
 
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <0>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <9600000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 1702e80..b1c91bf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -1073,6 +1073,15 @@
 			qcom,bcms = <&bcm_sn4>;
 		};
 
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
 		mas_llcc_mc_display: mas-llcc-mc_display {
 			cell-id = <MSM_BUS_MASTER_LLCC_DISPLAY>;
 			label = "mas-llcc-mc_display";
@@ -1635,7 +1644,7 @@
 			qcom,buswidth = <4>;
 			qcom,agg-ports = <4>;
 			qcom,bus-dev = <&fab_mc_virt>;
-			qcom,bcms = <&bcm_mc0>;
+			qcom,bcms = <&bcm_mc0>, <&bcm_acv>;
 		};
 
 		slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 4c642e3..cb20e0f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -464,7 +464,7 @@
 		cell-index = <0>;
 		label = "cam-cdm-intf";
 		num-hw-cdm = <1>;
-		cdm-client-names = "ife",
+		cdm-client-names = "vfe",
 			"jpeg-dma",
 			"jpeg",
 			"fd";
@@ -493,7 +493,7 @@
 			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		clock-rates = <0 0 0 0 0>;
-		cdm-client-names = "vfe";
+		cdm-client-names = "ife";
 		status = "ok";
 	};
 
@@ -761,24 +761,18 @@
 			"cpas_ahb_clk",
 			"camnoc_axi_clk",
 			"icp_apb_clk",
-			"icp_atb_clk",
 			"icp_clk",
-			"icp_clk_src",
-			"icp_cti_clk",
-			"icp_ts_clk";
+			"icp_clk_src";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 				<&clock_gcc GCC_CAMERA_AXI_CLK>,
 				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 				<&clock_camcc CAM_CC_ICP_APB_CLK>,
-				<&clock_camcc CAM_CC_ICP_ATB_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK>,
-				<&clock_camcc CAM_CC_ICP_CLK_SRC>,
-				<&clock_camcc CAM_CC_ICP_CTI_CLK>,
-				<&clock_camcc CAM_CC_ICP_TS_CLK>;
+				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+		clock-rates = <0 0 0 80000000 0 0 0 600000000>;
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 1fdf740..211dda2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -237,7 +237,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -247,7 +247,29 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -276,7 +298,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -291,6 +313,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 9b7865f..e32ec6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -780,7 +780,8 @@
 	};
 
 	tpdm_lpass: tpdm@6844000 {
-		compatible = "qcom,coresight-tpdm";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
 		reg = <0x6844000 0x1000>;
 		reg-names = "tpdm-base";
 
@@ -1396,25 +1397,61 @@
 		};
 	};
 
-	cti_ddr0: cti@69e1000 {
+	cti0_ddr0: cti@69e1000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69e1000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr0";
+		coresight-name = "coresight-cti0-ddr0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 	};
 
-	cti_ddr1: cti@69e4000 {
+	cti0_ddr1: cti@69e4000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b966>;
 		reg = <0x69e4000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti-ddr1";
+		coresight-name = "coresight-cti0-ddr1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_ddr1: cti@69e5000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x69e5000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1-ddr1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_dlmm: cti@6c09000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6c09000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0-dlmm";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_dlmm: cti@6c0a000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x6c0a000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti1-dlmm";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 1b3f2a6..92f8586 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -65,7 +65,7 @@
 
 		qcom,gpu-quirk-hfi-use-reg;
 
-		qcom,idle-timeout = <100000000>; //msecs
+		qcom,idle-timeout = <80>; //msecs
 		qcom,no-nap;
 
 		qcom,highest-bank-bit = <15>;
@@ -237,7 +237,7 @@
 		label = "kgsl-gmu";
 		compatible = "qcom,gpu-gmu";
 
-		reg = <0x506a000 0x26000>, <0xb200000 0x300000>;
+		reg = <0x506a000 0x30000>, <0xb200000 0x300000>;
 		reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
 
 		interrupts = <0 304 0>, <0 305 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 508b645..f59ca56 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -89,7 +89,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -99,9 +99,31 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
 &dsi_sim_vid {
@@ -128,7 +150,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_dual_nt35597_truly_cmd_display {
+&dsi_nt35597_truly_dsc_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -262,6 +284,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
index da5d6fa..c7a4d7d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -39,44 +39,44 @@
 				36 37>;
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0xffffffff>;
-		interrupt-map = <0 0 0 0 &intc 0 141 0
-				0 0 0 1 &intc 0 149 0
-				0 0 0 2 &intc 0 150 0
-				0 0 0 3 &intc 0 151 0
-				0 0 0 4 &intc 0 152 0
-				0 0 0 5 &intc 0 140 0
-				0 0 0 6 &intc 0 672 0
-				0 0 0 7 &intc 0 673 0
-				0 0 0 8 &intc 0 674 0
-				0 0 0 9 &intc 0 675 0
-				0 0 0 10 &intc 0 676 0
-				0 0 0 11 &intc 0 677 0
-				0 0 0 12 &intc 0 678 0
-				0 0 0 13 &intc 0 679 0
-				0 0 0 14 &intc 0 680 0
-				0 0 0 15 &intc 0 681 0
-				0 0 0 16 &intc 0 682 0
-				0 0 0 17 &intc 0 683 0
-				0 0 0 18 &intc 0 684 0
-				0 0 0 19 &intc 0 685 0
-				0 0 0 20 &intc 0 686 0
-				0 0 0 21 &intc 0 687 0
-				0 0 0 22 &intc 0 688 0
-				0 0 0 23 &intc 0 689 0
-				0 0 0 24 &intc 0 690 0
-				0 0 0 25 &intc 0 691 0
-				0 0 0 26 &intc 0 692 0
-				0 0 0 27 &intc 0 693 0
-				0 0 0 28 &intc 0 694 0
-				0 0 0 29 &intc 0 695 0
-				0 0 0 30 &intc 0 696 0
-				0 0 0 31 &intc 0 697 0
-				0 0 0 32 &intc 0 698 0
-				0 0 0 33 &intc 0 699 0
-				0 0 0 34 &intc 0 700 0
-				0 0 0 35 &intc 0 701 0
-				0 0 0 36 &intc 0 702 0
-				0 0 0 37 &intc 0 703 0>;
+		interrupt-map = <0 0 0 0 &pdc 0 141 0
+				0 0 0 1 &pdc 0 149 0
+				0 0 0 2 &pdc 0 150 0
+				0 0 0 3 &pdc 0 151 0
+				0 0 0 4 &pdc 0 152 0
+				0 0 0 5 &pdc 0 140 0
+				0 0 0 6 &pdc 0 672 0
+				0 0 0 7 &pdc 0 673 0
+				0 0 0 8 &pdc 0 674 0
+				0 0 0 9 &pdc 0 675 0
+				0 0 0 10 &pdc 0 676 0
+				0 0 0 11 &pdc 0 677 0
+				0 0 0 12 &pdc 0 678 0
+				0 0 0 13 &pdc 0 679 0
+				0 0 0 14 &pdc 0 680 0
+				0 0 0 15 &pdc 0 681 0
+				0 0 0 16 &pdc 0 682 0
+				0 0 0 17 &pdc 0 683 0
+				0 0 0 18 &pdc 0 684 0
+				0 0 0 19 &pdc 0 685 0
+				0 0 0 20 &pdc 0 686 0
+				0 0 0 21 &pdc 0 687 0
+				0 0 0 22 &pdc 0 688 0
+				0 0 0 23 &pdc 0 689 0
+				0 0 0 24 &pdc 0 690 0
+				0 0 0 25 &pdc 0 691 0
+				0 0 0 26 &pdc 0 692 0
+				0 0 0 27 &pdc 0 693 0
+				0 0 0 28 &pdc 0 694 0
+				0 0 0 29 &pdc 0 695 0
+				0 0 0 30 &pdc 0 696 0
+				0 0 0 31 &pdc 0 697 0
+				0 0 0 32 &pdc 0 698 0
+				0 0 0 33 &pdc 0 699 0
+				0 0 0 34 &pdc 0 700 0
+				0 0 0 35 &pdc 0 701 0
+				0 0 0 36 &pdc 0 702 0
+				0 0 0 37 &pdc 0 703 0>;
 
 		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
 				"int_d", "int_global_int",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 59b3396..f534891 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -19,6 +19,7 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		interrupt-parent = <&pdc>;
 
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
@@ -1555,6 +1556,68 @@
 			};
 		};
 
+		nfc {
+			nfc_int_active: nfc_int_active {
+				/* active state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_int_suspend: nfc_int_suspend {
+				/* sleep state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_active: nfc_enable_active {
+				/* active state */
+				mux {
+					/* 12: NFC ENABLE 116:ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_suspend: nfc_enable_suspend {
+				/* sleep state */
+				mux {
+					/* 12: NFC ENABLE 116:ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable;
+				};
+			};
+		};
+
 		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
 			qupv3_se3_spi_active: qupv3_se3_spi_active {
 				mux {
@@ -2667,11 +2730,11 @@
 
 		trigout_a: trigout_a {
 			mux {
-				pins = "gpio62", "gpio51";
+				pins = "gpio90";
 				function = "qdss_cti";
 			};
 			config {
-				pins = "gpio62", "gpio51";
+				pins = "gpio90";
 				drive-strength = <2>;
 				bias-disable;
 			};
@@ -2680,6 +2743,14 @@
 };
 
 &pm8998_gpios {
+	gpio@d400 {
+		qcom,mode = <0>;
+		qcom,vin-sel = <1>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+
 	key_home {
 		key_home_default: key_home_default {
 			pins = "gpio5";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 70e749b..6806145 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -145,4 +145,12 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	pdc: interrupt-controller@0xb220000{
+		compatible = "qcom,pdc-sdm845";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index b51996d..0f115f8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -14,6 +14,27 @@
 #include <dt-bindings/gpio/gpio.h>
 
 /{
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+
 	qrd_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
@@ -39,6 +60,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
@@ -138,7 +177,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 0fb455f..1fa6e26 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -42,7 +42,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_4uart_active>;
 		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 607 0>,
+		interrupts-extended = <&pdc GIC_SPI 607 0>,
 				<&tlmm 48 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
@@ -60,7 +60,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_4uart_active>;
 		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 608 0>,
+		interrupts-extended = <&pdc GIC_SPI 608 0>,
 				<&tlmm 96 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 79ac3b1..9da61dc 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -36,7 +36,7 @@
 			reg = <0x3500 0x100>;
 			regulator-name = "pm8998_s12";
 			regulator-min-microvolt = <568000>;
-			regulator-max-microvolt = <1056000>;
+			regulator-max-microvolt = <1136000>;
 			qcom,enable-time = <500>;
 			regulator-always-on;
 		};
@@ -114,9 +114,9 @@
 				regulator-max-microvolt = <19>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <19 19>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <19 19 19>;
 				qcom,cpr-corners = <19>;
 
 				qcom,cpr-corner-fmax-map = <6 12 17 19>;
@@ -137,6 +137,11 @@
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  584000
 					 584000  584000  632000  632000  632000
+					 632000  672000  712000  712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
 					 632000  672000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
@@ -146,13 +151,30 @@
 					 32000  32000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  748800000
 					 825600000  902400000  979200000
 					1056000000 1132800000 1209600000
 					1286400000 1363200000 1440000000
 					1516800000 1593600000 1651200000
-					1708800000>;
+					1708800000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1651200000
+					1708800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1670400000
+					1747200000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2594 2795 2576 2761 2469 2673 2198
@@ -185,6 +207,8 @@
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -201,20 +225,41 @@
 			apc0_l3_vreg: regulator {
 				regulator-name = "apc0_l3_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <11>;
+				regulator-max-microvolt = <13>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <11 11>;
-				qcom,cpr-corners = <11>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <11 11 13>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 1 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 2 */
+					<13 13 13 13 13 13 13 13>;
 
-				qcom,cpr-corner-fmax-map = <4 7 9 11>;
+				qcom,cpr-corner-fmax-map =
+					/* Speed bin 0 */
+					<4 7 9 11>,
+					/* Speed bin 1 */
+					<4 7 9 11>,
+					/* Speed bin 2 */
+					<4 7 9 13>;
 
 				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
 					<872000  872000  872000  872000  872000
 					 872000  872000  872000  928000  996000
-					 996000>;
+					 996000>,
+					/* Speed bin 1 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000>,
+					/* Speed bin 2 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000  996000  996000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -224,18 +269,43 @@
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
 					 584000  584000  632000  672000  712000
-					 712000>;
+					 712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000  712000
+					 712000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  40000
-					 40000>;
+					 40000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  729600000
 					 806400000  883200000  960000000
-					1036800000 1094400000>;
+					1036800000 1094400000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1094400000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1209600000
+					1305600000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -262,12 +332,14 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <11>;
+				qcom,cpr-aging-ref-corner = <11 11 13>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -320,7 +392,7 @@
 		qcom,cpr-panic-reg-name-list =
 			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
 
-		qcom,cpr-aging-ref-voltage = <1056000>;
+		qcom,cpr-aging-ref-voltage = <1136000>;
 		vdd-supply = <&pm8998_s12>;
 
 		thread@0 {
@@ -333,23 +405,27 @@
 			apc1_perfcl_vreg: regulator {
 				regulator-name = "apc1_perfcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <26>;
+				regulator-max-microvolt = <27>;
 
 				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <22 24>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <22 24 25>;
 				qcom,cpr-corners =
 					/* Speed bin 0 */
 					<22 22 22 22 22 22 22 22>,
 					/* Speed bin 1 */
-					<24 24 24 24 24 24 24 24>;
+					<24 24 24 24 24 24 24 24>,
+					/* Speed bin 2 */
+					<25 25 25 25 25 25 25 25>;
 
 				qcom,cpr-corner-fmax-map =
 					/* Speed bin 0 */
 					<10 17 22>,
 					/* Speed bin 1 */
-					<10 17 24>;
+					<10 17 24>,
+					/* Speed bin 2 */
+					<10 17 25>;
 
 				qcom,cpr-voltage-ceiling =
 					/* Speed bin 0 */
@@ -357,13 +433,20 @@
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000>,
+					1136000 1136000>,
 					/* Speed bin 1 */
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000 1056000 1056000>;
+					1136000 1136000 1136000 1136000>,
+					/* Speed bin 2 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1136000 1136000 1136000 1136000
+					1136000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -377,7 +460,14 @@
 					 568000  568000  568000  568000  568000
 					 584000  584000  632000  632000  632000
 					 632000  632000  672000  712000  712000
-					 772000  772000  772000  772000>;
+					 772000  772000  772000  772000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000  772000  772000
+					 772000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -391,7 +481,13 @@
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  40000  40000  40000
-					 40000  40000  40000  40000>;
+					 40000  40000  40000  40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000  40000  40000  40000>;
 
 				qcom,corner-frequencies =
 					/* Speed bin 0 */
@@ -411,7 +507,17 @@
 					1267200000 1344000000 1420800000
 					1497600000 1574400000 1651200000
 					1728000000 1804800000 1881600000
-					1958400000 2035200000 2092800000>;
+					1958400000 2035200000 2092800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000 2035200000 2112000000
+					2208000000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -425,22 +531,76 @@
 					 2003 1675>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<100000 100000 100000>;
+					/* Speed bin 0 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 1 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <22 24>;
+				qcom,cpr-aging-ref-corner = <22 24 25>;
 				qcom,cpr-aging-ro-scaling-factor = <1700>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -1100,8 +1260,12 @@
 		pm8005_s1_level: regulator-s1-level {
 			regulator-name = "pm8005_s1_level";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
-			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+			regulator-max-microvolt
+				= <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,init-voltage-level
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 255c0b3..8cccb0f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -248,10 +248,10 @@
 		label = "dsi_nt35597_truly_dsc_cmd_display";
 		qcom,display-type = "primary";
 
-		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
-		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -272,10 +272,10 @@
 		label = "dsi_nt35597_truly_dsc_video_display";
 		qcom,display-type = "primary";
 
-		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
-		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -371,7 +371,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_cmd_display>;
+	connectors = <&sde_rscc &sde_wb &dsi_nt35597_truly_dsc_cmd_display>;
 };
 
 &dsi_dual_nt35597_truly_video {
@@ -396,7 +396,8 @@
 	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,display-topology = <2 2 2>;
+	qcom,display-topology = <1 1 1>,
+				<2 2 1>;
 	qcom,default-topology-index = <0>;
 };
 
@@ -404,7 +405,8 @@
 	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,display-topology = <2 2 2>;
+	qcom,display-topology = <1 1 1>,
+				<2 2 1>;
 	qcom,default-topology-index = <0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 6d9e321..c350800 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -35,7 +35,7 @@
 		sde-vdd-supply = <&mdss_core_gdsc>;
 
 		/* interrupt config */
-		interrupt-parent = <&intc>;
+		interrupt-parent = <&pdc>;
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
@@ -130,11 +130,16 @@
 		qcom,sde-max-bw-high-kbps = <9600000>;
 		qcom,sde-dram-channels = <2>;
 		qcom,sde-num-nrt-paths = <0>;
+		qcom,sde-dspp-ad-version = <0x00040000>;
+		qcom,sde-dspp-ad-off = <0x28000 0x27000>;
 
 		qcom,sde-vbif-off = <0>;
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
 
+		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
 		qcom,sde-inline-rotator = <&mdss_rotator 0>;
 
 		qcom,sde-reg-dma-off = <0>;
@@ -259,7 +264,19 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
-		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+		/* Offline rotator QoS setting */
+		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
+		qcom,mdss-rot-cdp-setting = <1 1>;
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>;
+
+		/* Inline rotator QoS Setting */
+		/* setting default register values for RD - qos/danger/safe */
+		qcom,mdss-inline-rot-qos-lut = <0x44556677 0x00112233
+							0x44556677 0x00112233>;
+		qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
 
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 6fb6fb8..3870d8f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -67,7 +67,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 133 0>;
 			usb-phy = <&qusb_phy0>, <&usb_qmp_dp_phy>;
 			tx-fifo-resize;
@@ -80,7 +79,6 @@
 		qcom,usbbam@a704000 {
 			compatible = "qcom,usb-bam-msm";
 			reg = <0xa704000 0x17000>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 132 0>;
 
 			qcom,bam-type = <0>;
@@ -361,7 +359,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a800000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 138 0>;
 			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
 			tx-fifo-resize;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 6038b6e..843e326 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -30,7 +30,7 @@
 	model = "Qualcomm Technologies, Inc. SDM845";
 	compatible = "qcom,sdm845";
 	qcom,msm-id = <321 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&pdc>;
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
@@ -387,7 +387,7 @@
 			       1024 1131 /* 1958400 */
 			>;
 			idle-cost-data = <
-				520 500 480 460
+				100 80 60 40
 			>;
 		};
 		CLUSTER_COST_0: cluster-cost0 {
@@ -567,6 +567,12 @@
 			size = <0 0x5c00000>;
 		};
 
+		dump_mem: mem_dump_region {
+			compatible = "shared-dma-pool";
+			reusable;
+			size = <0 0x2400000>;
+		};
+
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
 			compatible = "shared-dma-pool";
@@ -600,6 +606,7 @@
 		reg = <0x17a00000 0x10000>,     /* GICD */
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
+		interrupt-parent = <&intc>;
 	};
 
 	timer {
@@ -801,7 +808,8 @@
 			< 1881600 >,
 			< 1958400 >,
 			< 2035200 >,
-			< 2092800 >;
+			< 2092800 >,
+			< 2208000 >;
 	};
 
 	cpubw: qcom,cpubw {
@@ -955,7 +963,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -975,7 +985,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -1011,6 +1023,35 @@
 		interrupts = <1 5 4>;
 	};
 
+	mincpubw: qcom,mincpubw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	devfreq-cpufreq {
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map-0 =
+				< 1708800  762 >;
+			cpu-to-dev-map-4 =
+				< 1881600  762 >,
+				< 2208000 2597 >;
+		};
+	};
+
 	clock_rpmh: qcom,rpmhclk {
 		compatible = "qcom,rpmh-clk-sdm845";
 		#clock-cells = <1>;
@@ -1105,6 +1146,9 @@
 		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
 		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
 
+		l3-dev0 = <&l3_cpu0>;
+		l3-dev4 = <&l3_cpu4>;
+
 		qcom,l3-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1129,6 +1173,21 @@
 			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
 			<  1094400000 0x402c0a39 0x00002e2e 0x3 11 >;
 
+		qcom,l3-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x2 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x3 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x3 12 >,
+			<  1305600000 0x40340c44 0x00003636 0x3 13 >;
+
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1169,6 +1228,27 @@
 			<  1651200000 0x403c1156 0x00004545 0x3 18 >,
 			<  1708800000 0x40441259 0x00004747 0x3 19 >;
 
+		qcom,pwrcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+			<  1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
+			<  1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004242 0x2 17 >,
+			<  1670400000 0x40441157 0x00004646 0x3 18 >,
+			<  1747200000 0x4044125b 0x00004949 0x3 19 >;
+
 		qcom,perfcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1219,6 +1299,33 @@
 			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
 			<  2092800000 0x4054176d 0x00005757 0x3 24 >;
 
+		qcom,perfcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x2 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x2 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x3 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x3 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x3 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >,
+			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
+			<  2112000000 0x4054176e 0x00005858 0x3 24 >,
+			<  2208000000 0x40541873 0x00005c5c 0x3 25 >;
+
 		qcom,l3-min-cpr-vc-bin0 = <7>;
 		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
 		qcom,perfcl-min-cpr-vc-bin0 = <7>;
@@ -1683,7 +1790,7 @@
 	qcom,ssc@5c00000 {
 		compatible = "qcom,pil-tz-generic";
 		reg = <0x5c00000 0x4000>;
-		interrupts = <0 377 1>;
+		interrupts = <0 494 1>;
 
 		vdd_cx-supply = <&pm8998_l27_level>;
 		vdd_px-supply = <&pm8998_lvs2>;
@@ -2486,6 +2593,14 @@
 		qcom,pipe-attr-ee;
 	};
 
+	qcom,qbt1000 {
+		compatible = "qcom,qbt1000";
+		clock-names = "core", "iface";
+		clock-frequency = <25000000>;
+		qcom,ipc-gpio = <&tlmm 121 0>;
+		qcom,finger-detect-gpio = <&pm8998_gpios 5 0>;
+	};
+
 	qcom_seecom: qseecom@86d00000 {
 		compatible = "qcom,qseecom";
 		reg = <0x86d00000 0x2200000>;
@@ -3875,6 +3990,56 @@
 		#thermal-sensor-cells = <1>;
 	};
 
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xec>;
+		};
+
+		rpm_sw_dump {
+			qcom,dump-size = <0x28000>;
+			qcom,dump-id = <0xea>;
+		};
+
+		pmic_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xe4>;
+		};
+
+		tmc_etf_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xf0>;
+		};
+
+		tmc_etf_swao_dump {
+			qcom,dump-size = <0x8400>;
+			qcom,dump-id = <0xf1>;
+		};
+
+		tmc_etr_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x100>;
+		};
+
+		tmc_etf_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x101>;
+		};
+
+		tmc_etf_swao_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x102>;
+		};
+
+		misc_data_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0xe8>;
+		};
+	};
+
 	gpi_dma0: qcom,gpi-dma@0x800000 {
 		#dma-cells = <6>;
 		compatible = "qcom,gpi-dma";
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 10b44f8..8c20b3f 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -47,7 +47,7 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM830=y
+CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
@@ -222,6 +222,7 @@
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -296,7 +297,7 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_SDM670=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -491,6 +492,7 @@
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
@@ -546,6 +548,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 737f47f..0bebc63b 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -52,7 +52,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM830=y
+CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
@@ -232,6 +232,7 @@
 CONFIG_CFG80211_INTERNAL_REGDB=y
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -305,7 +306,7 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_SDM670=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -513,6 +514,7 @@
 CONFIG_MSM_AVTIMER=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QTI_RPM_STATS_LOG=y
@@ -619,6 +621,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f..0fe7e43 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
 
-#define __smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) }; 			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u8 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u16 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u32 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("stlr %1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u64 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	}								\
 } while (0)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d2..ae852ad 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x,		\
 	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
 		__nops(3)						\
 	"	" #nop_lse)						\
-	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)			\
+	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
 	: "r" (x)							\
 	: cl);								\
 									\
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba..fe39e68 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 {
 	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 73fee2c..21934d1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -98,11 +98,12 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define __range_ok(addr, size)						\
 ({									\
+	unsigned long __addr = (unsigned long __force)(addr);		\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
 		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (addr), "Ir" (size),				\
+		: "1" (__addr), "Ir" (size),				\
 		  "r" (current_thread_info()->addr_limit)		\
 		: "cc");						\
 	flag;								\
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bdb35b9..29d2ad8 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -306,7 +306,8 @@ do {								\
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
 	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),		\
+	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
+	  "i" (-EFAULT),					\
 	  "i" (__SWP_LL_SC_LOOPS)				\
 	: "memory");						\
 	uaccess_disable();					\
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index da845fd..fe8f94a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -277,12 +277,10 @@ void __show_regs(struct pt_regs *regs)
 	}
 	if (!user_mode(regs))
 		show_extra_register_data(regs, 64);
-	printk("\n");
 }
 
 void show_regs(struct pt_regs * regs)
 {
-	printk("\n");
 	__show_regs(regs);
 }
 
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae..14c4e3b 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87e7e66..7cee552 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1573,8 +1573,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	int Rt = (hsr >> 5) & 0xf;
-	int Rt2 = (hsr >> 10) & 0xf;
+	int Rt = kvm_vcpu_sys_get_rt(vcpu);
+	int Rt2 = (hsr >> 10) & 0x1f;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
@@ -1625,7 +1625,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	int Rt  = (hsr >> 5) & 0xf;
+	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
@@ -1740,7 +1740,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
-	int Rt = (esr >> 5) & 0x1f;
+	int Rt = kvm_vcpu_sys_get_rt(vcpu);
 	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 07238b3..3db3812 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
-				((addr) > PAGE_OFFSET &&		\
-				 (addr) < LINCORE_BASE))
-
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-	return __kernel_ok || !__user_bad(addr, size);
+	/*
+	 * Allow access to the user mapped memory area, but not the system area
+	 * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user copy to the
+	 * system area in any case).
+	 */
+	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+		   size <= get_fs().seg - addr))
+		return true;
+	/*
+	 * Explicitly allow NULL pointers here. Parts of the kernel such
+	 * as readv/writev use access_ok to validate pointers, but want
+	 * to allow NULL pointers for various reasons. NULL pointers are
+	 * safe to allow through because the first page is not mappable on
+	 * Meta.
+	 */
+	if (!addr)
+		return true;
+	/* Allow access to core code memory area... */
+	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+	    size <= LINCORE_CODE_LIMIT + 1 - addr)
+		return true;
+	/* ... but no other areas. */
+	return false;
 }
 
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
@@ -186,8 +194,13 @@ do {                                                            \
 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
 					     long count);
 
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 /*
  * Return the size of a string (including the ending 0)
  *
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5a4f2eb..5e844f6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1368,6 +1368,7 @@
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_PGD_C0_CONTEXT
+	select MIPS_L1_CACHE_SHIFT_6
 	select GPIOLIB
 	help
 		The Loongson 3 processor implements the MIPS64R2 instruction
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b9e3f0a..0012f03 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -70,8 +70,9 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
  * switch_mm is the entry point called from the architecture independent
  * code in kernel/sched/core.c
  */
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+				      struct mm_struct *next,
+				      struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
 	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
@@ -110,6 +111,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	switch_mmu_context(prev, next, tsk);
 }
 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 /*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a5dd493..6ef8f0b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -724,7 +724,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
  */
 #define MAX_WAIT_FOR_RECOVERY 300
 
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
 {
 	struct pci_bus *frozen_bus;
 	struct eeh_dev *edev, *tmp;
@@ -736,7 +736,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	if (!frozen_bus) {
 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
-		return;
+		return false;
 	}
 
 	eeh_pe_update_time_stamp(pe);
@@ -870,7 +870,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 
-	return;
+	return false;
 
 excess_failures:
 	/*
@@ -915,8 +915,12 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 			pci_lock_rescan_remove();
 			pci_hp_remove_devices(frozen_bus);
 			pci_unlock_rescan_remove();
+
+			/* The passed PE should no longer be used */
+			return true;
 		}
 	}
+	return false;
 }
 
 static void eeh_handle_special_event(void)
@@ -982,7 +986,14 @@ static void eeh_handle_special_event(void)
 		 */
 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
-			eeh_handle_normal_event(pe);
+			/*
+			 * eeh_handle_normal_event() can make the PE stale if it
+			 * determines that the PE cannot possibly be recovered.
+			 * Don't modify the PE state if that's the case.
+			 */
+			if (eeh_handle_normal_event(pe))
+				continue;
+
 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		} else {
 			pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96..ca03eb2 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
@@ -799,8 +805,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5e7ece0..ea236bf 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -205,6 +205,8 @@ static void machine_check_process_queued_event(struct irq_work *work)
 {
 	int index;
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/*
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34d2c59..7362267 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -561,6 +561,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
 static struct pstore_info nvram_pstore_info = {
 	.owner = THIS_MODULE,
 	.name = "nvram",
+	.flags = PSTORE_FLAGS_DMESG,
 	.open = nvram_pstore_open,
 	.read = nvram_pstore_read,
 	.write = nvram_pstore_write,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 49a680d..c716473 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -839,6 +839,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	/*
+	 * If we are in a transaction and FP is off then we can't have
+	 * used FP inside that transaction. Hence the checkpointed
+	 * state is the same as the live state. We need to copy the
+	 * live state to the checkpointed state so that when the
+	 * transaction is restored, the checkpointed state is correct
+	 * and the aborted transaction sees the correct state. We use
+	 * ckpt_regs.msr here as that's what tm_reclaim will use to
+	 * determine if it's going to write the checkpointed state or
+	 * not. So either this will write the checkpointed registers,
+	 * or reclaim will. Similarly for VMX.
+	 */
+	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+		memcpy(&thr->ckfp_state, &thr->fp_state,
+		       sizeof(struct thread_fp_state));
+	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+		memcpy(&thr->ckvr_state, &thr->vr_state,
+		       sizeof(struct thread_vr_state));
+
 	giveup_all(container_of(thr, struct task_struct, thread));
 
 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462..43021f8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -302,8 +302,6 @@ long machine_check_early(struct pt_regs *regs)
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
-	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
@@ -737,6 +735,8 @@ void machine_check_exception(struct pt_regs *regs)
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 7de7124..fd59680 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -81,7 +81,7 @@ struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
 	gfp_t gfp_mask = GFP_USER;
 	struct page *new_page;
 
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return NULL;
 
 	if (PageHighMem(page))
@@ -100,7 +100,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	LIST_HEAD(cma_migrate_pages);
 
 	/* Ignore huge pages for now */
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return -EBUSY;
 
 	lru_add_drain();
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 423e450..72ae2cd 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -288,7 +288,6 @@ int dlpar_detach_node(struct device_node *dn)
 	if (rc)
 		return rc;
 
-	of_node_put(dn); /* Must decrement the refcount */
 	return 0;
 }
 
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 408b4f4..5982544 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -427,6 +427,20 @@ static void *nt_vmcoreinfo(void *ptr)
 }
 
 /*
+ * Initialize final note (needed for /proc/vmcore code)
+ */
+static void *nt_final(void *ptr)
+{
+	Elf64_Nhdr *note;
+
+	note = (Elf64_Nhdr *) ptr;
+	note->n_namesz = 0;
+	note->n_descsz = 0;
+	note->n_type = 0;
+	return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
+}
+
+/*
  * Initialize ELF header (new kernel)
  */
 static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
@@ -513,6 +527,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 		if (sa->prefix != 0)
 			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
 	ptr = nt_vmcoreinfo(ptr);
+	ptr = nt_final(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
 	phdr->p_offset = notes_offset;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a3073..c438168 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -321,6 +321,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -606,6 +607,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1135,15 +1137,23 @@
 	br	%r14
 
 .Lcleanup_sysc_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_sysc_restore_insn:
+	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
@@ -1151,15 +1161,20 @@
 	br	%r14
 
 .Lcleanup_io_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_io_restore_insn)
-	je	0f
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_io_restore_insn:
+	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
 
 .Lcleanup_idle:
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 48bae81..6f6e789 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -14,7 +14,7 @@
 static char *initrd __initdata = NULL;
 static int load_initrd(char *filename, void *buf, int size);
 
-static int __init read_initrd(void)
+int __init read_initrd(void)
 {
 	void *area;
 	long long size;
@@ -46,8 +46,6 @@ static int __init read_initrd(void)
 	return 0;
 }
 
-__uml_postsetup(read_initrd);
-
 static int __init uml_initrd_setup(char *line, int *add)
 {
 	initrd = line;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e8175a8..26b47de 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -336,11 +336,17 @@ int __init linux_main(int argc, char **argv)
 	return start_uml();
 }
 
+int __init __weak read_initrd(void)
+{
+	return 0;
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	stack_protections((unsigned long) &init_thread_info);
 	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
 	mem_total_pages(physmem_size, iomem_size, highmem);
+	read_initrd();
 
 	paging_init();
 	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index e5612f3..d7ac721 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
-#define STACK_SIZE	512	/* Minimum number of bytes for stack */
+#define STACK_SIZE	1024	/* Minimum number of bytes for stack */
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 0a535ce..8b902b6 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -759,7 +759,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  hsx_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 529bb4a..e290437 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
 		if (bytes < 8) {
 			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-				arch_wb_cache_pmem(addr, 1);
+				arch_wb_cache_pmem(addr, bytes);
 		} else {
 			if (!IS_ALIGNED(dest, 8)) {
 				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index faf3687..a300aa1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -315,10 +315,10 @@ do {									\
 #define __get_user_asm_u64(x, ptr, retval, errret)			\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
-	asm volatile(ASM_STAC "\n"					\
+	asm volatile("\n"					\
 		     "1:	movl %2,%%eax\n"			\
 		     "2:	movl %3,%%edx\n"			\
-		     "3: " ASM_CLAC "\n"				\
+		     "3:\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	mov %4,%0\n"				\
 		     "	xorl %%eax,%%eax\n"				\
@@ -327,7 +327,7 @@ do {									\
 		     ".previous\n"					\
 		     _ASM_EXTABLE(1b, 4b)				\
 		     _ASM_EXTABLE(2b, 4b)				\
-		     : "=r" (retval), "=A"(x)				\
+		     : "=r" (retval), "=&A"(x)				\
 		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
 		       "i" (errret), "0" (retval));			\
 })
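
Note (reviewer sketch, not part of the patch): the "=A" -> "=&A" change above marks the 64-bit output pair as earlyclobber, because the asm writes %eax before it has finished reading its memory operands, and without '&' the compiler is free to use %eax or %edx in the addressing of those operands. A minimal, hypothetical standalone example of the same constraint modifier, compilable with gcc on x86:

#include <stdio.h>

/*
 * Without '&', the compiler may assign 'out' the same register as 'b';
 * the first instruction would then clobber 'b' before the second one
 * reads it.  The earlyclobber forbids that overlap.
 */
static inline unsigned int add_via_asm(unsigned int a, unsigned int b)
{
	unsigned int out;

	asm ("movl %1, %0\n\t"
	     "addl %2, %0"
	     : "=&r" (out)		/* earlyclobber output */
	     : "r" (a), "r" (b));
	return out;
}

int main(void)
{
	printf("%u\n", add_via_asm(2, 3));	/* prints 5 */
	return 0;
}
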
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 2f2b8c7..6f0ab305 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -101,6 +101,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
  * Boot time FPU feature detection code:
  */
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
 
 static void __init fpu__init_system_mxcsr(void)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e5bc139..81bba3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1735,6 +1735,7 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
 {
 	struct kvm_arch *ka = &kvm->arch;
 	struct pvclock_vcpu_time_info hv_clock;
+	u64 ret;
 
 	spin_lock(&ka->pvclock_gtod_sync_lock);
 	if (!ka->use_master_clock) {
@@ -1746,10 +1747,17 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
 
+	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
+	get_cpu();
+
 	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
 			   &hv_clock.tsc_shift,
 			   &hv_clock.tsc_to_system_mul);
-	return __pvclock_read_cycles(&hv_clock, rdtsc());
+	ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+
+	put_cpu();
+
+	return ret;
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -3051,6 +3059,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+	    (events->smi.smm || events->smi.pending) &&
+	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+		return -EINVAL;
+
 	process_nmi(vcpu);
 	vcpu->arch.exception.pending = events->exception.injected;
 	vcpu->arch.exception.nr = events->exception.nr;
@@ -3225,11 +3239,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 	}
 }
 
+#define XSAVE_MXCSR_OFFSET 24
+
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
 
 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
 		/*
@@ -3237,11 +3254,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
 		 * with old userspace.
 		 */
-		if (xstate_bv & ~kvm_supported_xcr0())
+		if (xstate_bv & ~kvm_supported_xcr0() ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
-		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct fxregs_state));
@@ -4744,16 +4763,20 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 {
-	/* TODO: String I/O for in kernel device */
-	int r;
+	int r = 0, i;
 
-	if (vcpu->arch.pio.in)
-		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
-				    vcpu->arch.pio.size, pd);
-	else
-		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
-				     vcpu->arch.pio.port, vcpu->arch.pio.size,
-				     pd);
+	for (i = 0; i < vcpu->arch.pio.count; i++) {
+		if (vcpu->arch.pio.in)
+			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+					    vcpu->arch.pio.size, pd);
+		else
+			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+					     vcpu->arch.pio.port, vcpu->arch.pio.size,
+					     pd);
+		if (r)
+			break;
+		pd += vcpu->arch.pio.size;
+	}
 	return r;
 }
 
@@ -4791,6 +4814,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
+	memset(vcpu->arch.pio_data, 0, size * count);
+
 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
 	if (ret) {
 data_avail:
@@ -7162,6 +7187,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
 		return -EINVAL;
 
+	/* INITs are latched while in SMM */
+	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+		return -EINVAL;
+
 	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
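
Note (reviewer sketch, not part of the patch): the get_cpu()/put_cpu() pair added to __get_kvmclock_ns() above disables preemption across the calculation, so the per-CPU cpu_tsc_khz read and the rdtsc() that follows are guaranteed to happen on the same CPU. A minimal sketch of that idiom, using a hypothetical per-CPU variable:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, sample_khz);	/* hypothetical */

static unsigned long read_sample_khz_stable(void)
{
	unsigned long val;
	int cpu;

	cpu = get_cpu();		/* disables preemption: no migration */
	val = per_cpu(sample_khz, cpu);
	put_cpu();			/* re-enables preemption */

	return val;
}
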
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index e30202b..7c16017 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
 	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
 		(addr <= offsetof(struct user, u_debugreg[7]))) {
 		addr -= offsetof(struct user, u_debugreg[0]);
-		addr = addr >> 2;
+		addr = addr >> 3;
 		if ((addr == 4) || (addr == 5))
 			return -EIO;
 		child->thread.arch.debugregs[addr] = data;
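
Note (reviewer sketch, not part of the patch): the '>> 2' -> '>> 3' fix above turns a byte offset into the u_debugreg[] array of the 64-bit 'struct user' into an element index; the elements are 8 bytes wide, so shifting by 2 produced indices up to twice the array bound. A tiny standalone illustration (assumes an LP64 host, where sizeof(long) == 8):

#include <stdio.h>

int main(void)
{
	/* Byte offset of u_debugreg[7] from the start of the array,
	 * with 8-byte elements. */
	unsigned long byte_off = 7 * sizeof(long);

	printf("shift by 2 -> index %lu (out of range)\n", byte_off >> 2);
	printf("shift by 3 -> index %lu (correct)\n", byte_off >> 3);
	return 0;
}
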
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7d5afdb..418f1b8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2028,7 +2028,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
 
 /*
  * Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
  */
 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 {
@@ -2049,7 +2050,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 						       sizeof(pud)));
 	if (!pud_present(pud))
 		return 0;
-	pa = pud_pfn(pud) << PAGE_SHIFT;
+	pa = pud_val(pud) & PTE_PFN_MASK;
 	if (pud_large(pud))
 		return pa + (vaddr & ~PUD_MASK);
 
@@ -2057,7 +2058,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
 						       sizeof(pmd)));
 	if (!pmd_present(pmd))
 		return 0;
-	pa = pmd_pfn(pmd) << PAGE_SHIFT;
+	pa = pmd_val(pmd) & PTE_PFN_MASK;
 	if (pmd_large(pmd))
 		return pa + (vaddr & ~PMD_MASK);
 
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 319f2e4..478f572 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
 
 	bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
 		template->flags;
-	bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+	bi->interval_exp = template->interval_exp ? :
+		ilog2(queue_logical_block_size(disk->queue));
 	bi->profile = template->profile ? template->profile : &nop_profile;
 	bi->tuple_size = template->tuple_size;
 	bi->tag_size = template->tag_size;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index fde8d88..6c11537 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -44,6 +44,11 @@ struct aead_async_req {
 	char iv[];
 };
 
+struct aead_tfm {
+	struct crypto_aead *aead;
+	bool has_key;
+};
+
 struct aead_ctx {
 	struct aead_sg_list tsgl;
 	struct aead_async_rsgl first_rsgl;
@@ -732,24 +737,146 @@ static struct proto_ops algif_aead_ops = {
 	.poll		=	aead_poll,
 };
 
+static int aead_check_key(struct socket *sock)
+{
+	int err = 0;
+	struct sock *psk;
+	struct alg_sock *pask;
+	struct aead_tfm *tfm;
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+
+	lock_sock(sk);
+	if (ask->refcnt)
+		goto unlock_child;
+
+	psk = ask->parent;
+	pask = alg_sk(ask->parent);
+	tfm = pask->private;
+
+	err = -ENOKEY;
+	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+	if (!tfm->has_key)
+		goto unlock;
+
+	if (!pask->refcnt++)
+		sock_hold(psk);
+
+	ask->refcnt = 1;
+	sock_put(psk);
+
+	err = 0;
+
+unlock:
+	release_sock(psk);
+unlock_child:
+	release_sock(sk);
+
+	return err;
+}
+
+static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+				  size_t size)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_sendmsg(sock, msg, size);
+}
+
+static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
+				       int offset, size_t size, int flags)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_sendpage(sock, page, offset, size, flags);
+}
+
+static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+				  size_t ignored, int flags)
+{
+	int err;
+
+	err = aead_check_key(sock);
+	if (err)
+		return err;
+
+	return aead_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_aead_ops_nokey = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	aead_sendmsg_nokey,
+	.sendpage	=	aead_sendpage_nokey,
+	.recvmsg	=	aead_recvmsg_nokey,
+	.poll		=	aead_poll,
+};
+
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-	return crypto_alloc_aead(name, type, mask);
+	struct aead_tfm *tfm;
+	struct crypto_aead *aead;
+
+	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+	if (!tfm)
+		return ERR_PTR(-ENOMEM);
+
+	aead = crypto_alloc_aead(name, type, mask);
+	if (IS_ERR(aead)) {
+		kfree(tfm);
+		return ERR_CAST(aead);
+	}
+
+	tfm->aead = aead;
+
+	return tfm;
 }
 
 static void aead_release(void *private)
 {
-	crypto_free_aead(private);
+	struct aead_tfm *tfm = private;
+
+	crypto_free_aead(tfm->aead);
+	kfree(tfm);
 }
 
 static int aead_setauthsize(void *private, unsigned int authsize)
 {
-	return crypto_aead_setauthsize(private, authsize);
+	struct aead_tfm *tfm = private;
+
+	return crypto_aead_setauthsize(tfm->aead, authsize);
 }
 
 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
 {
-	return crypto_aead_setkey(private, key, keylen);
+	struct aead_tfm *tfm = private;
+	int err;
+
+	err = crypto_aead_setkey(tfm->aead, key, keylen);
+	tfm->has_key = !err;
+
+	return err;
 }
 
 static void aead_sock_destruct(struct sock *sk)
@@ -766,12 +893,14 @@ static void aead_sock_destruct(struct sock *sk)
 	af_alg_release_parent(sk);
 }
 
-static int aead_accept_parent(void *private, struct sock *sk)
+static int aead_accept_parent_nokey(void *private, struct sock *sk)
 {
 	struct aead_ctx *ctx;
 	struct alg_sock *ask = alg_sk(sk);
-	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
-	unsigned int ivlen = crypto_aead_ivsize(private);
+	struct aead_tfm *tfm = private;
+	struct crypto_aead *aead = tfm->aead;
+	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+	unsigned int ivlen = crypto_aead_ivsize(aead);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (!ctx)
@@ -798,7 +927,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
 
 	ask->private = ctx;
 
-	aead_request_set_tfm(&ctx->aead_req, private);
+	aead_request_set_tfm(&ctx->aead_req, aead);
 	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  af_alg_complete, &ctx->completion);
 
@@ -807,13 +936,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
 	return 0;
 }
 
+static int aead_accept_parent(void *private, struct sock *sk)
+{
+	struct aead_tfm *tfm = private;
+
+	if (!tfm->has_key)
+		return -ENOKEY;
+
+	return aead_accept_parent_nokey(private, sk);
+}
+
 static const struct af_alg_type algif_type_aead = {
 	.bind		=	aead_bind,
 	.release	=	aead_release,
 	.setkey		=	aead_setkey,
 	.setauthsize	=	aead_setauthsize,
 	.accept		=	aead_accept_parent,
+	.accept_nokey	=	aead_accept_parent_nokey,
 	.ops		=	&algif_aead_ops,
+	.ops_nokey	=	&algif_aead_ops_nokey,
 	.name		=	"aead",
 	.owner		=	THIS_MODULE
 };
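
Note (reviewer sketch, not part of the patch): with the aead_tfm/has_key split above, an operation socket obtained before ALG_SET_KEY is bound to the *_nokey ops, so no AEAD request can reach the cipher without a key; depending on the af_alg core, either the accept() or the first read/write fails with ENOKEY. A minimal userspace sketch of the AF_ALG flow (error handling omitted; the algorithm name and key are placeholders):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* placeholder algorithm */
	};
	unsigned char key[16] = { 0 };		/* placeholder key */
	char buf[16];
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

	/* No key set yet: expect ENOKEY rather than a keyless operation. */
	op = accept(tfm, NULL, 0);
	if (op < 0 || read(op, buf, sizeof(buf)) < 0)
		perror("operation before ALG_SET_KEY");
	if (op >= 0)
		close(op);

	/* Set a key on the tfm socket, then accept a usable op socket. */
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	op = accept(tfm, NULL, 0);

	close(op);
	close(tfm);
	return 0;
}
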
diff --git a/drivers/Makefile b/drivers/Makefile
index 413dff9..d0abb5a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
+obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
@@ -103,6 +104,7 @@
 obj-$(CONFIG_USB)		+= usb/
 obj-$(CONFIG_PCI)		+= usb/
 obj-$(CONFIG_USB_GADGET)	+= usb/
+obj-$(CONFIG_OF)		+= usb/
 obj-$(CONFIG_SERIO)		+= input/serio/
 obj-$(CONFIG_GAMEPORT)		+= input/gameport/
 obj-$(CONFIG_INPUT)		+= input/
@@ -130,7 +132,6 @@
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
-obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y				+= clocksource/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4256d9b..b0beb52 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -206,6 +206,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
 	buf->data = dbuf;
 	buf->allocated_size = size;
 	init_completion(&buf->completion);
+	INIT_LIST_HEAD(&buf->list);
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	INIT_LIST_HEAD(&buf->pending_list);
 #endif
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8f6c23c..deed580 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
 
 	hu->priv = bcm;
 
+	if (!hu->tty->dev)
+		goto out;
+
 	mutex_lock(&bcm_device_lock);
 	list_for_each(p, &bcm_device_list) {
 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
 	}
 
 	mutex_unlock(&bcm_device_lock);
-
+out:
 	return 0;
 }
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9e27128..7330638 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
 	struct list_head *p;
 	int err = -ENODEV;
 
+	if (!hu->tty->dev)
+		return err;
+
 	mutex_lock(&intel_device_list_lock);
 
 	list_for_each(p, &intel_device_list) {
@@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
 	struct intel_data *intel = container_of(work, struct intel_data,
 						busy_work);
 
+	if (!intel->hu->tty->dev)
+		return;
+
 	/* Link is busy, delay the suspend */
 	mutex_lock(&intel_device_list_lock);
 	list_for_each(p, &intel_device_list) {
@@ -889,6 +895,8 @@ static int intel_setup(struct hci_uart *hu)
 	list_for_each(p, &intel_device_list) {
 		struct intel_device *dev = list_entry(p, struct intel_device,
 						      list);
+		if (!hu->tty->dev)
+			break;
 		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
 			if (device_may_wakeup(&dev->pdev->dev)) {
 				set_bit(STATE_LPM_ENABLED, &intel->flags);
@@ -1056,6 +1064,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 
 	BT_DBG("hu %p skb %p", hu, skb);
 
+	if (!hu->tty->dev)
+		goto out_enqueue;
+
 	/* Be sure our controller is resumed and potential LPM transaction
 	 * completed before enqueuing any packet.
 	 */
@@ -1072,7 +1083,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 		}
 	}
 	mutex_unlock(&intel_device_list_lock);
-
+out_enqueue:
 	skb_queue_tail(&intel->txq, skb);
 
 	return 0;
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629..73f2fe8 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673fff..6958b5c 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -892,6 +892,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 		 * for details on the intricacies of this.
 		 */
 		int left;
+		unsigned char *data_to_send;
 
 		ssif_inc_stat(ssif_info, sent_messages_parts);
 
@@ -900,6 +901,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 			left = 32;
 		/* Length byte. */
 		ssif_info->multi_data[ssif_info->multi_pos] = left;
+		data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
 		ssif_info->multi_pos += left;
 		if (left < 32)
 			/*
@@ -913,7 +915,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
 		rv = ssif_i2c_send(ssif_info, msg_written_handler,
 				  I2C_SMBUS_WRITE,
 				  SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
-				  ssif_info->multi_data + ssif_info->multi_pos,
+				  data_to_send,
 				  I2C_SMBUS_BLOCK_DATA);
 		if (rv < 0) {
 			/* request failed, just return the error. */
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c4094c4..34ef474 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
 	} else if (!strcmp(str, "auto")) {
 		parport_nr[0] = LP_PARPORT_AUTO;
 	} else if (!strcmp(str, "none")) {
-		parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		if (parport_ptr < LP_NO)
+			parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		else
+			printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+			       str);
 	} else if (!strcmp(str, "reset")) {
 		reset = 1;
 	}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7e4a9d1..6e0cbe0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
+	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+	/* It's illegal to wrap around the end of the physical address space. */
+	if (offset + (phys_addr_t)size < offset)
+		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
 		return -EINVAL;
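
Note (reviewer sketch, not part of the patch): the new check in mmap_mem() above is the standard unsigned wrap-around test: for unsigned arithmetic, base + len has overflowed exactly when the sum compares less than base. A tiny standalone illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if [base, base + len) wraps past the end of the address space. */
static bool range_wraps(uint64_t base, uint64_t len)
{
	return base + len < base;
}

int main(void)
{
	printf("%d\n", range_wraps(UINT64_MAX - 4096, 2 * 4096));	/* 1 */
	printf("%d\n", range_wraps(0x1000, 0x1000));			/* 0 */
	return 0;
}
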
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870a..fa0f668 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -111,8 +111,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 
 	memcpy_fromio(buf, priv->rsp, 6);
 	expected = be32_to_cpup((__be32 *) &buf[2]);
-
-	if (expected > count)
+	if (expected > count || expected < 6)
 		return -EIO;
 
 	memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
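
Note (reviewer sketch, not part of the patch): the added 'expected < 6' test above protects the 'expected - 6' length used by memcpy_fromio(); 'expected' comes from the device, and a value smaller than the 6-byte header would make that subtraction wrap to an enormous size. A tiny standalone illustration of the underflow:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t expected = 2;			/* hostile or buggy device value */
	size_t remaining = expected - 6;	/* wraps to SIZE_MAX - 3 */

	printf("remaining = %zu\n", remaining);
	return 0;
}
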
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index e3a9155..c642877 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -49,9 +49,10 @@
  */
 #define TPM_I2C_MAX_BUF_SIZE           32
 #define TPM_I2C_RETRY_COUNT            32
-#define TPM_I2C_BUS_DELAY              1       /* msec */
-#define TPM_I2C_RETRY_DELAY_SHORT      2       /* msec */
-#define TPM_I2C_RETRY_DELAY_LONG       10      /* msec */
+#define TPM_I2C_BUS_DELAY              1000      	/* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT      (2 * 1000)	/* usec */
+#define TPM_I2C_RETRY_DELAY_LONG       (10 * 1000) 	/* usec */
+#define TPM_I2C_DELAY_RANGE            300		/* usec */
 
 #define OF_IS_TPM2 ((void *)1)
 #define I2C_IS_TPM2 1
@@ -123,7 +124,9 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
 	/* this causes the current command to be aborted */
 	for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
 		status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
-		msleep(TPM_I2C_BUS_DELAY);
+		if (status < 0)
+			usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+				     + TPM_I2C_DELAY_RANGE);
 	}
 	return status;
 }
@@ -160,7 +163,8 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
 			burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
 			break;
 		}
-		msleep(TPM_I2C_BUS_DELAY);
+		usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+			     + TPM_I2C_DELAY_RANGE);
 	} while (time_before(jiffies, stop));
 
 	return burst_count;
@@ -203,13 +207,17 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
 			return 0;
 
 		/* use polling to wait for the event */
-		ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+		ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
 		stop = jiffies + timeout;
 		do {
 			if (time_before(jiffies, ten_msec))
-				msleep(TPM_I2C_RETRY_DELAY_SHORT);
+				usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+					     TPM_I2C_RETRY_DELAY_SHORT
+					     + TPM_I2C_DELAY_RANGE);
 			else
-				msleep(TPM_I2C_RETRY_DELAY_LONG);
+				usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+					     TPM_I2C_RETRY_DELAY_LONG
+					     + TPM_I2C_DELAY_RANGE);
 			status_valid = i2c_nuvoton_check_status(chip, mask,
 								value);
 			if (status_valid)
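
Replacing msleep() with usleep_range() matters here because msleep() rounds short delays up to whole jiffies (easily 10-20 ms at low HZ), while usleep_range() uses hrtimers and gives the scheduler a window in which wakeups can be coalesced. A hedged kernel-style sketch of the resulting polling pattern follows; poll_ready() is a hypothetical stub, and only usleep_range(), jiffies/time_before() and the errno value are real kernel interfaces here.

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define POLL_DELAY_US   1000    /* minimum sleep per iteration */
    #define POLL_SLACK_US   300     /* allow the timer to be coalesced */

    /* Stub for the example: a real driver would read a status register here. */
    static bool poll_ready(void)
    {
        return false;
    }

    static int wait_ready(unsigned long timeout_jiffies)
    {
        unsigned long stop = jiffies + timeout_jiffies;

        do {
            if (poll_ready())
                return 0;
            /* Sleep at least POLL_DELAY_US, but give the kernel slack. */
            usleep_range(POLL_DELAY_US, POLL_DELAY_US + POLL_SLACK_US);
        } while (time_before(jiffies, stop));

        return -ETIMEDOUT;
    }
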
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index a1ce060..4d24ec3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -160,8 +160,10 @@ static int get_burstcount(struct tpm_chip *chip)
 	u32 value;
 
 	/* wait for burstcount */
-	/* which timeout value, spec has 2 answers (c & d) */
-	stop = jiffies + chip->timeout_d;
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		stop = jiffies + chip->timeout_a;
+	else
+		stop = jiffies + chip->timeout_d;
 	do {
 		rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
 		if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index dbaad9c..3b97b14 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -48,8 +48,8 @@ struct tpm_tis_spi_phy {
 	struct tpm_tis_data priv;
 	struct spi_device *spi_device;
 
-	u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
-	u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
+	u8 tx_buf[4];
+	u8 rx_buf[4];
 };
 
 static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,120 +57,96 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
 	return container_of(data, struct tpm_tis_spi_phy, priv);
 }
 
-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
-				  u16 len, u8 *result)
+static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+				u8 *buffer, u8 direction)
 {
 	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
+	int ret = 0;
+	int i;
 	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = 0x80 | (len - 1);
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr	      & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
+	struct spi_transfer spi_xfer;
+	u8 transfer_len;
 
 	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
 
-	memset(phy->tx_buf, 0, len);
+	while (len) {
+		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
 
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
+		phy->tx_buf[0] = direction | (transfer_len - 1);
+		phy->tx_buf[1] = 0xd4;
+		phy->tx_buf[2] = addr >> 8;
+		phy->tx_buf[3] = addr;
+
+		memset(&spi_xfer, 0, sizeof(spi_xfer));
+		spi_xfer.tx_buf = phy->tx_buf;
+		spi_xfer.rx_buf = phy->rx_buf;
+		spi_xfer.len = 4;
+		spi_xfer.cs_change = 1;
+
 		spi_message_init(&m);
 		spi_message_add_tail(&spi_xfer, &m);
 		ret = spi_sync_locked(phy->spi_device, &m);
 		if (ret < 0)
 			goto exit;
+
+		if ((phy->rx_buf[3] & 0x01) == 0) {
+			// handle SPI wait states
+			phy->tx_buf[0] = 0;
+
+			for (i = 0; i < TPM_RETRY; i++) {
+				spi_xfer.len = 1;
+				spi_message_init(&m);
+				spi_message_add_tail(&spi_xfer, &m);
+				ret = spi_sync_locked(phy->spi_device, &m);
+				if (ret < 0)
+					goto exit;
+				if (phy->rx_buf[0] & 0x01)
+					break;
+			}
+
+			if (i == TPM_RETRY) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+		}
+
+		spi_xfer.cs_change = 0;
+		spi_xfer.len = transfer_len;
+		spi_xfer.delay_usecs = 5;
+
+		if (direction) {
+			spi_xfer.tx_buf = NULL;
+			spi_xfer.rx_buf = buffer;
+		} else {
+			spi_xfer.tx_buf = buffer;
+			spi_xfer.rx_buf = NULL;
+		}
+
+		spi_message_init(&m);
+		spi_message_add_tail(&spi_xfer, &m);
+		ret = spi_sync_locked(phy->spi_device, &m);
+		if (ret < 0)
+			goto exit;
+
+		len -= transfer_len;
+		buffer += transfer_len;
 	}
 
-	spi_xfer.cs_change = 0;
-	spi_xfer.len = len;
-	spi_xfer.rx_buf = result;
-
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
 exit:
 	spi_bus_unlock(phy->spi_device->master);
 	return ret;
 }
 
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+				  u16 len, u8 *result)
+{
+	return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+}
+
 static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
 				   u16 len, u8 *value)
 {
-	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
-	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = len - 1;
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr         & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-
-	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
-
-	memset(phy->tx_buf, 0, len);
-
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
-		spi_message_init(&m);
-		spi_message_add_tail(&spi_xfer, &m);
-		ret = spi_sync_locked(phy->spi_device, &m);
-		if (ret < 0)
-			goto exit;
-	}
-
-	spi_xfer.len = len;
-	spi_xfer.tx_buf = value;
-	spi_xfer.cs_change = 0;
-	spi_xfer.tx_buf = value;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
-exit:
-	spi_bus_unlock(phy->spi_device->master);
-	return ret;
+	return tpm_tis_spi_transfer(data, addr, len, value, 0);
 }
 
 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
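
The rewrite folds the read and write paths into one helper that walks the buffer in MAX_SPI_FRAMESIZE-sized chunks, with the direction encoded in the first header byte. The userspace sketch below shows only the chunking loop; the direction bit and wait-state handling are left out, and process_chunk() is an assumed stand-in for the per-frame SPI transfer.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_FRAMESIZE 64        /* stand-in for MAX_SPI_FRAMESIZE */

    /* Hypothetical per-frame transfer; here it just reports what it would move. */
    static int process_chunk(uint32_t addr, const uint8_t *buf, uint16_t len)
    {
        printf("frame: addr=0x%04x len=%u\n", addr, len);
        return 0;
    }

    static int transfer(uint32_t addr, const uint8_t *buffer, uint16_t len)
    {
        while (len) {
            uint16_t chunk = len < MAX_FRAMESIZE ? len : MAX_FRAMESIZE;
            int ret = process_chunk(addr, buffer, chunk);

            if (ret < 0)
                return ret;
            len -= chunk;
            buffer += chunk;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t payload[150];

        memset(payload, 0xab, sizeof(payload));
        return transfer(0xd40f, payload, sizeof(payload));  /* 64 + 64 + 22 */
    }
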
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d15d1bb..fd3617b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -59,7 +59,6 @@
 
 #define FABIA_USER_CTL_LO	0xc
 #define FABIA_USER_CTL_HI	0x10
-#define FABIA_CAL_L_VAL		0x8
 #define FABIA_FRAC_VAL		0x38
 #define FABIA_OPMODE		0x2c
 #define FABIA_PLL_STANDBY	0x0
@@ -463,12 +462,9 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 {
 	u32 val, mask;
 
-	if (config->l) {
+	if (config->l)
 		regmap_write(regmap, pll->offset + PLL_L_VAL,
 						config->l);
-		regmap_write(regmap, pll->offset + FABIA_CAL_L_VAL,
-						config->l);
-	}
 
 	if (config->frac)
 		regmap_write(regmap, pll->offset + FABIA_FRAC_VAL,
@@ -627,12 +623,6 @@ static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 	}
 
 	regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
-	/*
-	 * pll_cal_l_val is set to pll_l_val on MOST targets. Set it
-	 * explicitly here for PLL out-of-reset calibration to work
-	 * without a glitch on ALL of them.
-	 */
-	regmap_write(pll->clkr.regmap, off + FABIA_CAL_L_VAL, l);
 	regmap_write(pll->clkr.regmap, off + FABIA_FRAC_VAL, a);
 
 	/* Latch the PLL input */
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 4efecef..3a0677f 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1737,6 +1737,8 @@ static void populate_opp_table(struct platform_device *pdev)
 	struct device *cpu_dev;
 	struct clk_osm *c, *parent;
 	struct clk_hw *hw_parent;
+	struct device_node *l3_node_0, *l3_node_4;
+	struct platform_device *l3_dev_0, *l3_dev_4;
 
 	for_each_possible_cpu(cpu) {
 		c = logical_cpu_to_clk(cpu);
@@ -1754,7 +1756,35 @@ static void populate_opp_table(struct platform_device *pdev)
 					dev_name(cpu_dev));
 	}
 
-	/*TODO: Figure out which device to tag the L3 table to */
+	l3_node_0 = of_parse_phandle(pdev->dev.of_node, "l3-dev0", 0);
+	if (!l3_node_0) {
+		pr_err("can't find the L3 cluster 0 dt node\n");
+		return;
+	}
+
+	l3_dev_0 = of_find_device_by_node(l3_node_0);
+	if (!l3_dev_0) {
+		pr_err("can't find the L3 cluster 0 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_0->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 0\n");
+
+	l3_node_4 = of_parse_phandle(pdev->dev.of_node, "l3-dev4", 0);
+	if (!l3_node_4) {
+		pr_err("can't find the L3 cluster 1 dt node\n");
+		return;
+	}
+
+	l3_dev_4 = of_find_device_by_node(l3_node_4);
+	if (!l3_dev_4) {
+		pr_err("can't find the L3 cluster 1 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_4->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 1\n");
 }
 
 static u64 clk_osm_get_cpu_cycle_counter(int cpu)
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 3a38d37..7f56fb6 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -174,6 +174,7 @@ struct clk_rcg2 {
 	struct clk_regmap	clkr;
 	u8			flags;
 #define FORCE_ENABLE_RCG	BIT(0)
+#define DFS_ENABLE_RCG		BIT(1)
 };
 
 #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
@@ -187,4 +188,6 @@ extern const struct clk_ops clk_pixel_ops;
 extern const struct clk_ops clk_gfx3d_ops;
 extern const struct clk_ops clk_dp_ops;
 
+extern int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk,
+				struct device *dev, u8 rcg_flags);
 #endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 3d101ac..6bdea53 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -18,6 +18,7 @@
 #include <linux/export.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/regmap.h>
 #include <linux/rational.h>
 #include <linux/math64.h>
@@ -50,6 +51,14 @@
 #define N_REG			0xc
 #define D_REG			0x10
 
+/* Dynamic Frequency Scaling */
+#define MAX_PERF_LEVEL		16
+#define SE_CMD_DFSR_OFFSET	0x14
+#define SE_CMD_DFS_EN		BIT(0)
+#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
+#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
+#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
+
 static struct freq_tbl cxo_f = {
 	.freq = 19200000,
 	.src = 0,
@@ -127,6 +136,9 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
 	int ret;
 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
 
+	if (rcg->flags & DFS_ENABLE_RCG)
+		return 0;
+
 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
 				 CFG_SRC_SEL_MASK, cfg);
 	if (ret)
@@ -236,6 +248,9 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
 
+	if (rcg->flags & DFS_ENABLE_RCG)
+		return rcg->current_freq;
+
 	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
 		if (!rcg->current_freq)
 			rcg->current_freq = cxo_f.freq;
@@ -333,6 +348,9 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
 	struct clk_hw *hw = &rcg->clkr.hw;
 	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
+	if (rcg->flags & DFS_ENABLE_RCG)
+		return -EPERM;
+
 	if (index < 0)
 		return index;
 
@@ -461,7 +479,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
 	}
 
 	ret = clk_rcg2_configure(rcg, f);
-	if (ret)
+	if (ret && ret != -EPERM)
 		return ret;
 
 	if (rcg->flags & FORCE_ENABLE_RCG) {
@@ -1170,3 +1188,167 @@ const struct clk_ops clk_gfx3d_ops = {
 	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
+
+/* Common APIs to be used for DFS based RCGR */
+static u8 clk_parent_index_pre_div_and_mode(struct clk_hw *hw, u32 offset,
+		u32 *mode, u32 *pre_div)
+{
+	struct clk_rcg2 *rcg;
+	int num_parents = clk_hw_get_num_parents(hw);
+	u32 cfg, mask;
+	int i, ret;
+
+	if (!hw)
+		return -EINVAL;
+
+	rcg = to_clk_rcg2(hw);
+
+	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + offset, &cfg);
+	if (ret)
+		goto err;
+
+	mask = BIT(rcg->hid_width) - 1;
+	*pre_div = cfg & mask ? (cfg & mask) : 1;
+
+	*mode = cfg & CFG_MODE_MASK;
+	*mode >>= CFG_MODE_SHIFT;
+
+	cfg &= CFG_SRC_SEL_MASK;
+	cfg >>= CFG_SRC_SEL_SHIFT;
+
+	for (i = 0; i < num_parents; i++)
+		if (cfg == rcg->parent_map[i].cfg)
+			return i;
+err:
+	pr_debug("%s: Clock %s has invalid parent, using default.\n",
+		 __func__, clk_hw_get_name(hw));
+	return 0;
+}
+
+static int calculate_m_and_n(struct clk_hw *hw, u32 m_offset, u32 n_offset,
+		u32 mode, u32 *m, u32 *n)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	u32 val, mask;
+	int ret = 0;
+
+	if (!hw)
+		return -EINVAL;
+
+	*m = *n = 0;
+
+	if (mode) {
+		/* Calculate M & N values */
+		mask = BIT(rcg->mnd_width) - 1;
+		ret =  regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + m_offset,
+					&val);
+		if (ret) {
+			pr_err("Failed to read M offset register\n");
+			goto err;
+		}
+
+		val &= mask;
+		*m  = val;
+
+		ret =  regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + n_offset,
+					&val);
+		if (ret) {
+			pr_err("Failed to read N offset register\n");
+			goto err;
+		}
+
+		/* The N register holds ~(N - M); recover N from it */
+		val = ~val;
+		val &= mask;
+		val += *m;
+		*n = val;
+	}
+err:
+	return ret;
+}
+
+int clk_rcg2_get_dfs_clock_rate(struct clk_rcg2 *clk, struct device *dev,
+						u8 rcg_flags)
+{
+	int i, j, index, ret = 0;
+	unsigned long calc_freq, prate;
+	u32 val, pre_div = 0, mode = 0, m = 0, n = 0;
+	struct freq_tbl *dfs_freq_tbl;
+	struct clk_hw *phw;
+
+	if (!clk)
+		return -EINVAL;
+
+	/* Check for DFS_EN */
+	ret = regmap_read(clk->clkr.regmap, clk->cmd_rcgr + SE_CMD_DFSR_OFFSET,
+						&val);
+	if (ret) {
+		dev_err(dev, "Failed to read DFS enable register\n");
+		return -EINVAL;
+	}
+
+	if (!(val & SE_CMD_DFS_EN))
+		return ret;
+
+	dfs_freq_tbl = devm_kzalloc(dev, MAX_PERF_LEVEL *
+				sizeof(struct freq_tbl), GFP_KERNEL);
+	if (!dfs_freq_tbl)
+		return -ENOMEM;
+
+	/* Populate the Perf Level */
+	for (i = 0; i < MAX_PERF_LEVEL; i++) {
+		/* Get parent index and mode */
+		index = clk_parent_index_pre_div_and_mode(&clk->clkr.hw,
+							SE_PERF_DFSR(i), &mode,
+							&pre_div);
+		if (index < 0) {
+			pr_err("Failed to get parent index & mode %d\n", index);
+			return index;
+		}
+
+		/* clock pre_div */
+		dfs_freq_tbl[i].pre_div = pre_div;
+
+		/* Fill the parent src */
+		dfs_freq_tbl[i].src = clk->parent_map[index].src;
+
+		/* Get the parent clock and parent rate */
+		phw = clk_hw_get_parent_by_index(&clk->clkr.hw, index);
+		prate = clk_hw_get_rate(phw);
+
+		ret = calculate_m_and_n(&clk->clkr.hw, SE_PERF_M_DFSR(i),
+					SE_PERF_N_DFSR(i), mode, &m, &n);
+		if (ret)
+			goto err;
+
+		dfs_freq_tbl[i].m = m;
+		dfs_freq_tbl[i].n = n;
+
+		/* calculate the final frequency */
+		calc_freq = calc_rate(prate, dfs_freq_tbl[i].m,
+						dfs_freq_tbl[i].n, mode,
+						dfs_freq_tbl[i].pre_div);
+
+		/* Check for duplicate frequencies */
+		for (j = 0; j  < i; j++) {
+			if (dfs_freq_tbl[j].freq == calc_freq)
+				goto done;
+		}
+
+		dfs_freq_tbl[i].freq = calc_freq;
+	}
+done:
+	j = i;
+
+	for (i = 0; i < j; i++)
+		pr_debug("Index[%d]\tfreq_table.freq %ld\tfreq_table.src %d\t"
+		"freq_table.pre_div %d\tfreq_table.m %d\tfreq_table.n %d\t"
+		"RCG flags %x\n", i, dfs_freq_tbl[i].freq, dfs_freq_tbl[i].src,
+				dfs_freq_tbl[i].pre_div, dfs_freq_tbl[i].m,
+				dfs_freq_tbl[i].n, rcg_flags);
+
+	clk->flags |= rcg_flags;
+	clk->freq_tbl = dfs_freq_tbl;
+err:
+	return ret;
+}
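
Each DFS perf level above is turned into a freq_tbl entry by combining the parent rate with the pre-divider and, in MND mode, the M/N ratio (the calc_freq step). The userspace sketch below illustrates that arithmetic under the assumption of the usual RCG encoding, where the divider field stores 2*div-1 and M/N only applies when mode is non-zero; treat it as an illustration rather than a copy of calc_rate().

    #include <stdint.h>
    #include <stdio.h>

    /*
     * rate = (parent * 2 / (hid_div + 1)) * m / n
     * hid_div is the raw register field (2*div - 1), so 0 means "bypass".
     */
    static unsigned long long rcg_rate(unsigned long long parent, uint32_t m,
                                       uint32_t n, uint32_t mode, uint32_t hid_div)
    {
        unsigned long long rate = parent;

        if (hid_div) {
            rate *= 2;
            rate /= hid_div + 1;
        }
        if (mode && n)
            rate = rate * m / n;

        return rate;
    }

    int main(void)
    {
        /* 600 MHz parent, divide-by-4 (field value 7), M/N = 1/2 -> 75 MHz */
        printf("%llu Hz\n", rcg_rate(600000000ULL, 1, 2, 1, 7));
        /* 19.2 MHz parent, no divider, no MND -> 19.2 MHz */
        printf("%llu Hz\n", rcg_rate(19200000ULL, 0, 0, 0, 0));
        return 0;
    }
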
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index fffcbaf..b2ff04a 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -275,4 +275,26 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 }
 EXPORT_SYMBOL_GPL(qcom_cc_probe);
 
+int qcom_cc_register_rcg_dfs(struct platform_device *pdev,
+			 const struct qcom_cc_dfs_desc *desc)
+{
+	struct clk_dfs *clks = desc->clks;
+	size_t num_clks = desc->num_clks;
+	int i, ret = 0;
+
+	for (i = 0; i < num_clks; i++) {
+		ret = clk_rcg2_get_dfs_clock_rate(clks[i].rcg, &pdev->dev,
+						clks[i].rcg_flags);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed calculating DFS frequencies for %s\n",
+				clk_hw_get_name(&(clks[i].rcg)->clkr.hw));
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_cc_register_rcg_dfs);
+
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index e728dec..5e26763 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -14,6 +14,7 @@
 #define __QCOM_CLK_COMMON_H__
 
 #include <linux/reset-controller.h>
+#include "clk-rcg.h"
 
 struct platform_device;
 struct regmap_config;
@@ -40,6 +41,16 @@ struct clk_dummy {
 	unsigned long rrate;
 };
 
+struct clk_dfs {
+	struct clk_rcg2 *rcg;
+	u8 rcg_flags;
+};
+
+struct qcom_cc_dfs_desc {
+	struct clk_dfs *clks;
+	size_t num_clks;
+};
+
 extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
 					     unsigned long rate);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
@@ -56,6 +67,10 @@ extern int qcom_cc_really_probe(struct platform_device *pdev,
 				struct regmap *regmap);
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
+
+extern int qcom_cc_register_rcg_dfs(struct platform_device *pdev,
+			 const struct qcom_cc_dfs_desc *desc);
+
 extern struct clk_ops clk_dummy_ops;
 
 #define BM(msb, lsb)	(((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 228f716..2742ab3 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -245,28 +245,6 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
 	},
 };
 
-static struct clk_alpha_pll gpll1 = {
-	.offset = 0x1000,
-	.vco_table = fabia_vco,
-	.num_vco = ARRAY_SIZE(fabia_vco),
-	.type = FABIA_PLL,
-	.clkr = {
-		.enable_reg = 0x52000,
-		.enable_mask = BIT(1),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpll1",
-			.parent_names = (const char *[]){ "bi_tcxo" },
-			.num_parents = 1,
-			.ops = &clk_fabia_fixed_pll_ops,
-			VDD_CX_FMAX_MAP4(
-				MIN, 615000000,
-				LOW, 1066000000,
-				LOW_L1, 1600000000,
-				NOMINAL, 2000000000),
-		},
-	},
-};
-
 static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	{ }
@@ -314,6 +292,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
 	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
 	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
@@ -448,6 +427,7 @@ static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
 	{ }
@@ -808,6 +788,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
 	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
 	F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
@@ -839,6 +820,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
 
 static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
 	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
 	F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
@@ -889,12 +871,20 @@ static struct clk_rcg2 gcc_tsif_ref_clk_src = {
 	},
 };
 
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = {
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
 	.cmd_rcgr = 0x7501c,
 	.mnd_width = 8,
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
-	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.freq_tbl = ftbl_gcc_ufs_card_axi_clk_src,
 	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_ufs_card_axi_clk_src",
@@ -1102,6 +1092,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
 };
 
 static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
 	F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
 	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
@@ -3476,7 +3467,6 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
 	[GPLL0] = &gpll0.clkr,
 	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
-	[GPLL1] = &gpll1.clkr,
 };
 
 static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3509,6 +3499,31 @@ static const struct qcom_reset_map gcc_sdm845_resets[] = {
 	[GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
 };
 
+/* List of RCG clocks and corresponding flags requested for DFS Mode */
+static struct clk_dfs gcc_dfs_clocks[] = {
+	{ &gcc_qupv3_wrap0_s0_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s1_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s2_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s3_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s4_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s5_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s6_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap0_s7_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s0_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s1_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s2_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s3_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s4_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s5_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s6_clk_src, DFS_ENABLE_RCG },
+	{ &gcc_qupv3_wrap1_s7_clk_src, DFS_ENABLE_RCG },
+};
+
+static const struct qcom_cc_dfs_desc gcc_sdm845_dfs_desc = {
+	.clks = gcc_dfs_clocks,
+	.num_clks = ARRAY_SIZE(gcc_dfs_clocks),
+};
+
 static const struct regmap_config gcc_sdm845_regmap_config = {
 	.reg_bits	= 32,
 	.reg_stride	= 4,
@@ -3599,10 +3614,10 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
 	clk_prepare_enable(gcc_camera_ahb_clk.clkr.hw.clk);
 	clk_prepare_enable(gcc_video_ahb_clk.clkr.hw.clk);
 
-	/*
-	 * TODO:
-	 * 1. QUPv3 support
-	 */
+	/* DFS clock registration */
+	ret = qcom_cc_register_rcg_dfs(pdev, &gcc_sdm845_dfs_desc);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register with DFS!\n");
 
 	dev_info(&pdev->dev, "Registered GCC clocks\n");
 	return ret;
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index ae9d509..a5548e0 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -105,20 +105,6 @@ static const char * const gpu_cc_parent_names_1[] = {
 	"core_bi_pll_test_se",
 };
 
-static const struct parent_map gpu_cc_parent_map_2[] = {
-	{ P_BI_TCXO, 0 },
-	{ P_GPLL0_OUT_MAIN, 5 },
-	{ P_GPLL0_OUT_MAIN_DIV, 6 },
-	{ P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gpu_cc_parent_names_2[] = {
-	"bi_tcxo",
-	"gcc_gpu_gpll0_clk_src",
-	"gcc_gpu_gpll0_div_clk_src",
-	"core_bi_pll_test_se",
-};
-
 static struct pll_vco fabia_vco[] = {
 	{ 250000000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
@@ -232,29 +218,6 @@ static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
 	},
 };
 
-static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
-	.cmd_rcgr = 0x10b0,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gpu_cc_parent_map_2,
-	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gpu_cc_rbcpr_clk_src",
-		.parent_names = gpu_cc_parent_names_2,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP2(
-			MIN, 19200000,
-			NOMINAL, 50000000),
-	},
-};
-
 static struct clk_branch gpu_cc_acd_ahb_clk = {
 	.halt_reg = 0x1168,
 	.halt_check = BRANCH_HALT,
@@ -488,37 +451,6 @@ static struct clk_branch gpu_cc_pll_test_clk = {
 	},
 };
 
-static struct clk_branch gpu_cc_rbcpr_ahb_clk = {
-	.halt_reg = 0x10f4,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f4,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gpu_cc_rbcpr_clk = {
-	.halt_reg = 0x10f0,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f0,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_clk",
-			.parent_names = (const char *[]){
-				"gpu_cc_rbcpr_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
 	[GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
 	[GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
@@ -536,9 +468,6 @@ static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
 	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
 	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
 	[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
-	[GPU_CC_RBCPR_AHB_CLK] = &gpu_cc_rbcpr_ahb_clk.clkr,
-	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
-	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
 };
 
 static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
@@ -554,7 +483,6 @@ static const struct qcom_reset_map gpu_cc_sdm845_resets[] = {
 	[GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
 	[GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
 	[GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
-	[GPUCC_GPU_CC_RBCPR_BCR] = { 0x10ac },
 	[GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
 	[GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
 };
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbac..e68966b 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
 	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
 }
 
+static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
+}
+
+static void ccp_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	struct ccp_cmd_queue *cmd_q;
+	u32 q_int, status;
+	unsigned int i;
+
+	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		cmd_q = &ccp->cmd_q[i];
+
+		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+		if (q_int) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp_irq_bh((unsigned long)ccp);
+
+	return IRQ_HANDLED;
+}
+
 static int ccp_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
 	struct ccp_cmd_queue *cmd_q;
 	struct dma_pool *dma_pool;
 	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
-	unsigned int qmr, qim, i;
+	unsigned int qmr, i;
 	int ret;
 
 	/* Find available queues */
-	qim = 0;
+	ccp->qim = 0;
 	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
 	for (i = 0; i < MAX_HW_QUEUES; i++) {
 		if (!(qmr & (1 << i)))
@@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
 		init_waitqueue_head(&cmd_q->int_queue);
 
 		/* Build queue interrupt mask (two interrupts per queue) */
-		qim |= cmd_q->int_ok | cmd_q->int_err;
+		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
 
 #ifdef CONFIG_ARM64
 		/* For arm64 set the recommended queue cache settings */
@@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Disable and clear interrupts until ready */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Request an irq */
 	ret = ccp->get_irq(ccp);
@@ -408,6 +464,11 @@ static int ccp_init(struct ccp_device *ccp)
 	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
+			     (unsigned long)ccp);
+
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
@@ -430,7 +491,7 @@ static int ccp_init(struct ccp_device *ccp)
 
 	dev_dbg(dev, "Enabling interrupts...\n");
 	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+	ccp_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	ccp_add_device(ccp);
@@ -467,7 +528,7 @@ static void ccp_destroy(struct ccp_device *ccp)
 {
 	struct ccp_cmd_queue *cmd_q;
 	struct ccp_cmd *cmd;
-	unsigned int qim, i;
+	unsigned int i;
 
 	/* Unregister the DMA engine */
 	ccp_dmaengine_unregister(ccp);
@@ -478,22 +539,15 @@ static void ccp_destroy(struct ccp_device *ccp)
 	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
-	/* Build queue interrupt mask (two interrupt masks per queue) */
-	qim = 0;
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		qim |= cmd_q->int_ok | cmd_q->int_err;
-	}
-
 	/* Disable and clear interrupts */
-	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+	ccp_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
-	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
 
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
@@ -520,40 +574,6 @@ static void ccp_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	struct ccp_cmd_queue *cmd_q;
-	u32 q_int, status;
-	unsigned int i;
-
-	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-
-		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
-		if (q_int) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
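
The restructuring in both the v3 and v5 CCP code splits interrupt handling into a hard-IRQ top half that only masks queue interrupts and a tasklet bottom half that reads status and wakes waiters, falling back to direct processing when no tasklet was set up. Below is a hedged kernel-style sketch of that split; the my_dev type and the helper bodies are assumptions of the example, while tasklet_init()/tasklet_schedule() and the irqreturn_t contract are real kernel APIs.

    #include <linux/interrupt.h>
    #include <linux/types.h>

    struct my_dev {
        bool use_tasklet;
        struct tasklet_struct irq_tasklet;
        /* ... device registers, queues ... */
    };

    /* Stubs standing in for the driver's mask/unmask/status work. */
    static void my_disable_irqs(struct my_dev *dev) { /* mask IRQ sources */ }
    static void my_enable_irqs(struct my_dev *dev) { /* unmask them again */ }
    static void my_process_status(struct my_dev *dev) { /* ack, wake waiters */ }

    static void my_irq_bh(unsigned long data)
    {
        struct my_dev *dev = (struct my_dev *)data;

        my_process_status(dev);     /* bottom half does the real work */
        my_enable_irqs(dev);        /* re-arm only once it is done */
    }

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
        struct my_dev *dev = data;

        my_disable_irqs(dev);
        if (dev->use_tasklet)
            tasklet_schedule(&dev->irq_tasklet);
        else
            my_irq_bh((unsigned long)dev);
        return IRQ_HANDLED;
    }

    /* At init: tasklet_init(&dev->irq_tasklet, my_irq_bh, (unsigned long)dev); */
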
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index a388bf2..2c0ce5f 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -644,6 +644,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
 	return rc;
 }
 
+static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
+}
+
+static void ccp5_irq_bh(unsigned long data)
+{
+	struct ccp_device *ccp = (struct ccp_device *)data;
+	u32 status;
+	unsigned int i;
+
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+		status = ioread32(cmd_q->reg_interrupt_status);
+
+		if (status) {
+			cmd_q->int_status = status;
+			cmd_q->q_status = ioread32(cmd_q->reg_status);
+			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+			/* On error, only save the first error value */
+			if ((status & INT_ERROR) && !cmd_q->cmd_error)
+				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+			cmd_q->int_rcvd = 1;
+
+			/* Acknowledge the interrupt and wake the kthread */
+			iowrite32(status, cmd_q->reg_interrupt_status);
+			wake_up_interruptible(&cmd_q->int_queue);
+		}
+	}
+	ccp5_enable_queue_interrupts(ccp);
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+	struct device *dev = data;
+	struct ccp_device *ccp = dev_get_drvdata(dev);
+
+	ccp5_disable_queue_interrupts(ccp);
+	if (ccp->use_tasklet)
+		tasklet_schedule(&ccp->irq_tasklet);
+	else
+		ccp5_irq_bh((unsigned long)ccp);
+	return IRQ_HANDLED;
+}
+
 static int ccp5_init(struct ccp_device *ccp)
 {
 	struct device *dev = ccp->dev;
@@ -728,19 +787,18 @@ static int ccp5_init(struct ccp_device *ccp)
 	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
 
 	/* Turn off the queues and disable interrupts until ready */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		cmd_q->qcontrol = 0; /* Start with nothing */
 		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(0x00, cmd_q->reg_int_enable);
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 
-		/* Clear the interrupts */
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+		/* Clear the interrupt status */
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 	}
 
 	dev_dbg(dev, "Requesting an IRQ...\n");
@@ -750,6 +808,10 @@ static int ccp5_init(struct ccp_device *ccp)
 		dev_err(dev, "unable to allocate an IRQ\n");
 		goto e_pool;
 	}
+	/* Initialize the ISR tasklet */
+	if (ccp->use_tasklet)
+		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
+			     (unsigned long)ccp);
 
 	/* Initialize the queue used to suspend */
 	init_waitqueue_head(&ccp->suspend_queue);
@@ -821,11 +883,7 @@ static int ccp5_init(struct ccp_device *ccp)
 	}
 
 	dev_dbg(dev, "Enabling interrupts...\n");
-	/* Enable interrupts */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		cmd_q = &ccp->cmd_q[i];
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
-	}
+	ccp5_enable_queue_interrupts(ccp);
 
 	dev_dbg(dev, "Registering device...\n");
 	/* Put this on the unit list to make it available */
@@ -877,17 +935,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	ccp_del_device(ccp);
 
 	/* Disable and clear interrupts */
+	ccp5_disable_queue_interrupts(ccp);
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
 
 		/* Turn off the run bit */
 		iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
 
-		/* Disable the interrupts */
-		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-
 		/* Clear the interrupt status */
-		iowrite32(0x00, cmd_q->reg_int_enable);
+		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
 		ioread32(cmd_q->reg_int_status);
 		ioread32(cmd_q->reg_status);
 	}
@@ -920,38 +976,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
 	}
 }
 
-static irqreturn_t ccp5_irq_handler(int irq, void *data)
-{
-	struct device *dev = data;
-	struct ccp_device *ccp = dev_get_drvdata(dev);
-	u32 status;
-	unsigned int i;
-
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
-
-		status = ioread32(cmd_q->reg_interrupt_status);
-
-		if (status) {
-			cmd_q->int_status = status;
-			cmd_q->q_status = ioread32(cmd_q->reg_status);
-			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
-			/* On error, only save the first error value */
-			if ((status & INT_ERROR) && !cmd_q->cmd_error)
-				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
-			cmd_q->int_rcvd = 1;
-
-			/* Acknowledge the interrupt and wake the kthread */
-			iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
-			wake_up_interruptible(&cmd_q->int_queue);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 340aef1..8ac7ae1 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -109,9 +109,8 @@
 #define INT_COMPLETION			0x1
 #define INT_ERROR			0x2
 #define INT_QUEUE_STOPPED		0x4
-#define ALL_INTERRUPTS			(INT_COMPLETION| \
-					 INT_ERROR| \
-					 INT_QUEUE_STOPPED)
+#define	INT_EMPTY_QUEUE			0x8
+#define SUPPORTED_INTERRUPTS		(INT_COMPLETION | INT_ERROR)
 
 #define LSB_REGION_WIDTH		5
 #define MAX_LSB_CNT			8
@@ -333,7 +332,10 @@ struct ccp_device {
 	void *dev_specific;
 	int (*get_irq)(struct ccp_device *ccp);
 	void (*free_irq)(struct ccp_device *ccp);
+	unsigned int qim;
 	unsigned int irq;
+	bool use_tasklet;
+	struct tasklet_struct irq_tasklet;
 
 	/* I/O area used for device communication. The register mapping
 	 * starts at an offset into the mapped bar.
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996..e880d4cf4 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 			goto e_irq;
 		}
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
@@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
 		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
 		goto e_msi;
 	}
+	ccp->use_tasklet = true;
 
 	return 0;
 
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index f184ee1..ff64631 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -583,44 +583,92 @@ static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
 static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
 				 bool high_bw_req)
 {
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned int control_flag;
 	int ret = 0;
 
-	if (high_bw_req) {
+	if (cp->ce_support.req_bw_before_clk) {
+		if (high_bw_req)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (high_bw_req)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
 		ret = qce_enable_clk(pengine->qce);
 		if (ret) {
 			pr_err("%s Unable enable clk\n", __func__);
-			goto clk_err;
+			return;
 		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 		if (ret) {
-			pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			qce_disable_clk(pengine->qce);
-			goto clk_err;
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable disable clk\n", __func__);
+			return;
 		}
-	} else {
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return;
+		}
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return;
+		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 0);
 		if (ret) {
-			pr_err("%s Unable to set to low bandwidth\n",
-						__func__);
-			goto clk_err;
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n", __func__);
+			return;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return;
 		}
 		ret = qce_disable_clk(pengine->qce);
 		if (ret) {
-			pr_err("%s Unable disable clk\n", __func__);
+			pr_err("%s Unable to disable clk\n", __func__);
 			ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 			if (ret)
-				pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			goto clk_err;
+				pr_err("%s Unable to set high bw\n", __func__);
+			return;
 		}
+		break;
+	default:
+		return;
 	}
-clk_err:
-	return;
-
 }
 
 static void qcrypto_bw_reaper_timer_callback(unsigned long data)
@@ -4856,12 +4904,36 @@ static int  _qcrypto_probe(struct platform_device *pdev)
 	if (!pengine)
 		return -ENOMEM;
 
-	/* open qce */
+	cp->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+	if (!cp->platform_support.bus_scale_table) {
+		dev_err(&pdev->dev, "bus_scale_table is NULL\n");
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	} else {
+		pengine->bus_scale_handle = msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			dev_err(&pdev->dev, "failed to get bus scale handle\n");
+			rc = -ENOMEM;
+			goto exit_kzfree;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set high bandwidth\n");
+		goto exit_kzfree;
+	}
 	handle = qce_open(pdev, &rc);
 	if (handle == NULL) {
-		kzfree(pengine);
-		platform_set_drvdata(pdev, NULL);
-		return rc;
+		rc = -ENODEV;
+		goto exit_free_pdata;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set low bandwidth\n");
+		goto exit_qce_close;
 	}
 
 	platform_set_drvdata(pdev, pengine);
@@ -4903,7 +4975,7 @@ static int  _qcrypto_probe(struct platform_device *pdev)
 			pengine->max_req, GFP_KERNEL);
 	if (pqcrypto_req_control == NULL) {
 		rc = -ENOMEM;
-		goto err;
+		goto exit_unlock_mutex;
 	}
 	qcrypto_init_req_control(pengine, pqcrypto_req_control);
 	if (cp->ce_support.bam)	 {
@@ -4911,15 +4983,7 @@ static int  _qcrypto_probe(struct platform_device *pdev)
 		cp->platform_support.shared_ce_resource = 0;
 		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
 		cp->platform_support.sha_hmac = 1;
-
-		cp->platform_support.bus_scale_table =
-			(struct msm_bus_scale_pdata *)
-					msm_bus_cl_get_pdata(pdev);
-		if (!cp->platform_support.bus_scale_table)
-			pr_warn("bus_scale_table is NULL\n");
-
 		pengine->ce_device = cp->ce_support.ce_device;
-
 	} else {
 		platform_support =
 			(struct msm_ce_hw_support *)pdev->dev.platform_data;
@@ -4928,33 +4992,11 @@ static int  _qcrypto_probe(struct platform_device *pdev)
 				platform_support->shared_ce_resource;
 		cp->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-		cp->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 		cp->platform_support.sha_hmac = platform_support->sha_hmac;
 	}
 
-	pengine->bus_scale_handle = 0;
-
-	if (cp->platform_support.bus_scale_table != NULL) {
-		pengine->bus_scale_handle =
-			msm_bus_scale_register_client(
-				(struct msm_bus_scale_pdata *)
-					cp->platform_support.bus_scale_table);
-		if (!pengine->bus_scale_handle) {
-			pr_err("%s not able to get bus scale\n",
-				__func__);
-			rc =  -ENOMEM;
-			goto err;
-		}
-		pengine->bw_state = BUS_NO_BANDWIDTH;
-	} else {
-		pengine->bw_state = BUS_HAS_BANDWIDTH;
-	}
-
-	if (cp->total_units != 1) {
-		mutex_unlock(&cp->engine_lock);
-		return 0;
-	}
+	if (cp->total_units != 1)
+		goto exit_unlock_mutex;
 
 	/* register crypto cipher algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
@@ -5243,13 +5285,19 @@ static int  _qcrypto_probe(struct platform_device *pdev)
 	}
 	mutex_unlock(&cp->engine_lock);
 
-
 	return 0;
 err:
 	_qcrypto_remove_engine(pengine);
+	kzfree(pqcrypto_req_control);
+exit_unlock_mutex:
 	mutex_unlock(&cp->engine_lock);
+exit_qce_close:
 	if (pengine->qce)
 		qce_close(pengine->qce);
+exit_free_pdata:
+	msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	platform_set_drvdata(pdev, NULL);
+exit_kzfree:
 	kzfree(pengine);
 	return rc;
 };
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 1932248..586f954 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -553,13 +553,10 @@ static void dax_dev_release(struct device *dev)
 	kfree(dax_dev);
 }
 
-static void unregister_dax_dev(void *dev)
+static void kill_dax_dev(struct dax_dev *dax_dev)
 {
-	struct dax_dev *dax_dev = to_dax_dev(dev);
 	struct cdev *cdev = &dax_dev->cdev;
 
-	dev_dbg(dev, "%s\n", __func__);
-
 	/*
 	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
 	 * ensuring that any fault handlers that might have seen
@@ -571,6 +568,15 @@ static void unregister_dax_dev(void *dev)
 	synchronize_srcu(&dax_srcu);
 	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
 	cdev_del(cdev);
+}
+
+static void unregister_dax_dev(void *dev)
+{
+	struct dax_dev *dax_dev = to_dax_dev(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	kill_dax_dev(dax_dev);
 	device_unregister(dev);
 }
 
@@ -647,6 +653,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
 	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
 	rc = device_add(dev);
 	if (rc) {
+		kill_dax_dev(dax_dev);
 		put_device(dev);
 		return ERR_PTR(rc);
 	}
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index a0e52a0..8c533ee 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _GOVERNOR_BW_HWMON_H
-#define _GOVERNOR_BW_HWMON_H
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
 
 #include <linux/kernel.h>
 #include <linux/devfreq.h>
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 2453e07..094548b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -68,6 +68,8 @@ int fence_signal_locked(struct fence *fence)
 	struct fence_cb *cur, *tmp;
 	int ret = 0;
 
+	lockdep_assert_held(fence->lock);
+
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
@@ -159,9 +161,6 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
 	if (WARN_ON(timeout < 0))
 		return -EINVAL;
 
-	if (timeout == 0)
-		return fence_is_signaled(fence);
-
 	trace_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
 	trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af..82f35a4 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -280,18 +280,24 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      unsigned *pshared_count,
 				      struct fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct fence **shared = NULL;
+	struct fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
 		unsigned seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
@@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
 
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				fence_put(shared[i]);
+			fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
@@ -379,10 +370,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 {
 	struct fence *fence;
 	unsigned seq, shared_count, i = 0;
-	long ret = timeout;
-
-	if (!timeout)
-		return reservation_object_test_signaled_rcu(obj, wait_all);
+	long ret = timeout ? timeout : 1;
 
 retry:
 	fence = NULL;
@@ -397,9 +385,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *lfence = rcu_dereference(fobj->shared[i]);
 
@@ -422,9 +407,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl &&
 		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
 			if (!fence_get_rcu(fence_excl))
@@ -439,6 +421,11 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 
 	rcu_read_unlock();
 	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			fence_put(fence);
+			goto retry;
+		}
+
 		ret = fence_wait_timeout(fence, intr, ret);
 		fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -484,12 +471,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 					  bool test_all)
 {
 	unsigned seq, shared_count;
-	int ret = true;
+	int ret;
 
+	rcu_read_lock();
 retry:
+	ret = true;
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
-	rcu_read_lock();
 
 	if (test_all) {
 		unsigned i;
@@ -500,46 +488,35 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
 			else if (!ret)
 				break;
 		}
 
-		/*
-		 * There could be a read_seqcount_retry here, but nothing cares
-		 * about whether it's the old or newer fence pointers that are
-		 * signaled. That race could still have happened after checking
-		 * read_seqcount_retry. If you care, use ww_mutex_lock.
-		 */
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
 	}
 
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(
 								fence_excl);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
 		}
 	}
 
 	rcu_read_unlock();
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
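
The reworked reservation helpers follow a common lockless shape: take the RCU read lock, snapshot the seqcount, grab references to everything of interest, and only trust the snapshot if the seqcount has not moved; otherwise drop what was taken and retry. The userspace sketch below shows that shape with stand-in snapshot/validation helpers built on C11 atomics rather than the real seqcount and fence APIs.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* A writer bumps this to an odd value while updating, even when done. */
    static atomic_uint seq;
    static int shared_value;

    static unsigned int snapshot_begin(void)
    {
        unsigned int s;

        do {
            s = atomic_load_explicit(&seq, memory_order_acquire);
        } while (s & 1);            /* writer in progress: wait for even */
        return s;
    }

    static bool snapshot_retry(unsigned int s)
    {
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&seq, memory_order_relaxed) != s;
    }

    /* Read shared_value only if no writer interleaved; otherwise loop. */
    static int read_consistent(void)
    {
        unsigned int s;
        int val;

        do {
            s = snapshot_begin();
            val = shared_value;     /* take the "references" / copy the data */
        } while (snapshot_retry(s));/* drop and retry if the writer moved */

        return val;
    }

    int main(void)
    {
        shared_value = 42;
        printf("read %d\n", read_consistent());
        return 0;
    }
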
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 454d3b3..0cb8d9d 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -324,8 +324,8 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
 	}
 
 	sync_file = sync_file_create(&pt->base);
+	fence_put(&pt->base);
 	if (!sync_file) {
-		fence_put(&pt->base);
 		err = -ENOMEM;
 		goto err;
 	}
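
With sync_file_create() now taking its own reference (see the sync_file.c hunk below), the timeline code can drop its creation reference unconditionally, whether or not the sync_file was allocated. The userspace sketch below shows that ownership convention on a generic refcounted object; the obj/get/put names are illustrative, not the fence API.

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
    };

    static struct obj *obj_new(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
            o->refcount = 1;    /* creation reference */
        return o;
    }

    static void obj_get(struct obj *o) { o->refcount++; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0) {
            puts("object freed");
            free(o);
        }
    }

    /* Like the patched sync_file_create(): the wrapper takes its own reference. */
    static struct obj *wrapper_create(struct obj *o)
    {
        obj_get(o);
        return o;   /* stand-in for the sync_file holding the fence */
    }

    int main(void)
    {
        struct obj *o = obj_new();
        struct obj *w;

        if (!o)
            return 1;
        w = wrapper_create(o);
        obj_put(o); /* creator drops its reference unconditionally ... */
        obj_put(w); /* ... and the wrapper releases its own when torn down */
        return 0;
    }
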
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 8a9cf92..5a9166a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -67,9 +67,10 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
  * sync_file_create() - creates a sync file
  * @fence:	fence to add to the sync_fence
  *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence. The sync_file can be released with
- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ * Creates a sync_file containing @fence. This function acquires an additional
+ * reference to @fence for the newly-created &sync_file, if it succeeds. The
+ * sync_file can be released with fput(sync_file->file). Returns the
+ * sync_file or NULL in case of error.
  */
 struct sync_file *sync_file_create(struct fence *fence)
 {
@@ -79,7 +80,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 	if (!sync_file)
 		return NULL;
 
-	sync_file->fence = fence;
+	sync_file->fence = fence_get(fence);
 
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
@@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct fence *fence)
 }
 EXPORT_SYMBOL(sync_file_create);
 
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd:		fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
 static struct sync_file *sync_file_fdget(int fd)
 {
 	struct file *file = fget(fd);
@@ -311,10 +305,9 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!poll_does_not_wait(wait) &&
-	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
-				       fence_check_cb_func) < 0)
+					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
 	}
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b98ede7..6f9c9ac 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -208,9 +208,11 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
  * OMAP's debounce time is in 31us steps
  *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
  * so we need to convert and round up to the closest unit.
+ *
+ * Return: 0 on success, negative error otherwise.
  */
-static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
-				    unsigned debounce)
+static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
+				   unsigned debounce)
 {
 	void __iomem		*reg;
 	u32			val;
@@ -218,11 +220,12 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
 	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
-		return;
+		return -ENOTSUPP;
 
 	if (enable) {
 		debounce = DIV_ROUND_UP(debounce, 31) - 1;
-		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
+			return -EINVAL;
 	}
 
 	l = BIT(offset);
@@ -255,6 +258,8 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
 		bank->context.debounce = debounce;
 		bank->context.debounce_en = val;
 	}
+
+	return 0;
 }
 
 /**
@@ -964,14 +969,20 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
 {
 	struct gpio_bank *bank;
 	unsigned long flags;
+	int ret;
 
 	bank = gpiochip_get_data(chip);
 
 	raw_spin_lock_irqsave(&bank->lock, flags);
-	omap2_set_gpio_debounce(bank, offset, debounce);
+	ret = omap2_set_gpio_debounce(bank, offset, debounce);
 	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
-	return 0;
+	if (ret)
+		dev_info(chip->parent,
+			 "Could not set line %u debounce to %u microseconds (%d)\n",
+			 offset, debounce, ret);
+
+	return ret;
 }
 
 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
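
The debounce conversion above rounds the requested time up to the next 31 us step and,
with this change, rejects values that would overflow the 8-bit DEBOUNCETIME field
instead of silently truncating them: asking for 100 us yields DIV_ROUND_UP(100, 31) - 1
= 3, which the hardware applies as (3 + 1) * 31 = 124 us, and anything above
(255 + 1) * 31 = 7936 us now fails with -EINVAL. A hypothetical helper mirroring that
conversion, for illustration only (DIV_ROUND_UP comes from <linux/kernel.h>, the error
codes from <linux/errno.h>):

	static inline int debounce_us_to_field(unsigned int us)
	{
		unsigned int field;

		if (!us)
			return 0;	/* the driver treats 0 as "debounce disabled" */

		field = DIV_ROUND_UP(us, 31) - 1;

		/* 8-bit GPIO_DEBOUNCINGTIME field: 0..255 -> 31..7936 us */
		return field > 0xff ? -EINVAL : field;
	}
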
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6898aa0..2f936a7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -13,6 +13,7 @@
 	select I2C
 	select I2C_ALGOBIT
 	select DMA_SHARED_BUFFER
+	select SYNC_FILE
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 882404c..42448c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1173,23 +1173,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1297,14 +1284,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1319,7 +1306,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1358,7 +1345,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
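
The dce_v10_0_latency_watermark() hunk above (repeated below for dce_v11_0, dce_v6_0
and dce_v8_0) replaces a chain of 20.12 fixed-point operations with a single 64-bit
multiply-and-divide. The rewrite is a plain algebraic simplification, sketched here for
reference only:

	dmif_size / ((mc_latency + 512) / disp_clk)
		== dmif_size * disp_clk / (mc_latency + 512)

	(disp_clk / 1000) * bytes_per_pixel
		== disp_clk * bytes_per_pixel / 1000

Casting dmif_size to u64 before the multiply keeps the product from overflowing 32 bits,
and div_u64() then performs the division in one step.
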
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 64a1df6..904dabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1140,23 +1140,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1264,14 +1251,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1286,7 +1273,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1325,7 +1312,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fde6ee1..6d02bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -932,23 +932,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1057,18 +1044,18 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce6_wm_params wm_low, wm_high;
 	u32 dram_channels;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 priority_a_mark = 0, priority_b_mark = 0;
 	u32 priority_a_cnt = PRIORITY_OFF;
 	u32 priority_b_cnt = PRIORITY_OFF;
-	u32 tmp, arb_control3;
+	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
@@ -1087,7 +1074,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1114,7 +1101,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1175,6 +1162,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1204,6 +1193,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	/* save values for DPM */
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /* watermark setup */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 7d9ffde..b1fb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1094,23 +1094,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1218,14 +1205,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce8_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1240,7 +1227,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1279,7 +1266,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4e19bde..99011621 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/sync_file.h>
 
 #include "drm_crtc_internal.h"
 
@@ -292,6 +293,23 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 }
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
+{
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	s32 __user *fence_ptr;
+
+	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+	return fence_ptr;
+}
+
 /**
  * drm_atomic_set_mode_for_crtc - set mode for CRTC
  * @state: the CRTC whose incoming state to update
@@ -496,6 +514,16 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 					&replaced);
 		state->color_mgmt_changed |= replaced;
 		return ret;
+	} else if (property == config->prop_out_fence_ptr) {
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+		if (!fence_ptr)
+			return 0;
+
+		if (put_user(-1, fence_ptr))
+			return -EFAULT;
+
+		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
 	else
@@ -538,6 +566,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
 		*val = (state->ctm) ? state->ctm->base.id : 0;
 	else if (property == config->gamma_lut_property)
 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+	else if (property == config->prop_out_fence_ptr)
+		*val = 0;
 	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
 	else
@@ -693,6 +723,17 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
 		drm_atomic_set_fb_for_plane(state, fb);
 		if (fb)
 			drm_framebuffer_unreference(fb);
+	} else if (property == config->prop_in_fence_fd) {
+		if (state->fence)
+			return -EINVAL;
+
+		if (U642I64(val) == -1)
+			return 0;
+
+		state->fence = sync_file_get_fence(val);
+		if (!state->fence)
+			return -EINVAL;
+
 	} else if (property == config->prop_crtc_id) {
 		struct drm_crtc *crtc = drm_crtc_find(dev, val);
 		return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -752,6 +793,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 
 	if (property == config->prop_fb_id) {
 		*val = (state->fb) ? state->fb->base.id : 0;
+	} else if (property == config->prop_in_fence_fd) {
+		*val = -1;
 	} else if (property == config->prop_crtc_id) {
 		*val = (state->crtc) ? state->crtc->base.id : 0;
 	} else if (property == config->prop_crtc_x) {
@@ -1152,6 +1195,36 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
 
 /**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: fence to use for the plane
+ *
+ * Helper to set up the plane_state fence in case it is not set yet.
+ * By using this drivers don't need to worry whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. In that case it
+ * will simply drop the reference to the fence, as we are not storing it
+ * anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function will
+ * simply set it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+			       struct fence *fence)
+{
+	if (plane_state->fence) {
+		fence_put(fence);
+		return;
+	}
+
+	plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
+
+/**
  * drm_atomic_set_crtc_for_connector - set crtc for connector
  * @conn_state: atomic state object for the connector
  * @crtc: crtc to use for the connector
@@ -1467,11 +1540,9 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  */
 
 static struct drm_pending_vblank_event *create_vblank_event(
-		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct drm_device *dev, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
-	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
 	if (!e)
@@ -1481,17 +1552,6 @@ static struct drm_pending_vblank_event *create_vblank_event(
 	e->event.base.length = sizeof(e->event);
 	e->event.user_data = user_data;
 
-	if (file_priv) {
-		ret = drm_event_reserve_init(dev, file_priv, &e->base,
-					     &e->event.base);
-		if (ret) {
-			kfree(e);
-			return NULL;
-		}
-	}
-
-	e->base.fence = fence;
-
 	return e;
 }
 
@@ -1596,6 +1656,206 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds and there are two DRM properties for that.
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * By contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ *	Use this property to pass a fence that DRM should wait on before
+ *	proceeding with the Atomic Commit request and show the framebuffer for
+ *	the plane on the screen. The fence can be either a normal fence or a
+ *	merged one, the sync_file framework will handle both cases and use a
+ *	fence_array if a merged fence is received. Passing -1 here means no
+ *	fences to wait on.
+ *
+ *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ *	it will only check if the Sync File is a valid one.
+ *
+ *	On the driver side the fence is stored on the @fence parameter of
+ *	struct &drm_plane_state. Drivers which also support implicit fencing
+ *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ *	to make sure there's consistent behaviour between drivers in precedence
+ *	of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ *	Use this property to pass a file descriptor pointer to DRM. Once the
+ *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ *	the file descriptor number of a Sync File. This Sync File contains the
+ *	CRTC fence that will be signaled when all framebuffers present on the
+ *	Atomic Commit * request for that given CRTC are scanned out on the
+ *	Atomic Commit request for that given CRTC are scanned out on the
+ *
+ *	The Atomic Commit request fails if an invalid pointer is passed. If the
+ *	Atomic Commit request fails for any other reason the out fence fd
+ *	returned will be -1. On an Atomic Commit with the
+ *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ *	Note that out-fences don't have a special interface to drivers and are
+ *	internally represented by a struct &drm_pending_vblank_event in struct
+ *	&drm_crtc_state, which is also used by the nonblocking atomic commit
+ *	helpers and for the DRM event handling for existing userspace.
+ */
+
+struct drm_out_fence_state {
+	s32 __user *out_fence_ptr;
+	struct sync_file *sync_file;
+	int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+			   struct fence *fence)
+{
+	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fence_state->fd < 0)
+		return fence_state->fd;
+
+	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+		return -EFAULT;
+
+	fence_state->sync_file = sync_file_create(fence);
+	if (!fence_state->sync_file)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+				  struct drm_atomic_state *state,
+				  struct drm_mode_atomic *arg,
+				  struct drm_file *file_priv,
+				  struct drm_out_fence_state **fence_state,
+				  unsigned int *num_fences)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i, ret;
+
+	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+		return 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		s32 __user *fence_ptr;
+
+		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+			struct drm_pending_vblank_event *e;
+
+			e = create_vblank_event(dev, arg->user_data);
+			if (!e)
+				return -ENOMEM;
+
+			crtc_state->event = e;
+		}
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			struct drm_pending_vblank_event *e = crtc_state->event;
+
+			if (!file_priv)
+				continue;
+
+			ret = drm_event_reserve_init(dev, file_priv, &e->base,
+						     &e->event.base);
+			if (ret) {
+				kfree(e);
+				crtc_state->event = NULL;
+				return ret;
+			}
+		}
+
+		if (fence_ptr) {
+			struct fence *fence;
+			struct drm_out_fence_state *f;
+
+			f = krealloc(*fence_state, sizeof(**fence_state) *
+				     (*num_fences + 1), GFP_KERNEL);
+			if (!f)
+				return -ENOMEM;
+
+			memset(&f[*num_fences], 0, sizeof(*f));
+
+			f[*num_fences].out_fence_ptr = fence_ptr;
+			*fence_state = f;
+
+			fence = drm_crtc_create_fence(crtc);
+			if (!fence)
+				return -ENOMEM;
+
+			ret = setup_out_fence(&f[(*num_fences)++], fence);
+			if (ret) {
+				fence_put(fence);
+				return ret;
+			}
+
+			crtc_state->event->base.fence = fence;
+		}
+	}
+
+	return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+				    struct drm_atomic_state *state,
+				    struct drm_out_fence_state *fence_state,
+				    unsigned int num_fences,
+				    bool install_fds)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	if (install_fds) {
+		for (i = 0; i < num_fences; i++)
+			fd_install(fence_state[i].fd,
+				   fence_state[i].sync_file->file);
+
+		kfree(fence_state);
+		return;
+	}
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
+		/*
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
+		 */
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
+	}
+
+	if (!fence_state)
+		return;
+
+	for (i = 0; i < num_fences; i++) {
+		if (fence_state[i].sync_file)
+			fput(fence_state[i].sync_file->file);
+		if (fence_state[i].fd >= 0)
+			put_unused_fd(fence_state[i].fd);
+
+		/* If writing -1 back to userspace fails, just log it */
+		if (fence_state[i].out_fence_ptr &&
+		    put_user(-1, fence_state[i].out_fence_ptr))
+			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+	}
+
+	kfree(fence_state);
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1608,11 +1868,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_out_fence_state *fence_state = NULL;
 	unsigned plane_mask;
 	int ret = 0;
-	unsigned int i, j;
+	unsigned int i, j, num_fences = 0;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1727,20 +1986,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 		drm_mode_object_unreference(obj);
 	}
 
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *e;
-
-			e = create_vblank_event(dev, file_priv, NULL,
-						arg->user_data);
-			if (!e) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			crtc_state->event = e;
-		}
-	}
+	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+				     &num_fences);
+	if (ret)
+		goto out;
 
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		/*
@@ -1757,20 +2006,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 out:
 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
-	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		/*
-		 * Free the allocated event. drm_atomic_helper_setup_commit
-		 * can allocate an event too, so only free it if it's ours
-		 * to prevent a double free in drm_atomic_state_clear.
-		 */
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *event = crtc_state->event;
-			if (event && (event->base.fence || event->base.file_priv)) {
-				drm_event_cancel_free(dev, &event->base);
-				crtc_state->event = NULL;
-			}
-		}
-	}
+	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
 
 	if (ret == -EDEADLK) {
 		drm_atomic_state_clear(state);
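
For reference, the IN_FENCE_FD/OUT_FENCE_PTR properties documented above are driven from
userspace through the ordinary atomic ioctl. A minimal sketch using libdrm's atomic
helpers; the property IDs have to be discovered at runtime (e.g. via
drmModeObjectGetProperties()), and all of the IDs and fds below are placeholders rather
than values from this patch:

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	int commit_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
			       uint32_t prop_in_fence_fd, uint32_t prop_out_fence_ptr,
			       int in_fence_fd, int32_t *out_fence_fd)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int ret;

		if (!req)
			return -ENOMEM;

		*out_fence_fd = -1;

		/* wait on this sync_file before showing the plane's new FB */
		drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, in_fence_fd);

		/* ask the kernel to return an out-fence fd for this CRTC */
		drmModeAtomicAddProperty(req, crtc_id, prop_out_fence_ptr,
					 (uint64_t)(uintptr_t)out_fence_fd);

		ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
		drmModeAtomicFree(req);

		/* on success *out_fence_fd signals once the new FBs are scanned out */
		return ret;
	}
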
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 50acd79..f34b4e8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3166,6 +3166,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
 {
 	if (state->fb)
 		drm_framebuffer_unreference(state->fb);
+
+	if (state->fence)
+		fence_put(state->fence);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf..79b3d52 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/fence.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -141,6 +142,54 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
 	}
 }
 
+static const struct fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct fence *fence)
+{
+	BUG_ON(fence->ops != &drm_crtc_fence_ops);
+	return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static const struct fence_ops drm_crtc_fence_ops = {
+	.get_driver_name = drm_crtc_fence_get_driver_name,
+	.get_timeline_name = drm_crtc_fence_get_timeline_name,
+	.enable_signaling = drm_crtc_fence_enable_signaling,
+	.wait = fence_default_wait,
+};
+
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+	struct fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+		       crtc->fence_context, ++crtc->fence_seqno);
+
+	return fence;
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -198,6 +247,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 		return -ENOMEM;
 	}
 
+	crtc->fence_context = fence_context_alloc(1);
+	spin_lock_init(&crtc->fence_lock);
+	snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+		 "CRTC:%d-%s", crtc->base.id, crtc->name);
+
 	crtc->base.properties = &crtc->properties;
 
 	list_add_tail(&crtc->head, &config->crtc_list);
@@ -213,6 +267,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&crtc->base, config->prop_active, 0);
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->prop_out_fence_ptr, 0);
 	}
 
 	return 0;
@@ -365,6 +421,18 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
 		return -ENOMEM;
 	dev->mode_config.prop_fb_id = prop;
 
+	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+			"IN_FENCE_FD", -1, INT_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_in_fence_fd = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+			"OUT_FENCE_PTR", 0, U64_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_out_fence_ptr = prop;
+
 	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
 			"CRTC_ID", DRM_MODE_OBJECT_CRTC);
 	if (!prop)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02..df2b51a 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -41,6 +41,8 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 			    const struct drm_display_mode *mode,
 			    const struct drm_framebuffer *fb);
 
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc);
+
 void drm_fb_release(struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0bc0afb..4e5ba7e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -76,6 +76,8 @@
 #define EDID_QUIRK_FORCE_12BPC			(1 << 9)
 /* Force 6bpc */
 #define EDID_QUIRK_FORCE_6BPC			(1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC			(1 << 11)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -90,6 +92,14 @@ struct detailed_mode_closure {
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+/* Enum storing luminance types for HDR blocks in EDID */
+enum luminance_value {
+	NO_LUMINANCE_DATA = 3,
+	MAXIMUM_LUMINANCE = 4,
+	FRAME_AVERAGE_LUMINANCE = 5,
+	MINIMUM_LUMINANCE = 6
+};
+
 static const struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -118,6 +128,9 @@ static const struct edid_quirk {
 	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
 	  EDID_QUIRK_DETAILED_IN_CM },
 
+	/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+	{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
 	/* LG Philips LCD LP154W01-A5 */
 	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
 	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -997,6 +1010,221 @@ static const struct drm_display_mode edid_cea_modes[] = {
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 65 - 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 66 - 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 67 - 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 68 - 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 69 - 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 70 - 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 71 - 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 72 - 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 73 - 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 74 - 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 75 - 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 76 - 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 77 - 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 78 - 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 79 - 1680x720@24Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+		3080, 3300, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 80 - 1680x720@25Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+		2948, 3168, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 81 - 1680x720@30Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+		2420, 2640, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 82 - 1680x720@50Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 83 - 1680x720@60Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 84 - 1680x720@100Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 85 - 1680x720@120Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 86 - 2560x1080@24Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+		3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 87 - 2560x1080@25Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+		3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 88 - 2560x1080@30Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+		3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 89 - 2560x1080@50Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 90 - 2560x1080@60Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+		2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 91 - 2560x1080@100Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+		2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 92 - 2560x1080@120Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 93 - 3840x2160p@24Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,},
+	/* 94 - 3840x2160p@25Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 95 - 3840x2160p@30Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 96 - 3840x2160p@50Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 97 - 3840x2160p@60Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 98 - 4096x2160p@24Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 99 - 4096x2160p@25Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 100 - 4096x2160p@30Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 101 - 4096x2160p@50Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 102 - 4096x2160p@60Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 103 - 3840x2160p@24Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 104 - 3840x2160p@25Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 105 - 3840x2160p@30Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 106 - 3840x2160p@50Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 107 - 3840x2160p@60Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
 };
 
 /*
@@ -2514,12 +2742,15 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 
 	return closure.modes;
 }
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define EXTENDED_TAG  0x07
 #define VIDEO_CAPABILITY_BLOCK	0x07
+#define Y420_VIDEO_DATA_BLOCK	0x0E
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
@@ -3168,6 +3399,21 @@ static bool cea_db_is_hdmi_vsdb(const u8 *db)
 	return hdmi_id == HDMI_IEEE_OUI;
 }
 
+static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
 #define for_each_cea_db(cea, i, start, end) \
 	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
@@ -3287,6 +3533,227 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
 }
 
 static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
+			"scdc %s, "
+			"rr %s, "
+			"3D flags 0x%x, "
+			"scramble %s\n",
+			connector->max_tmds_char,
+			connector->scdc_present ? "available" : "not available",
+			connector->rr_capable ? "capable" : "not capable",
+			connector->flags_3d,
+			connector->supports_scramble ?
+				"supported" : "not supported");
+}
+
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+	/*
+	 * Extract the sink's over/underscan behaviour from the
+	 * 2-bit fields of VCDB byte 3:
+	 * bits 5:4 - preferred (PT) video format
+	 * bits 3:2 - IT video format
+	 * bits 1:0 - CE video format
+	 */
+
+	connector->pt_scan_info =
+		(db[2] & (BIT(4) | BIT(5))) >> 4;
+	connector->it_scan_info =
+		(db[2] & (BIT(3) | BIT(2))) >> 2;
+	connector->ce_scan_info =
+		db[2] & (BIT(1) | BIT(0));
+
+	DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+			  (int) connector->pt_scan_info,
+			  (int) connector->it_scan_info,
+			  (int) connector->ce_scan_info);
+}
+
+static bool drm_edid_is_luminance_value_present(u32 block_length,
+		enum luminance_value value)
+{
+	return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+
+	u8 len = 0;
+
+	if (!db)
+		return;
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses all extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+				db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static u8 *
+drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid)
+		return NULL;
+
+	cea = drm_find_cea_extension(edid);
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((cea_db_tag(db) == EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * add_YCbCr420VDB_modes - add the modes found in the YCbCr420 VDB block
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses the YCbCr420 VDB block and adds the modes to @connector.
+ */
+static int
+add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
+{
+
+	const u8 *db = NULL;
+	u32 i = 0;
+	u32 modes = 0;
+	u32 video_format = 0;
+	u8 len = 0;
+
+	/*Find the YCbCr420 VDB*/
+	db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
+	/* Offset to byte 3 */
+	if (db) {
+		len = db[0] & 0x1F;
+		db += 2;
+		for (i = 0; i < len - 1; i++) {
+			struct drm_display_mode *mode;
+
+			video_format = *(db + i) & 0x7F;
+			mode = drm_display_mode_from_vic_index(connector,
+					db, len-1, i);
+			if (mode) {
+				DRM_DEBUG_KMS("Adding mode for vic = %d\n",
+				video_format);
+				drm_mode_probed_add(connector, mode);
+				modes++;
+			}
+		}
+	}
+	return modes;
+}
+
+static void
 monitor_name(struct detailed_timing *t, void *data)
 {
 	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@ -3410,6 +3877,9 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
 					drm_parse_hdmi_vsdb_audio(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
 				break;
 			default:
 				break;
@@ -3840,6 +4310,37 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
 	}
 }
 
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == VENDOR_BLOCK) {
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db)) {
+					drm_parse_hdmi_vsdb_video(
+						connector, db);
+					drm_parse_hdmi_vsdb_audio(
+						connector, db);
+				}
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
+			}
+		}
+	}
+}
+
 static void drm_add_display_info(struct drm_connector *connector,
 				 struct edid *edid)
 {
@@ -3877,6 +4378,11 @@ static void drm_add_display_info(struct drm_connector *connector,
 			  connector->name, info->bpc);
 	}
 
+	/* Extract sink capabilities from the HDMI vendor-specific data blocks */
+	drm_hdmi_extract_vsdbs_info(connector, edid);
+	/* Extract info from extended tag blocks */
+	drm_hdmi_extract_extended_blk_info(connector, edid);
+
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
 		return;
@@ -4091,6 +4597,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	num_modes += add_cea_modes(connector, edid);
 	num_modes += add_alternate_cea_modes(connector, edid);
 	num_modes += add_displayid_detailed_modes(connector, edid);
+	num_modes += add_YCbCr420VDB_modes(connector, edid);
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 
@@ -4105,6 +4612,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	if (quirks & EDID_QUIRK_FORCE_8BPC)
 		connector->display_info.bpc = 8;
 
+	if (quirks & EDID_QUIRK_FORCE_10BPC)
+		connector->display_info.bpc = 10;
+
 	if (quirks & EDID_QUIRK_FORCE_12BPC)
 		connector->display_info.bpc = 12;
 
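
A worked example for parse_hdmi_hf_vsdb() above, with illustrative values rather than
data from any particular sink: byte 5 of the HDMI Forum VSDB encodes the maximum TMDS
character rate in 5 MHz units, so

	max_tmds_char = db[5] * 5
	db[5] = 120  ->  600 MHz  (enough for 4096x2160@60 at 8 bpc, which needs 594 MHz)
	db[5] = 0    ->  per HDMI 2.0, no rates above 340 MHz are supported
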
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac..52629b6 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/reservation.h>
 
 #define DEFAULT_FBDEFIO_DELAY_MS 50
 
@@ -265,6 +268,38 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This should be put into the prepare_fb hook of struct &drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state)
+{
+	struct dma_buf *dma_buf;
+	struct fence *fence;
+
+	if ((plane->state->fb == state->fb) || !state->fb)
+		return 0;
+
+	dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+	if (dma_buf) {
+		fence = reservation_object_get_excl_rcu(dma_buf->resv);
+		drm_atomic_set_fence_for_plane(state, fence);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
+
 #ifdef CONFIG_DEBUG_FS
 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
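
drm_fb_cma_prepare_fb() above is meant to be dropped straight into a CMA driver's plane
helpers so the atomic helpers can wait on plane_state->fence before the flip. A minimal
wiring sketch under that assumption; my_plane_atomic_check/update are stand-ins for the
driver's existing callbacks, not symbols from this patch:

	#include <drm/drm_fb_cma_helper.h>
	#include <drm/drm_plane_helper.h>

	static int my_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
	{
		return 0;	/* driver-specific checks would go here */
	}

	static void my_plane_atomic_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
	{
		/* driver-specific plane programming would go here */
	}

	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		.prepare_fb	= drm_fb_cma_prepare_fb, /* picks up the implicit fence */
		.atomic_check	= my_plane_atomic_check,
		.atomic_update	= my_plane_atomic_update,
	};

The driver registers this with drm_plane_helper_add(plane, &my_plane_helper_funcs); as
the kernel-doc above notes, no cleanup_fb counterpart is needed for CMA framebuffers.
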
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f5815e1..fe00bea 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -663,6 +663,10 @@ void drm_event_cancel_free(struct drm_device *dev,
 		list_del(&p->pending_link);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (p->fence)
+		fence_put(p->fence);
+
 	kfree(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae..3957ef8 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -137,6 +137,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+		drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
 		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f46aac1..c75f4bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -420,6 +420,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
 		return 0;
 	}
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+		return 0;
+	}
+
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
 		DRM_INFO("DMAR active, disabling use of stolen memory\n");
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4112bef..9ded825 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,10 +1,20 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
 ccflags-y += -Idrivers/gpu/drm/msm/sde
 ccflags-y += -Idrivers/media/platform/msm/sde/rotator
 
 msm_drm-y := \
+	dp/dp_usbpd.o \
+	dp/dp_parser.o \
+	dp/dp_power.o \
+	dp/dp_catalog.o \
+	dp/dp_aux.o \
+	dp/dp_panel.o \
+	dp/dp_link.o \
+	dp/dp_ctrl.o \
+	dp/dp_display.o \
+	dp/dp_drm.o \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -51,6 +61,7 @@
 	sde/sde_hw_reg_dma_v1_color_proc.o \
 	sde/sde_hw_color_proc_v4.o \
 	sde/sde_hw_ad4.o \
+	sde_edid_parser.o
 
 msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
 	sde_rsc_hw.o \
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
new file mode 100644
index 0000000..a79a9c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/delay.h>
+
+#include "dp_aux.h"
+
+#define DP_AUX_ENUM_STR(x)		#x
+
+struct aux_buf {
+	u8 *start;      /* buffer start addr */
+	u8 *end;	/* buffer end addr */
+	u8 *data;       /* data pointer */
+	u32 size;       /* size of buffer */
+	u32 len;	/* data length */
+	u8 trans_num;   /* transaction number */
+	enum aux_tx_mode tx_mode;
+};
+
+struct dp_aux_private {
+	struct device *dev;
+	struct dp_aux dp_aux;
+	struct dp_catalog_aux *catalog;
+
+	struct mutex mutex;
+	struct completion comp;
+
+	struct aux_cmd *cmds;
+	struct aux_buf txp;
+	struct aux_buf rxp;
+
+	u32 aux_error_num;
+	bool cmd_busy;
+
+	u8 txbuf[256];
+	u8 rxbuf[256];
+};
+
+static char *dp_aux_get_error(u32 aux_error)
+{
+	switch (aux_error) {
+	case DP_AUX_ERR_NONE:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
+	case DP_AUX_ERR_ADDR:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
+	case DP_AUX_ERR_TOUT:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
+	case DP_AUX_ERR_NACK:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
+	case DP_AUX_ERR_DEFER:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
+	case DP_AUX_ERR_NACK_DEFER:
+		return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
+	default:
+		return "unknown";
+	}
+}
+
+static void dp_aux_buf_init(struct aux_buf *buf, u8 *data, u32 size)
+{
+	buf->start     = data;
+	buf->size      = size;
+	buf->data      = buf->start;
+	buf->end       = buf->start + buf->size;
+	buf->len       = 0;
+	buf->trans_num = 0;
+	buf->tx_mode   = AUX_NATIVE;
+}
+
+static void dp_aux_buf_set(struct dp_aux_private *aux)
+{
+	init_completion(&aux->comp);
+	aux->cmd_busy = false;
+	mutex_init(&aux->mutex);
+
+	dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
+	dp_aux_buf_init(&aux->rxp, aux->rxbuf, sizeof(aux->rxbuf));
+}
+
+static void dp_aux_buf_reset(struct aux_buf *buf)
+{
+	buf->data      = buf->start;
+	buf->len       = 0;
+	buf->trans_num = 0;
+	buf->tx_mode   = AUX_NATIVE;
+
+	memset(buf->start, 0x0, 256);
+}
+
+static void dp_aux_buf_push(struct aux_buf *buf, u32 len)
+{
+	buf->data += len;
+	buf->len  += len;
+}
+
+static u32 dp_aux_buf_trailing(struct aux_buf *buf)
+{
+	return (u32)(buf->end - buf->data);
+}
+
+static u32 dp_aux_add_cmd(struct aux_buf *buf, struct aux_cmd *cmd)
+{
+	u8 data;
+	u8 *bp, *cp;
+	u32 i, len;
+
+	if (cmd->ex_mode == AUX_READ)
+		len = 4;
+	else
+		len = cmd->len + 4;
+
+	if (dp_aux_buf_trailing(buf) < len) {
+		pr_err("buf trailing error\n");
+		return 0;
+	}
+
+	/*
+	 * cmd fifo only has depth of 144 bytes
+	 * limit buf length to 128 bytes here
+	 */
+	if ((buf->len + len) > 128) {
+		pr_err("buf len error\n");
+		return 0;
+	}
+
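+	/*
+	 * Build the 4-byte command header pushed into the AUX command FIFO:
+	 * byte 0 carries address bits [19:16] plus the read flag (BIT(4)),
+	 * byte 1 address bits [15:8], byte 2 address bits [7:0], and
+	 * byte 3 the transfer length minus one. For writes, the payload
+	 * bytes follow the header.
+	 */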
+	bp = buf->data;
+	data = cmd->addr >> 16;
+	data &= 0x0f;  /* 4 addr bits */
+
+	if (cmd->ex_mode == AUX_READ)
+		data |=  BIT(4);
+
+	*bp++ = data;
+	*bp++ = cmd->addr >> 8;
+	*bp++ = cmd->addr;
+	*bp++ = cmd->len - 1;
+
+	if (cmd->ex_mode == AUX_WRITE) {
+		cp = cmd->buf;
+
+		for (i = 0; i < cmd->len; i++)
+			*bp++ = *cp++;
+	}
+
+	dp_aux_buf_push(buf, len);
+
+	buf->tx_mode = cmd->tx_mode;
+
+	buf->trans_num++;
+
+	return cmd->len - 1;
+}
+
+static u32 dp_aux_cmd_fifo_tx(struct dp_aux_private *aux)
+{
+	u8 *dp;
+	u32 data, len, cnt;
+	struct aux_buf *tp = &aux->txp;
+
+	len = tp->len;
+	if (len == 0) {
+		pr_err("invalid len\n");
+		return 0;
+	}
+
+	cnt = 0;
+	dp = tp->start;
+
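+	/*
+	 * Each command byte is placed in bits [15:8] of DP_AUX_DATA; BIT(31)
+	 * (index write, see dp_cmd_fifo_rx()) is set only for the first byte
+	 * of the transfer.
+	 */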
+	while (cnt < len) {
+		data = *dp;
+		data <<= 8;
+		data &= 0x00ff00;
+		if (cnt == 0)
+			data |= BIT(31);
+
+		aux->catalog->data = data;
+		aux->catalog->write_data(aux->catalog);
+
+		cnt++;
+		dp++;
+	}
+
+	data = (tp->trans_num - 1);
+	if (tp->tx_mode == AUX_I2C) {
+		data |= BIT(8); /* I2C */
+		data |= BIT(10); /* NO SEND ADDR */
+		data |= BIT(11); /* NO SEND STOP */
+	}
+
+	data |= BIT(9); /* GO */
+	aux->catalog->data = data;
+	aux->catalog->write_trans(aux->catalog);
+
+	return tp->len;
+}
+
+static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
+{
+	u32 data;
+	u8 *dp;
+	u32 i;
+	struct aux_buf *rp = &aux->rxp;
+
+	data = 0;
+	data |= BIT(31); /* INDEX_WRITE */
+	data |= BIT(0);  /* read */
+
+	aux->catalog->data = data;
+	aux->catalog->write_data(aux->catalog);
+
+	dp = rp->data;
+
+	/* discard first byte */
+	data = aux->catalog->read_data(aux->catalog);
+
+	for (i = 0; i < len; i++) {
+		data = aux->catalog->read_data(aux->catalog);
+		*dp++ = (u8)((data >> 8) & 0xff);
+	}
+
+	rp->len = len;
+	return len;
+}
+
+static void dp_aux_native_handler(struct dp_aux_private *aux)
+{
+	u32 isr = aux->catalog->isr;
+
+	if (isr & DP_INTR_AUX_I2C_DONE)
+		aux->aux_error_num = DP_AUX_ERR_NONE;
+	else if (isr & DP_INTR_WRONG_ADDR)
+		aux->aux_error_num = DP_AUX_ERR_ADDR;
+	else if (isr & DP_INTR_TIMEOUT)
+		aux->aux_error_num = DP_AUX_ERR_TOUT;
+	if (isr & DP_INTR_NACK_DEFER)
+		aux->aux_error_num = DP_AUX_ERR_NACK;
+
+	complete(&aux->comp);
+}
+
+static void dp_aux_i2c_handler(struct dp_aux_private *aux)
+{
+	u32 isr = aux->catalog->isr;
+
+	if (isr & DP_INTR_AUX_I2C_DONE) {
+		if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
+			aux->aux_error_num = DP_AUX_ERR_NACK;
+		else
+			aux->aux_error_num = DP_AUX_ERR_NONE;
+	} else {
+		if (isr & DP_INTR_WRONG_ADDR)
+			aux->aux_error_num = DP_AUX_ERR_ADDR;
+		else if (isr & DP_INTR_TIMEOUT)
+			aux->aux_error_num = DP_AUX_ERR_TOUT;
+		if (isr & DP_INTR_NACK_DEFER)
+			aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+		if (isr & DP_INTR_I2C_NACK)
+			aux->aux_error_num = DP_AUX_ERR_NACK;
+		if (isr & DP_INTR_I2C_DEFER)
+			aux->aux_error_num = DP_AUX_ERR_DEFER;
+	}
+
+	complete(&aux->comp);
+}
+
+static void dp_aux_isr(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->catalog->get_irq(aux->catalog, aux->cmd_busy);
+
+	if (!aux->cmd_busy)
+		return;
+
+	if (aux->cmds->tx_mode == AUX_NATIVE)
+		dp_aux_native_handler(aux);
+	else
+		dp_aux_i2c_handler(aux);
+}
+
+
+static int dp_aux_write(struct dp_aux_private *aux)
+{
+	struct aux_cmd *cm;
+	struct aux_buf *tp;
+	u32 len, ret, timeout;
+
+	mutex_lock(&aux->mutex);
+
+	tp = &aux->txp;
+	dp_aux_buf_reset(tp);
+
+	cm = aux->cmds;
+	while (cm) {
+		ret = dp_aux_add_cmd(tp, cm);
+		if (ret <= 0)
+			break;
+
+		if (!cm->next)
+			break;
+		cm++;
+	}
+
+	reinit_completion(&aux->comp);
+	aux->cmd_busy = true;
+
+	len = dp_aux_cmd_fifo_tx(aux);
+
+	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
+	if (!timeout)
+		pr_err("aux write timeout\n");
+
+	pr_debug("aux status %s\n",
+		dp_aux_get_error(aux->aux_error_num));
+
+	if (aux->aux_error_num == DP_AUX_ERR_NONE)
+		ret = len;
+	else
+		ret = aux->aux_error_num;
+
+	aux->cmd_busy = false;
+	mutex_unlock(&aux->mutex);
+	return  ret;
+}
+
+static int dp_aux_read(struct dp_aux_private *aux)
+{
+	struct aux_cmd *cm;
+	struct aux_buf *tp, *rp;
+	u32 len, ret, timeout;
+
+	mutex_lock(&aux->mutex);
+
+	tp = &aux->txp;
+	rp = &aux->rxp;
+
+	dp_aux_buf_reset(tp);
+	dp_aux_buf_reset(rp);
+
+	cm = aux->cmds;
+	len = 0;
+
+	while (cm) {
+		ret = dp_aux_add_cmd(tp, cm);
+		len += cm->len;
+
+		if (ret <= 0)
+			break;
+
+		if (!cm->next)
+			break;
+		cm++;
+	}
+
+	reinit_completion(&aux->comp);
+	aux->cmd_busy = true;
+
+	dp_aux_cmd_fifo_tx(aux);
+
+	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
+	if (!timeout)
+		pr_err("aux read timeout\n");
+
+	pr_debug("aux status %s\n",
+		dp_aux_get_error(aux->aux_error_num));
+
+	if (aux->aux_error_num == DP_AUX_ERR_NONE)
+		ret = dp_cmd_fifo_rx(aux, len);
+	else
+		ret = aux->aux_error_num;
+
+	aux->cmds->buf = rp->data;
+	aux->cmd_busy = false;
+
+	mutex_unlock(&aux->mutex);
+
+	return ret;
+}
+
+static int dp_aux_write_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
+				enum aux_tx_mode mode, u8 *buf)
+{
+	struct aux_cmd cmd = {0};
+	struct dp_aux_private *aux;
+
+	if (!dp_aux || !len) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	cmd.ex_mode = AUX_WRITE;
+	cmd.tx_mode = mode;
+	cmd.addr    = addr;
+	cmd.len     = len;
+	cmd.buf     = buf;
+
+	aux->cmds = &cmd;
+
+	return dp_aux_write(aux);
+}
+
+static int dp_aux_read_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
+				enum aux_tx_mode mode, u8 **buf)
+{
+	int rc = 0;
+	struct aux_cmd cmd = {0};
+	struct dp_aux_private *aux;
+
+	if (!dp_aux || !len) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	cmd.ex_mode = AUX_READ;
+	cmd.tx_mode = mode;
+	cmd.addr    = addr;
+	cmd.len     = len;
+
+	aux->cmds = &cmd;
+
+	rc = dp_aux_read(aux);
+	if (rc <= 0) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	*buf = cmd.buf;
+end:
+	return rc;
+}
+
+static int dp_aux_process(struct dp_aux *dp_aux, struct aux_cmd *cmds)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux || !cmds) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->cmds = cmds;
+
+	if (cmds->ex_mode == AUX_READ)
+		return dp_aux_read(aux);
+	else
+		return dp_aux_write(aux);
+}
+
+static bool dp_aux_ready(struct dp_aux *dp_aux)
+{
+	u8 data = 0;
+	int count, ret;
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
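+	/* probe the AUX channel with a 1-byte I2C write to the EDID address */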
+	for (count = 5; count; count--) {
+		ret = dp_aux_write_ex(dp_aux, 0x50, 1, AUX_I2C, &data);
+		if (ret >= 0)
+			break;
+
+		msleep(100);
+	}
+
+	if (count <= 0) {
+		pr_err("aux chan NOT ready\n");
+		goto error;
+	}
+
+	return true;
+error:
+	return false;
+}
+
+static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->catalog->reset(aux->catalog);
+	aux->catalog->enable(aux->catalog, true);
+	aux->catalog->setup(aux->catalog, aux_cfg);
+}
+
+static void dp_aux_deinit(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->catalog->enable(aux->catalog, false);
+}
+
+struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
+{
+	int rc = 0;
+	struct dp_aux_private *aux;
+	struct dp_aux *dp_aux;
+
+	if (!catalog) {
+		pr_err("invalid input\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+	if (!aux) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	aux->dev = dev;
+
+	dp_aux_buf_set(aux);
+
+	aux->catalog = catalog;
+
+	dp_aux = &aux->dp_aux;
+
+	dp_aux->process = dp_aux_process;
+	dp_aux->read    = dp_aux_read_ex;
+	dp_aux->write   = dp_aux_write_ex;
+	dp_aux->ready   = dp_aux_ready;
+	dp_aux->isr     = dp_aux_isr;
+	dp_aux->init    = dp_aux_init;
+	dp_aux->deinit  = dp_aux_deinit;
+
+	return dp_aux;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_aux_put(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux)
+		return;
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	devm_kfree(aux->dev, aux);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
new file mode 100644
index 0000000..0603c15
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_AUX_H_
+#define _DP_AUX_H_
+
+#include "dp_catalog.h"
+
+enum dp_aux_error {
+	DP_AUX_ERR_NONE	= 0,
+	DP_AUX_ERR_ADDR	= -1,
+	DP_AUX_ERR_TOUT	= -2,
+	DP_AUX_ERR_NACK	= -3,
+	DP_AUX_ERR_DEFER	= -4,
+	DP_AUX_ERR_NACK_DEFER	= -5,
+};
+
+enum aux_tx_mode {
+	AUX_NATIVE,
+	AUX_I2C,
+};
+
+enum aux_exe_mode {
+	AUX_WRITE,
+	AUX_READ,
+};
+
+struct aux_cmd {
+	enum aux_exe_mode ex_mode;
+	enum aux_tx_mode tx_mode;
+	u32 addr;
+	u32 len;
+	u8 *buf;
+	bool next;
+};
+
+struct dp_aux {
+	int (*process)(struct dp_aux *aux, struct aux_cmd *cmd);
+	int (*write)(struct dp_aux *aux, u32 addr, u32 len,
+			enum aux_tx_mode mode, u8 *buf);
+	int (*read)(struct dp_aux *aux, u32 addr, u32 len,
+			enum aux_tx_mode mode, u8 **buf);
+	bool (*ready)(struct dp_aux *aux);
+	void (*isr)(struct dp_aux *aux);
+	void (*init)(struct dp_aux *aux, u32 *aux_cfg);
+	void (*deinit)(struct dp_aux *aux);
+};
+
+struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog);
+void dp_aux_put(struct dp_aux *aux);
+
+#endif /* _DP_AUX_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644
index 0000000..ca55d16
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/delay.h>
+
+#include "dp_catalog.h"
+
+/* DP_TX Registers */
+#define DP_HW_VERSION				(0x00000000)
+#define DP_SW_RESET				(0x00000010)
+#define DP_PHY_CTRL				(0x00000014)
+#define DP_CLK_CTRL				(0x00000018)
+#define DP_CLK_ACTIVE				(0x0000001C)
+#define DP_INTR_STATUS				(0x00000020)
+#define DP_INTR_STATUS2				(0x00000024)
+#define DP_INTR_STATUS3				(0x00000028)
+
+#define DP_DP_HPD_CTRL				(0x00000200)
+#define DP_DP_HPD_INT_STATUS			(0x00000204)
+#define DP_DP_HPD_INT_ACK			(0x00000208)
+#define DP_DP_HPD_INT_MASK			(0x0000020C)
+#define DP_DP_HPD_REFTIMER			(0x00000218)
+#define DP_DP_HPD_EVENT_TIME_0			(0x0000021C)
+#define DP_DP_HPD_EVENT_TIME_1			(0x00000220)
+#define DP_AUX_CTRL				(0x00000230)
+#define DP_AUX_DATA				(0x00000234)
+#define DP_AUX_TRANS_CTRL			(0x00000238)
+#define DP_TIMEOUT_COUNT			(0x0000023C)
+#define DP_AUX_LIMITS				(0x00000240)
+#define DP_AUX_STATUS				(0x00000244)
+
+#define DP_DPCD_CP_IRQ				(0x201)
+#define DP_DPCD_RXSTATUS			(0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM			(0x000002A0)
+
+#define DP_MAINLINK_CTRL			(0x00000400)
+#define DP_STATE_CTRL				(0x00000404)
+#define DP_CONFIGURATION_CTRL			(0x00000408)
+#define DP_SOFTWARE_MVID			(0x00000410)
+#define DP_SOFTWARE_NVID			(0x00000418)
+#define DP_TOTAL_HOR_VER			(0x0000041C)
+#define DP_START_HOR_VER_FROM_SYNC		(0x00000420)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000424)
+#define DP_ACTIVE_HOR_VER			(0x00000428)
+#define DP_MISC1_MISC0				(0x0000042C)
+#define DP_VALID_BOUNDARY			(0x00000430)
+#define DP_VALID_BOUNDARY_2			(0x00000434)
+#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING	(0x00000438)
+
+#define DP_MAINLINK_READY			(0x00000440)
+#define DP_MAINLINK_LEVELS			(0x00000444)
+#define DP_TU					(0x0000044C)
+
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000454)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000004C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000004C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000004C8)
+
+#define MMSS_DP_MISC1_MISC0			(0x0000042C)
+#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000480)
+#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000484)
+#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000488)
+#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000048C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000490)
+#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000494)
+#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000498)
+
+#define MMSS_DP_PSR_CRC_RG			(0x00000554)
+#define MMSS_DP_PSR_CRC_B			(0x00000558)
+
+#define MMSS_DP_AUDIO_CFG			(0x00000600)
+#define MMSS_DP_AUDIO_STATUS			(0x00000604)
+#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000608)
+#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000060C)
+#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000610)
+#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000614)
+
+#define MMSS_DP_SDP_CFG				(0x00000628)
+#define MMSS_DP_SDP_CFG2			(0x0000062C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000630)
+#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000634)
+
+#define MMSS_DP_AUDIO_STREAM_0			(0x00000640)
+#define MMSS_DP_AUDIO_STREAM_1			(0x00000644)
+
+#define MMSS_DP_EXTENSION_0			(0x00000650)
+#define MMSS_DP_EXTENSION_1			(0x00000654)
+#define MMSS_DP_EXTENSION_2			(0x00000658)
+#define MMSS_DP_EXTENSION_3			(0x0000065C)
+#define MMSS_DP_EXTENSION_4			(0x00000660)
+#define MMSS_DP_EXTENSION_5			(0x00000664)
+#define MMSS_DP_EXTENSION_6			(0x00000668)
+#define MMSS_DP_EXTENSION_7			(0x0000066C)
+#define MMSS_DP_EXTENSION_8			(0x00000670)
+#define MMSS_DP_EXTENSION_9			(0x00000674)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000678)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000067C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000680)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000684)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000688)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000068C)
+#define MMSS_DP_AUDIO_ISRC_0			(0x00000690)
+#define MMSS_DP_AUDIO_ISRC_1			(0x00000694)
+#define MMSS_DP_AUDIO_ISRC_2			(0x00000698)
+#define MMSS_DP_AUDIO_ISRC_3			(0x0000069C)
+#define MMSS_DP_AUDIO_ISRC_4			(0x000006A0)
+#define MMSS_DP_AUDIO_ISRC_5			(0x000006A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000006A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000006AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000006B0)
+
+#define MMSS_DP_GENERIC0_0			(0x00000700)
+#define MMSS_DP_GENERIC0_1			(0x00000704)
+#define MMSS_DP_GENERIC0_2			(0x00000708)
+#define MMSS_DP_GENERIC0_3			(0x0000070C)
+#define MMSS_DP_GENERIC0_4			(0x00000710)
+#define MMSS_DP_GENERIC0_5			(0x00000714)
+#define MMSS_DP_GENERIC0_6			(0x00000718)
+#define MMSS_DP_GENERIC0_7			(0x0000071C)
+#define MMSS_DP_GENERIC0_8			(0x00000720)
+#define MMSS_DP_GENERIC0_9			(0x00000724)
+#define MMSS_DP_GENERIC1_0			(0x00000728)
+#define MMSS_DP_GENERIC1_1			(0x0000072C)
+#define MMSS_DP_GENERIC1_2			(0x00000730)
+#define MMSS_DP_GENERIC1_3			(0x00000734)
+#define MMSS_DP_GENERIC1_4			(0x00000738)
+#define MMSS_DP_GENERIC1_5			(0x0000073C)
+#define MMSS_DP_GENERIC1_6			(0x00000740)
+#define MMSS_DP_GENERIC1_7			(0x00000744)
+#define MMSS_DP_GENERIC1_8			(0x00000748)
+#define MMSS_DP_GENERIC1_9			(0x0000074C)
+
+#define MMSS_DP_TIMING_ENGINE_EN		(0x00000A10)
+#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000A88)
+
+/* DP PHY Register offsets */
+#define DP_PHY_REVISION_ID0                     (0x00000000)
+#define DP_PHY_REVISION_ID1                     (0x00000004)
+#define DP_PHY_REVISION_ID2                     (0x00000008)
+#define DP_PHY_REVISION_ID3                     (0x0000000C)
+
+#define DP_PHY_CFG                              (0x00000010)
+#define DP_PHY_PD_CTL                           (0x00000018)
+#define DP_PHY_MODE                             (0x0000001C)
+
+#define DP_PHY_AUX_CFG0                         (0x00000020)
+#define DP_PHY_AUX_CFG1                         (0x00000024)
+#define DP_PHY_AUX_CFG2                         (0x00000028)
+#define DP_PHY_AUX_CFG3                         (0x0000002C)
+#define DP_PHY_AUX_CFG4                         (0x00000030)
+#define DP_PHY_AUX_CFG5                         (0x00000034)
+#define DP_PHY_AUX_CFG6                         (0x00000038)
+#define DP_PHY_AUX_CFG7                         (0x0000003C)
+#define DP_PHY_AUX_CFG8                         (0x00000040)
+#define DP_PHY_AUX_CFG9                         (0x00000044)
+#define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
+#define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
+
+#define DP_PHY_SPARE0				(0x00AC)
+
+#define TXn_TX_EMP_POST1_LVL			(0x000C)
+#define TXn_TX_DRV_LVL				(0x001C)
+
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
+
+#define EDID_START_ADDRESS			0x50
+
+/* DP MMSS_CC registers */
+#define MMSS_DP_LINK_CMD_RCGR			(0x0138)
+#define MMSS_DP_LINK_CFG_RCGR			(0x013C)
+#define MMSS_DP_PIXEL_M				(0x0174)
+#define MMSS_DP_PIXEL_N				(0x0178)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL                                   (0x0A0)
+#define DP_HDCP_STATUS                                 (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x298)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x29C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x750)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x75C)
+#define DP_HDCP_SHA_STATUS                             (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3                          (0x2A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x2A8)
+#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
+
+#define dp_read(offset) readl_relaxed((offset))
+#define dp_write(offset, data) writel_relaxed((data), (offset))
+
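+/*
+ * Resolve the enclosing dp_catalog_private from one of its embedded
+ * sub-structures (aux/ctrl/audio/panel) via two container_of() steps.
+ * The caller must declare a local 'struct dp_catalog_private *catalog'.
+ */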
+#define dp_catalog_get_priv(x) { \
+	struct dp_catalog *dp_catalog; \
+	dp_catalog = container_of(x, struct dp_catalog, x); \
+	catalog = container_of(dp_catalog, struct dp_catalog_private, \
+				dp_catalog); \
+}
+
+#define DP_INTERRUPT_STATUS1 \
+	(DP_INTR_AUX_I2C_DONE| \
+	DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+	DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+	DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+	DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
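+/*
+ * Each interrupt uses three adjacent bits in the DP_INTR_STATUS registers:
+ * status at bit n, ack at bit n + 1 and mask at bit n + 2, hence the
+ * "<< 1" for acks in the irq handlers and the "<< 2" for the masks below.
+ */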
+#define DP_INTR_MASK1		(DP_INTERRUPT_STATUS1 << 2)
+
+#define DP_INTERRUPT_STATUS2 \
+	(DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+	DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTR_MASK2		(DP_INTERRUPT_STATUS2 << 2)
+
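+/*
+ * PHY drive tables indexed as [voltage swing level][pre-emphasis level];
+ * 0xFF marks combinations the hardware does not support (checked in
+ * dp_catalog_ctrl_update_vx_px()).
+ */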
+static u8 const vm_pre_emphasis[4][4] = {
+	{0x00, 0x0B, 0x12, 0xFF},       /* pe0, 0 db */
+	{0x00, 0x0A, 0x12, 0xFF},       /* pe1, 3.5 db */
+	{0x00, 0x0C, 0xFF, 0xFF},       /* pe2, 6.0 db */
+	{0xFF, 0xFF, 0xFF, 0xFF}        /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+static u8 const vm_voltage_swing[4][4] = {
+	{0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4v  */
+	{0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+	{0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
+	{0xFF, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
+};
+
+struct dp_catalog_private {
+	struct device *dev;
+	struct dp_io *io;
+	struct dp_catalog dp_catalog;
+};
+
+/* aux related catalog functions */
+static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	return dp_read(base + DP_AUX_DATA);
+end:
+	return 0;
+}
+
+static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_DATA, aux->data);
+end:
+	return rc;
+}
+
+static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
+end:
+	return rc;
+}
+
+static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	aux_ctrl |= BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	aux_ctrl &= ~BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	if (enable) {
+		dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
+		dp_write(base + DP_AUX_LIMITS, 0xffff);
+		aux_ctrl |= BIT(0);
+	} else {
+		aux_ctrl &= ~BIT(0);
+	}
+
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, u32 *aux_cfg)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!aux || !aux_cfg) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x02);
+	wmb(); /* make sure PD programming happened */
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x7d);
+
+	/* Turn on BIAS current for PHY/PLL */
+	dp_write(catalog->io->dp_pll_io.base +
+		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+
+	/* DP AUX CFG register programming */
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG0, aux_cfg[0]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG1, aux_cfg[1]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG2, aux_cfg[2]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG3, aux_cfg[3]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG4, aux_cfg[4]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG5, aux_cfg[5]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG6, aux_cfg[6]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG7, aux_cfg[7]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG8, aux_cfg[8]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG9, aux_cfg[9]);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+}
+
+static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
+{
+	u32 ack;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	if (cmd_busy)
+		dp_write(base + DP_AUX_TRANS_CTRL, 0x0);
+
+	aux->isr = dp_read(base + DP_INTR_STATUS);
+	aux->isr &= ~DP_INTR_MASK1;
+	ack = aux->isr & DP_INTERRUPT_STATUS1;
+	ack <<= 1;
+	ack |= DP_INTR_MASK1;
+	dp_write(base + DP_INTR_STATUS, ack);
+}
+
+/* controller related catalog functions */
+static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
+	dp_write(base + DP_TU, ctrl->dp_tu);
+	dp_write(base + DP_VALID_BOUNDARY_2, ctrl->valid_boundary2);
+}
+
+static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_STATE_CTRL, state);
+}
+
+static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_CONFIGURATION_CTRL, cfg);
+	dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
+	dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
+}
+
+static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING, 0xe4);
+}
+
+static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	u32 mainlink_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
+
+	if (enable) {
+		mainlink_ctrl |= BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink is turned off before reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000002);
+		wmb(); /* make sure mainlink entered reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink reset done */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000001);
+		wmb(); /* make sure mainlink turned on */
+	} else {
+		mainlink_ctrl &= ~BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_config_misc(struct dp_catalog_ctrl *ctrl,
+					u32 cc, u32 tb)
+{
+	u32 misc_val = cc;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	misc_val |= (tb << 5);
+	misc_val |= BIT(0); /* Configure clock to synchronous mode */
+
+	pr_debug("isc settings = 0x%x\n", misc_val);
+	dp_write(base + DP_MISC1_MISC0, misc_val);
+}
+
+static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl)
+{
+	u32 pixel_m, pixel_n;
+	u32 mvid, nvid;
+	struct dp_catalog_private *catalog;
+	void __iomem *base_cc, *base_ctrl;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base_cc = catalog->io->dp_cc_io.base;
+	base_ctrl = catalog->io->ctrl_io.base;
+
+	pixel_m = dp_read(base_cc + MMSS_DP_PIXEL_M);
+	pixel_n = dp_read(base_cc + MMSS_DP_PIXEL_N);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
+
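+	/*
+	 * Recover N for the MSA from the pixel clock RCG counters: the
+	 * hardware N register holds ~(N - M), so inverting it and adding M
+	 * back yields N, while MVID is the raw M value scaled by 5 here.
+	 */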
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+	dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
+	dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
+}
+
+static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
+					u32 pattern)
+{
+	int bit, cnt = 10;
+	u32 data;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	bit = 1;
+	bit <<= (pattern - 1);
+	pr_debug("bit=%d train=%d\n", bit, pattern);
+	dp_write(base + DP_STATE_CTRL, bit);
+
+	bit = 8;
+	bit <<= (pattern - 1);
+
+	while (cnt--) {
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & bit)
+			break;
+	}
+
+	if (cnt < 0)
+		pr_err("set link_train=%d failed\n", pattern);
+}
+
+static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
+{
+	u32 sw_reset;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	sw_reset = dp_read(base + DP_SW_RESET);
+
+	sw_reset |= BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	sw_reset &= ~BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+}
+
+static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
+{
+	u32 data;
+	int cnt = 10;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	while (--cnt) {
+		/* DP_MAINLINK_READY */
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & BIT(0))
+			return true;
+
+		usleep_range(1000, 1010); /* 1ms wait before next reg read */
+	}
+	pr_err("mainlink not ready\n");
+end:
+	return false;
+}
+
+static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (enable) {
+		dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
+		dp_write(base + DP_INTR_STATUS2, DP_INTR_MASK2);
+	} else {
+		dp_write(base + DP_INTR_STATUS, 0x00);
+		dp_write(base + DP_INTR_STATUS2, 0x00);
+	}
+}
+
+static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (en) {
+		u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
+
+		dp_write(base + DP_DP_HPD_INT_ACK, 0xF);
+		dp_write(base + DP_DP_HPD_INT_MASK, 0xF);
+
+		/* Enabling REFTIMER */
+		reftimer |= BIT(16);
+		dp_write(base + DP_DP_HPD_REFTIMER, reftimer);
+		/* Enable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x1);
+	} else {
+		/* Disable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl)
+{
+	u32 ack = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	ctrl->isr = dp_read(base + DP_INTR_STATUS2);
+	ctrl->isr &= ~DP_INTR_MASK2;
+	ack = ctrl->isr & DP_INTERRUPT_STATUS2;
+	ack <<= 1;
+	ack |= DP_INTR_MASK2;
+	dp_write(base + DP_INTR_STATUS2, ack);
+}
+
+static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
+	usleep_range(1000, 1010); /* h/w recommended delay */
+	dp_write(base + DP_PHY_CTRL, 0x0);
+	wmb(); /* make sure PHY reset done */
+}
+
+static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl,
+		bool flipped, u8 ln_cnt)
+{
+	u32 info = 0x0;
+	struct dp_catalog_private *catalog;
+	u8 orientation = BIT(!!flipped);
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+
+	info |= (ln_cnt & 0x0F);
+	info |= ((orientation & 0x0F) << 4);
+	pr_debug("Shared Info = 0x%x\n", info);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_SPARE0, info);
+}
+
+static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl,
+		u8 v_level, u8 p_level)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base0, *base1;
+	u8 value0, value1;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base0 = catalog->io->ln_tx0_io.base;
+	base1 = catalog->io->ln_tx1_io.base;
+
+	pr_debug("v=%d p=%d\n", v_level, p_level);
+
+	value0 = vm_voltage_swing[v_level][p_level];
+	value1 = vm_pre_emphasis[v_level][p_level];
+
+	/* program default setting first */
+	dp_write(base0 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base1 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base0 + TXn_TX_EMP_POST1_LVL, 0x20);
+	dp_write(base1 + TXn_TX_EMP_POST1_LVL, 0x20);
+
+	/* Enable MUX to use Cursor values from these registers */
+	value0 |= BIT(5);
+	value1 |= BIT(5);
+
+	/* Configure host and panel only if both values are allowed */
+	if (value0 != 0xFF && value1 != 0xFF) {
+		dp_write(base0 + TXn_TX_DRV_LVL, value0);
+		dp_write(base1 + TXn_TX_DRV_LVL, value0);
+		dp_write(base0 + TXn_TX_EMP_POST1_LVL, value1);
+		dp_write(base1 + TXn_TX_EMP_POST1_LVL, value1);
+
+		pr_debug("host PHY settings: value0=0x%x value1=0x%x",
+						value0, value1);
+	}
+}
+
+/* panel related catalog functions */
+static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(panel);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_TOTAL_HOR_VER, panel->total);
+	dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
+	dp_write(base + DP_HSYNC_VSYNC_WIDTH_POLARITY, panel->width_blanking);
+	dp_write(base + DP_ACTIVE_HOR_VER, panel->dp_active);
+end:
+	return 0;
+}
+
+/* audio related catalog functions */
+static int dp_catalog_audio_acr_ctrl(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_stream_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_timestamp_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_infoframe_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_copy_mgmt_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_isrc_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_setup_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+	int rc = 0;
+	struct dp_catalog *dp_catalog;
+	struct dp_catalog_private *catalog;
+	struct dp_catalog_aux aux = {
+		.read_data     = dp_catalog_aux_read_data,
+		.write_data    = dp_catalog_aux_write_data,
+		.write_trans   = dp_catalog_aux_write_trans,
+		.reset         = dp_catalog_aux_reset,
+		.enable        = dp_catalog_aux_enable,
+		.setup         = dp_catalog_aux_setup,
+		.get_irq       = dp_catalog_aux_get_irq,
+	};
+	struct dp_catalog_ctrl ctrl = {
+		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
+		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
+		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
+		.config_misc    = dp_catalog_ctrl_config_misc,
+		.config_msa     = dp_catalog_ctrl_config_msa,
+		.set_pattern    = dp_catalog_ctrl_set_pattern,
+		.reset          = dp_catalog_ctrl_reset,
+		.mainlink_ready = dp_catalog_ctrl_mainlink_ready,
+		.enable_irq     = dp_catalog_ctrl_enable_irq,
+		.hpd_config     = dp_catalog_ctrl_hpd_config,
+		.phy_reset      = dp_catalog_ctrl_phy_reset,
+		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
+		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
+		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
+		.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
+	};
+	struct dp_catalog_audio audio = {
+		.acr_ctrl      = dp_catalog_audio_acr_ctrl,
+		.stream_sdp    = dp_catalog_audio_stream_sdp,
+		.timestamp_sdp = dp_catalog_audio_timestamp_sdp,
+		.infoframe_sdp = dp_catalog_audio_infoframe_sdp,
+		.copy_mgmt_sdp = dp_catalog_audio_copy_mgmt_sdp,
+		.isrc_sdp      = dp_catalog_audio_isrc_sdp,
+		.setup_sdp     = dp_catalog_audio_setup_sdp,
+	};
+	struct dp_catalog_panel panel = {
+		.timing_cfg = dp_catalog_panel_timing_cfg,
+	};
+
+	if (!io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	catalog  = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+	if (!catalog) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	catalog->dev = dev;
+	catalog->io = io;
+
+	dp_catalog = &catalog->dp_catalog;
+
+	dp_catalog->aux   = aux;
+	dp_catalog->ctrl  = ctrl;
+	dp_catalog->audio = audio;
+	dp_catalog->panel = panel;
+
+	return dp_catalog;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_catalog_put(struct dp_catalog *dp_catalog)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!dp_catalog)
+		return;
+
+	catalog = container_of(dp_catalog, struct dp_catalog_private,
+				dp_catalog);
+
+	devm_kfree(catalog->dev, catalog);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644
index 0000000..ce88569
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD		BIT(0)
+#define DP_INTR_AUX_I2C_DONE	BIT(3)
+#define DP_INTR_WRONG_ADDR	BIT(6)
+#define DP_INTR_TIMEOUT		BIT(9)
+#define DP_INTR_NACK_DEFER	BIT(12)
+#define DP_INTR_WRONG_DATA_CNT	BIT(15)
+#define DP_INTR_I2C_NACK	BIT(18)
+#define DP_INTR_I2C_DEFER	BIT(21)
+#define DP_INTR_PLL_UNLOCKED	BIT(24)
+#define DP_INTR_AUX_ERROR	BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO		BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT	BIT(3)
+#define DP_INTR_FRAME_END		BIT(6)
+#define DP_INTR_CRC_UPDATED		BIT(9)
+
+struct dp_catalog_aux {
+	u32 data;
+	u32 isr;
+
+	u32 (*read_data)(struct dp_catalog_aux *aux);
+	int (*write_data)(struct dp_catalog_aux *aux);
+	int (*write_trans)(struct dp_catalog_aux *aux);
+	void (*reset)(struct dp_catalog_aux *aux);
+	void (*enable)(struct dp_catalog_aux *aux, bool enable);
+	void (*setup)(struct dp_catalog_aux *aux, u32 *aux_cfg);
+	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
+};
+
+struct dp_catalog_ctrl {
+	u32 dp_tu;
+	u32 valid_boundary;
+	u32 valid_boundary2;
+	u32 isr;
+
+	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
+	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
+	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl);
+	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*config_misc)(struct dp_catalog_ctrl *ctrl, u32 cc, u32 tb);
+	void (*config_msa)(struct dp_catalog_ctrl *ctrl);
+	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
+	void (*reset)(struct dp_catalog_ctrl *ctrl);
+	bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
+	void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*hpd_config)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*phy_reset)(struct dp_catalog_ctrl *ctrl);
+	void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped,
+				u8 lane_cnt);
+	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
+				u8 p_level);
+	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
+	void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
+};
+
+struct dp_catalog_audio {
+	u32 data;
+
+	int (*acr_ctrl)(struct dp_catalog_audio *audio);
+	int (*stream_sdp)(struct dp_catalog_audio *audio);
+	int (*timestamp_sdp)(struct dp_catalog_audio *audio);
+	int (*infoframe_sdp)(struct dp_catalog_audio *audio);
+	int (*copy_mgmt_sdp)(struct dp_catalog_audio *audio);
+	int (*isrc_sdp)(struct dp_catalog_audio *audio);
+	int (*setup_sdp)(struct dp_catalog_audio *audio);
+};
+
+struct dp_catalog_panel {
+	u32 total;
+	u32 sync_start;
+	u32 width_blanking;
+	u32 dp_active;
+
+	int (*timing_cfg)(struct dp_catalog_panel *panel);
+};
+
+struct dp_catalog {
+	struct dp_catalog_aux aux;
+	struct dp_catalog_ctrl ctrl;
+	struct dp_catalog_audio audio;
+	struct dp_catalog_panel panel;
+};
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+void dp_catalog_put(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644
index 0000000..56f6052
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -0,0 +1,1395 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+
+#include "dp_ctrl.h"
+
+#define DP_LINK_RATE_MULTIPLIER	27000000
+#define DP_KHZ_TO_HZ 1000
+#define DP_CRYPTO_CLK_RATE_KHZ 180000
+
+/* sink power state  */
+#define SINK_POWER_ON		1
+#define SINK_POWER_OFF		2
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
+
+/* dp state ctrl */
+#define ST_TRAIN_PATTERN_1		BIT(0)
+#define ST_TRAIN_PATTERN_2		BIT(1)
+#define ST_TRAIN_PATTERN_3		BIT(2)
+#define ST_TRAIN_PATTERN_4		BIT(3)
+#define ST_SYMBOL_ERR_RATE_MEASUREMENT	BIT(4)
+#define ST_PRBS7			BIT(5)
+#define ST_CUSTOM_80_BIT_PATTERN	BIT(6)
+#define ST_SEND_VIDEO			BIT(7)
+#define ST_PUSH_IDLE			BIT(8)
+
+struct dp_vc_tu_mapping_table {
+	u32 vic;
+	u8 lanes;
+	u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+	u8 bpp;
+	u8 valid_boundary_link;
+	u16 delay_start_link;
+	bool boundary_moderation_en;
+	u8 valid_lower_boundary_link;
+	u8 upper_boundary_count;
+	u8 lower_boundary_count;
+	u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+	struct dp_ctrl dp_ctrl;
+
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_panel *panel;
+	struct dp_link *link;
+	struct dp_power *power;
+	struct dp_parser *parser;
+	struct dp_catalog_ctrl *catalog;
+
+	struct completion idle_comp;
+	struct completion video_comp;
+	struct completion irq_comp;
+
+	bool hpd_irq_on;
+	bool power_on;
+	bool sink_info_read;
+	bool cont_splash;
+	bool psm_enabled;
+	bool initialized;
+	bool orientation;
+
+	u32 pixel_rate;
+	u32 vic;
+};
+
+enum notification_status {
+	NOTIFY_UNKNOWN,
+	NOTIFY_CONNECT,
+	NOTIFY_DISCONNECT,
+	NOTIFY_CONNECT_IRQ_HPD,
+	NOTIFY_DISCONNECT_IRQ_HPD,
+};
+
+static void dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("idle_patterns_sent\n");
+	complete(&ctrl->idle_comp);
+}
+
+static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("dp_video_ready\n");
+	complete(&ctrl->video_comp);
+}
+
+static void dp_ctrl_set_sink_power_state(struct dp_ctrl_private *ctrl,
+		u8 power_state)
+{
+	const int len = 1;
+
+	ctrl->aux->write(ctrl->aux, 0x600, len, AUX_NATIVE, &power_state);
+}
+
+static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
+{
+	ctrl->catalog->state_ctrl(ctrl->catalog, state);
+}
+
+static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+	int const idle_pattern_completion_timeout_ms = 3 * HZ / 100;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_OFF);
+
+	reinit_completion(&ctrl->idle_comp);
+	dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
+
+	if (!wait_for_completion_timeout(&ctrl->idle_comp,
+			idle_pattern_completion_timeout_ms))
+		pr_warn("PUSH_IDLE pattern timedout\n");
+
+	pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+	u32 config = 0, tbd;
+
+	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
+	config |= (0 << 11); /* RGB */
+
+	/* Scrambler reset enable */
+	if (ctrl->panel->dpcd.scrambler_reset)
+		config |= (1 << 10);
+
+	tbd = ctrl->link->get_test_bits_depth(ctrl->link,
+			ctrl->panel->pinfo.bpp);
+	config |= tbd << 8;
+
+	/* Num of Lanes */
+	config |= ((ctrl->link->lane_count - 1) << 4);
+
+	if (ctrl->panel->dpcd.enhanced_frame)
+		config |= 0x40;
+
+	config |= 0x04; /* progressive video */
+
+	config |= 0x03;	/* sync clock & static Mvid */
+
+	ctrl->catalog->config_ctrl(ctrl->catalog, config);
+}
+
+/**
+ * dp_ctrl_configure_source_params() - configures DP transmitter source params
+ * @ctrl: Display Port Driver data
+ *
+ * Configures the DP transmitter source params including details such as lane
+ * configuration, output format and sink/panel timing information.
+ */
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+	u32 cc, tb;
+
+	ctrl->catalog->lane_mapping(ctrl->catalog);
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	dp_ctrl_config_ctrl(ctrl);
+
+	tb = ctrl->link->get_test_bits_depth(ctrl->link,
+		ctrl->panel->pinfo.bpp);
+	cc = ctrl->link->get_colorimetry_config(ctrl->link);
+	ctrl->catalog->config_misc(ctrl->catalog, cc, tb);
+
+	ctrl->catalog->config_msa(ctrl->catalog);
+
+	ctrl->panel->timing_cfg(ctrl->panel);
+}
+
+static void dp_ctrl_get_extra_req_bytes(u64 result_valid,
+					int valid_bdary_link,
+					u64 value1, u64 value2,
+					bool *negative, u64 *result,
+					u64 compare)
+{
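+	/*
+	 * Depending on whether result_valid and valid_bdary_link reach the
+	 * 'compare' threshold, *result is either the sum or the absolute
+	 * difference of value1 and value2, with *negative recording the sign.
+	 */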
+	*negative = false;
+	if (result_valid >= compare) {
+		if (valid_bdary_link
+				>= compare)
+			*result = value1 + value2;
+		else {
+			if (value1 < value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		}
+	} else {
+		if (valid_bdary_link
+				>= compare) {
+			if (value1 >= value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		} else {
+			*result = value1 + value2;
+			*negative = true;
+		}
+	}
+}
+
+static u64 roundup_u64(u64 x, u64 y)
+{
+	x += (y - 1);
+	return (div64_ul(x, y) * y);
+}
+
+static u64 rounddown_u64(u64 x, u64 y)
+{
+	u64 rem;
+
+	div64_u64_rem(x, y, &rem);
+	return (x - rem);
+}
+
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+		struct dp_vc_tu_mapping_table *tu_table)
+{
+	u32 const multiplier = 1000000;
+	u64 pclk, lclk;
+	u8 bpp, ln_cnt, link_rate;
+	int run_idx = 0;
+	u32 lwidth, h_blank;
+	u32 fifo_empty = 0;
+	u32 ratio_scale = 1001;
+	u64 temp, ratio, original_ratio;
+	u64 temp2, reminder;
+	u64 temp3, temp4, result = 0;
+
+	u64 err = multiplier;
+	u64 n_err = 0, n_n_err = 0;
+	bool n_err_neg, nn_err_neg;
+	u8 hblank_margin = 16;
+
+	u8 tu_size, tu_size_desired = 0, tu_size_minus1;
+	int valid_boundary_link;
+	u64 resulting_valid;
+	u64 total_valid;
+	u64 effective_valid;
+	u64 effective_valid_recorded;
+	int n_tus;
+	int n_tus_per_lane;
+	int paired_tus;
+	int remainder_tus;
+	int remainder_tus_upper, remainder_tus_lower;
+	int extra_bytes;
+	int filler_size;
+	int delay_start_link;
+	int boundary_moderation_en = 0;
+	int upper_bdry_cnt = 0;
+	int lower_bdry_cnt = 0;
+	int i_upper_bdry_cnt = 0;
+	int i_lower_bdry_cnt = 0;
+	int valid_lower_boundary_link = 0;
+	int even_distribution_bf = 0;
+	int even_distribution_legacy = 0;
+	int even_distribution = 0;
+	int min_hblank = 0;
+	int extra_pclk_cycles;
+	u8 extra_pclk_cycle_delay = 4;
+	int extra_pclk_cycles_in_link_clk;
+	u64 ratio_by_tu;
+	u64 average_valid2;
+	u64 extra_buffer_margin;
+	int new_valid_boundary_link;
+
+	u64 resulting_valid_tmp;
+	u64 ratio_by_tu_tmp;
+	int n_tus_tmp;
+	int extra_pclk_cycles_tmp;
+	int extra_pclk_cycles_in_lclk_tmp;
+	int extra_req_bytes_new_tmp;
+	int filler_size_tmp;
+	int lower_filler_size_tmp;
+	int delay_start_link_tmp;
+	int min_hblank_tmp = 0;
+	bool extra_req_bytes_is_neg = false;
+	struct dp_panel_info *pinfo = &ctrl->panel->pinfo;
+
+	u8 dp_brute_force = 1;
+	u64 brute_force_threshold = 10;
+	u64 diff_abs;
+
+	link_rate = ctrl->link->link_rate;
+	ln_cnt =  ctrl->link->lane_count;
+
+	bpp = pinfo->bpp;
+	lwidth = pinfo->h_active;
+	h_blank = pinfo->h_back_porch + pinfo->h_front_porch +
+				pinfo->h_sync_width;
+	pclk = pinfo->pixel_clk_khz * 1000;
+
+	boundary_moderation_en = 0;
+	upper_bdry_cnt = 0;
+	lower_bdry_cnt = 0;
+	i_upper_bdry_cnt = 0;
+	i_lower_bdry_cnt = 0;
+	valid_lower_boundary_link = 0;
+	even_distribution_bf = 0;
+	even_distribution_legacy = 0;
+	even_distribution = 0;
+	min_hblank = 0;
+
+	lclk = link_rate * DP_LINK_RATE_MULTIPLIER;
+
+	pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
+						pclk, lwidth, h_blank);
+	pr_debug("lclk = %lld, ln_cnt = %d\n", lclk, ln_cnt);
+	ratio = div64_u64_rem(pclk * bpp * multiplier,
+				8 * ln_cnt * lclk, &reminder);
+	ratio = div64_u64((pclk * bpp * multiplier), (8 * ln_cnt * lclk));
+	original_ratio = ratio;
+
+	extra_buffer_margin = roundup_u64(div64_u64(extra_pclk_cycle_delay
+				* lclk * multiplier, pclk), multiplier);
+	extra_buffer_margin = div64_u64(extra_buffer_margin, multiplier);
+
+	/* To deal with cases where lines are not distributable */
+	if (((lwidth % ln_cnt) != 0) && ratio < multiplier) {
+		ratio = ratio * ratio_scale;
+		ratio = ratio < (1000 * multiplier)
+				? ratio : (1000 * multiplier);
+	}
+	pr_debug("ratio = %lld\n", ratio);
+
+	for (tu_size = 32; tu_size <= 64; tu_size++) {
+		temp = ratio * tu_size;
+		temp2 = ((temp / multiplier) + 1) * multiplier;
+		n_err = roundup_u64(temp, multiplier) - temp;
+
+		if (n_err < err) {
+			err = n_err;
+			tu_size_desired = tu_size;
+		}
+	}
+	pr_debug("Info: tu_size_desired = %d\n", tu_size_desired);
+
+	tu_size_minus1 = tu_size_desired - 1;
+
+	valid_boundary_link = roundup_u64(ratio * tu_size_desired, multiplier);
+	valid_boundary_link /= multiplier;
+	n_tus = rounddown((lwidth * bpp * multiplier)
+			/ (8 * valid_boundary_link), multiplier) / multiplier;
+	even_distribution_legacy = n_tus % ln_cnt == 0 ? 1 : 0;
+	pr_debug("Info: n_symbol_per_tu=%d, number_of_tus=%d\n",
+					valid_boundary_link, n_tus);
+
+	extra_bytes = roundup_u64((n_tus + 1)
+			* ((valid_boundary_link * multiplier)
+			- (original_ratio * tu_size_desired)), multiplier);
+	extra_bytes /= multiplier;
+	extra_pclk_cycles = roundup(extra_bytes * 8 * multiplier / bpp,
+			multiplier);
+	extra_pclk_cycles /= multiplier;
+	extra_pclk_cycles_in_link_clk = roundup_u64(div64_u64(extra_pclk_cycles
+				* lclk * multiplier, pclk), multiplier);
+	extra_pclk_cycles_in_link_clk /= multiplier;
+	filler_size = roundup_u64((tu_size_desired - valid_boundary_link)
+						* multiplier, multiplier);
+	filler_size /= multiplier;
+	ratio_by_tu = div64_u64(ratio * tu_size_desired, multiplier);
+
+	pr_debug("extra_pclk_cycles_in_link_clk=%d, extra_bytes=%d\n",
+				extra_pclk_cycles_in_link_clk, extra_bytes);
+	pr_debug("extra_pclk_cycles_in_link_clk=%d\n",
+				extra_pclk_cycles_in_link_clk);
+	pr_debug("filler_size=%d, extra_buffer_margin=%lld\n",
+				filler_size, extra_buffer_margin);
+
+	delay_start_link = ((extra_bytes > extra_pclk_cycles_in_link_clk)
+			? extra_bytes
+			: extra_pclk_cycles_in_link_clk)
+				+ filler_size + extra_buffer_margin;
+	resulting_valid = valid_boundary_link;
+	pr_debug("Info: delay_start_link=%d, filler_size=%d\n",
+				delay_start_link, filler_size);
+	pr_debug("valid_boundary_link=%d ratio_by_tu=%lld\n",
+				valid_boundary_link, ratio_by_tu);
+
+	diff_abs = (resulting_valid >= ratio_by_tu)
+				? (resulting_valid - ratio_by_tu)
+				: (ratio_by_tu - resulting_valid);
+
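+	/*
+	 * Brute-force search: sweep TU sizes 32..64 and upper/lower
+	 * boundary counts 1..15 to minimize the error between the
+	 * effective valid symbols per TU and the ideal ratio, while
+	 * keeping the required horizontal blanking within the mode's
+	 * h_blank.
+	 */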
+	if (err != 0 && ((diff_abs > brute_force_threshold)
+			|| (even_distribution_legacy == 0)
+			|| (dp_brute_force == 1))) {
+		err = multiplier;
+		for (tu_size = 32; tu_size <= 64; tu_size++) {
+			for (i_upper_bdry_cnt = 1; i_upper_bdry_cnt <= 15;
+						i_upper_bdry_cnt++) {
+				for (i_lower_bdry_cnt = 1;
+					i_lower_bdry_cnt <= 15;
+					i_lower_bdry_cnt++) {
+					new_valid_boundary_link =
+						roundup_u64(ratio
+						* tu_size, multiplier);
+					average_valid2 = (i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					n_tus = rounddown_u64(div64_u64(lwidth
+						* multiplier * multiplier
+						* (bpp / 8), average_valid2),
+							multiplier);
+					n_tus /= multiplier;
+					n_tus_per_lane
+						= rounddown(n_tus
+							* multiplier
+							/ ln_cnt, multiplier);
+					n_tus_per_lane /= multiplier;
+					paired_tus =
+						rounddown((n_tus_per_lane)
+							* multiplier
+							/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt),
+							multiplier);
+					paired_tus /= multiplier;
+					remainder_tus = n_tus_per_lane
+							- paired_tus
+						* (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					if ((remainder_tus
+						- i_upper_bdry_cnt) > 0) {
+						remainder_tus_upper
+							= i_upper_bdry_cnt;
+						remainder_tus_lower =
+							remainder_tus
+							- i_upper_bdry_cnt;
+					} else {
+						remainder_tus_upper
+							= remainder_tus;
+						remainder_tus_lower = 0;
+					}
+					total_valid = paired_tus
+						* (i_upper_bdry_cnt
+						* new_valid_boundary_link
+							+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						+ (remainder_tus_upper
+						* new_valid_boundary_link)
+						+ (remainder_tus_lower
+						* (new_valid_boundary_link
+							- multiplier));
+					n_err_neg = nn_err_neg = false;
+					effective_valid
+						= div_u64(total_valid,
+							n_tus_per_lane);
+					n_n_err = (effective_valid
+							>= (ratio * tu_size))
+						? (effective_valid
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- effective_valid);
+					if (effective_valid < (ratio * tu_size))
+						nn_err_neg = true;
+					n_err = (average_valid2
+						>= (ratio * tu_size))
+						? (average_valid2
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- average_valid2);
+					if (average_valid2 < (ratio * tu_size))
+						n_err_neg = true;
+					even_distribution =
+						n_tus % ln_cnt == 0 ? 1 : 0;
+					diff_abs =
+						resulting_valid >= ratio_by_tu
+						? (resulting_valid
+							- ratio_by_tu)
+						: (ratio_by_tu
+							- resulting_valid);
+
+					resulting_valid_tmp = div64_u64(
+						(i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier)),
+						(i_upper_bdry_cnt
+							+ i_lower_bdry_cnt));
+					ratio_by_tu_tmp =
+						original_ratio * tu_size;
+					ratio_by_tu_tmp /= multiplier;
+					n_tus_tmp = rounddown_u64(
+						div64_u64(lwidth
+						* multiplier * multiplier
+						* bpp / 8,
+						resulting_valid_tmp),
+						multiplier);
+					n_tus_tmp /= multiplier;
+
+					temp3 = (resulting_valid_tmp
+						>= (original_ratio * tu_size))
+						? (resulting_valid_tmp
+						- original_ratio * tu_size)
+						: (original_ratio * tu_size)
+						- resulting_valid_tmp;
+					temp3 = (n_tus_tmp + 1) * temp3;
+					temp4 = (new_valid_boundary_link
+						>= (original_ratio * tu_size))
+						? (new_valid_boundary_link
+							- original_ratio
+							* tu_size)
+						: (original_ratio * tu_size)
+						- new_valid_boundary_link;
+					temp4 = (i_upper_bdry_cnt
+							* ln_cnt * temp4);
+
+					temp3 = roundup_u64(temp3, multiplier);
+					temp4 = roundup_u64(temp4, multiplier);
+					dp_ctrl_get_extra_req_bytes
+						(resulting_valid_tmp,
+						new_valid_boundary_link,
+						temp3, temp4,
+						&extra_req_bytes_is_neg,
+						&result,
+						(original_ratio * tu_size));
+					extra_req_bytes_new_tmp
+						= div64_ul(result, multiplier);
+					if ((extra_req_bytes_is_neg)
+						&& (extra_req_bytes_new_tmp
+							> 1))
+						extra_req_bytes_new_tmp
+						= extra_req_bytes_new_tmp - 1;
+					if (extra_req_bytes_new_tmp == 0)
+						extra_req_bytes_new_tmp = 1;
+					extra_pclk_cycles_tmp =
+						(u64)(extra_req_bytes_new_tmp
+						      * 8 * multiplier) / bpp;
+					extra_pclk_cycles_tmp /= multiplier;
+
+					if (extra_pclk_cycles_tmp <= 0)
+						extra_pclk_cycles_tmp = 1;
+					extra_pclk_cycles_in_lclk_tmp =
+						roundup_u64(div64_u64(
+							extra_pclk_cycles_tmp
+							* lclk * multiplier,
+							pclk), multiplier);
+					extra_pclk_cycles_in_lclk_tmp
+						/= multiplier;
+					filler_size_tmp = roundup_u64(
+						(tu_size * multiplier *
+						new_valid_boundary_link),
+						multiplier);
+					filler_size_tmp /= multiplier;
+					lower_filler_size_tmp =
+						filler_size_tmp + 1;
+					if (extra_req_bytes_is_neg)
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_pclk_cycles_in_lclk_tmp
+						: extra_req_bytes_new_tmp);
+					else
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_req_bytes_new_tmp :
+						extra_pclk_cycles_in_lclk_tmp);
+
+					temp4 = lower_filler_size_tmp
+						+ extra_buffer_margin;
+					if (extra_req_bytes_is_neg)
+						delay_start_link_tmp
+							= (temp3 >= temp4)
+							? (temp3 - temp4)
+							: (temp4 - temp3);
+					else
+						delay_start_link_tmp
+							= temp3 + temp4;
+
+					min_hblank_tmp = (int)div64_u64(
+						roundup_u64(
+						div64_u64(delay_start_link_tmp
+						* pclk * multiplier, lclk),
+						multiplier), multiplier)
+						+ hblank_margin;
+
+					if (((even_distribution == 1)
+						|| ((even_distribution_bf == 0)
+						&& (even_distribution_legacy
+								== 0)))
+						&& !n_err_neg && !nn_err_neg
+						&& n_n_err < err
+						&& (n_n_err < diff_abs
+						|| (dp_brute_force == 1))
+						&& (new_valid_boundary_link
+									- 1) > 0
+						&& (h_blank >=
+							(u32)min_hblank_tmp)) {
+						upper_bdry_cnt =
+							i_upper_bdry_cnt;
+						lower_bdry_cnt =
+							i_lower_bdry_cnt;
+						err = n_n_err;
+						boundary_moderation_en = 1;
+						tu_size_desired = tu_size;
+						valid_boundary_link =
+							new_valid_boundary_link;
+						effective_valid_recorded
+							= effective_valid;
+						delay_start_link
+							= delay_start_link_tmp;
+						filler_size = filler_size_tmp;
+						min_hblank = min_hblank_tmp;
+						n_tus = n_tus_tmp;
+						even_distribution_bf = 1;
+
+						pr_debug("upper_bdry_cnt=%d, lower_boundary_cnt=%d, err=%lld, tu_size_desired=%d, valid_boundary_link=%d, effective_valid=%lld\n",
+							upper_bdry_cnt,
+							lower_bdry_cnt, err,
+							tu_size_desired,
+							valid_boundary_link,
+							effective_valid);
+					}
+				}
+			}
+		}
+
+		if (boundary_moderation_en == 1) {
+			resulting_valid = (u64)(upper_bdry_cnt
+					* valid_boundary_link + lower_bdry_cnt
+					* (valid_boundary_link - 1))
+					/ (upper_bdry_cnt + lower_bdry_cnt);
+			ratio_by_tu = original_ratio * tu_size_desired;
+			valid_lower_boundary_link =
+				(valid_boundary_link / multiplier) - 1;
+
+			tu_size_minus1 = tu_size_desired - 1;
+			even_distribution_bf = 1;
+			valid_boundary_link /= multiplier;
+			pr_debug("Info: Boundary_moderation enabled\n");
+		}
+	}
+
+	min_hblank = ((int) roundup_u64(div64_u64(delay_start_link * pclk
+			* multiplier, lclk), multiplier))
+			/ multiplier + hblank_margin;
+	if (h_blank < (u32)min_hblank) {
+		pr_debug(" WARNING: run_idx=%d Programmed h_blank %d is smaller than the min_hblank %d supported.\n",
+					run_idx, h_blank, min_hblank);
+	}
+
+	if (fifo_empty)	{
+		tu_size_minus1 = 31;
+		valid_boundary_link = 32;
+		delay_start_link = 0;
+		boundary_moderation_en = 0;
+	}
+
+	pr_debug("tu_size_minus1=%d valid_boundary_link=%d delay_start_link=%d boundary_moderation_en=%d\n upper_boundary_cnt=%d lower_boundary_cnt=%d valid_lower_boundary_link=%d min_hblank=%d\n",
+		tu_size_minus1, valid_boundary_link, delay_start_link,
+		boundary_moderation_en, upper_bdry_cnt, lower_bdry_cnt,
+		valid_lower_boundary_link, min_hblank);
+
+	tu_table->valid_boundary_link = valid_boundary_link;
+	tu_table->delay_start_link = delay_start_link;
+	tu_table->boundary_moderation_en = boundary_moderation_en;
+	tu_table->valid_lower_boundary_link = valid_lower_boundary_link;
+	tu_table->upper_boundary_count = upper_bdry_cnt;
+	tu_table->lower_boundary_count = lower_bdry_cnt;
+	tu_table->tu_size_minus1 = tu_size_minus1;
+}
+
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+	u32 dp_tu = 0x0;
+	u32 valid_boundary = 0x0;
+	u32 valid_boundary2 = 0x0;
+	struct dp_vc_tu_mapping_table tu_calc_table;
+
+	dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
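+	/* Pack the calculated TU parameters into the catalog register fields */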
+	dp_tu |= tu_calc_table.tu_size_minus1;
+	valid_boundary |= tu_calc_table.valid_boundary_link;
+	valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+	valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+	valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+	valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+	if (tu_calc_table.boundary_moderation_en)
+		valid_boundary2 |= BIT(0);
+
+	pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+			dp_tu, valid_boundary, valid_boundary2);
+
+	ctrl->catalog->dp_tu = dp_tu;
+	ctrl->catalog->valid_boundary = valid_boundary;
+	ctrl->catalog->valid_boundary2 = valid_boundary2;
+
+	ctrl->catalog->update_transfer_unit(ctrl->catalog);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		return ret;
+
+	ret = wait_for_completion_timeout(&ctrl->video_comp, HZ / 2);
+	if (ret <= 0) {
+		pr_err("Link Train timedout\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
+		u32 voltage_level, u32 pre_emphasis_level)
+{
+	int i;
+	u8 buf[4];
+	u32 max_level_reached = 0;
+
+	if (voltage_level == DP_LINK_VOLTAGE_MAX) {
+		pr_debug("max. voltage swing level reached %d\n",
+				voltage_level);
+		max_level_reached |= BIT(2);
+	}
+
+	if (pre_emphasis_level == DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("max. pre-emphasis level reached %d\n",
+				pre_emphasis_level);
+		max_level_reached |= BIT(5);
+	}
+
+	pr_debug("max_level_reached = 0x%x\n", max_level_reached);
+
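+	/*
+	 * DPCD TRAINING_LANEx_SET layout: bits 1:0 voltage swing,
+	 * bit 2 MAX_SWING_REACHED, bits 4:3 pre-emphasis level,
+	 * bit 5 MAX_PRE_EMPHASIS_REACHED. The same value is written
+	 * for all four lanes (DPCD 0x103 - 0x106).
+	 */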
+	pre_emphasis_level <<= 3;
+
+	for (i = 0; i < 4; i++)
+		buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
+
+	pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
+	return ctrl->aux->write(ctrl->aux, 0x103, 4, AUX_NATIVE, buf);
+}
+
+static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+	struct dp_link *link = ctrl->link;
+
+	pr_debug("v=%d p=%d\n", link->v_level, link->p_level);
+
+	ctrl->catalog->update_vx_px(ctrl->catalog,
+			link->v_level, link->p_level);
+
+	dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
+}
+
+static void dp_ctrl_cap_lane_rate_set(struct dp_ctrl_private *ctrl)
+{
+	u8 buf[4];
+	struct dp_panel_dpcd *cap;
+
+	cap = &ctrl->panel->dpcd;
+
+	pr_debug("bw=%x lane=%d\n", ctrl->link->link_rate,
+		ctrl->link->lane_count);
+
+	buf[0] = ctrl->link->link_rate;
+	buf[1] = ctrl->link->lane_count;
+
+	if (cap->enhanced_frame)
+		buf[1] |= 0x80;
+
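+	/*
+	 * DPCD 0x100 = LINK_BW_SET, 0x101 = LANE_COUNT_SET
+	 * (bit 7 enables enhanced framing).
+	 */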
+	ctrl->aux->write(ctrl->aux, 0x100, 2, AUX_NATIVE, buf);
+}
+
+static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+		u8 pattern)
+{
+	u8 buf[4];
+
+	pr_debug("pattern=%x\n", pattern);
+
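+	/* DPCD 0x102 = TRAINING_PATTERN_SET */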
+	buf[0] = pattern;
+	ctrl->aux->write(ctrl->aux, 0x102, 1, AUX_NATIVE, buf);
+}
+
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
+{
+	int tries, old_v_level;
+	int ret = 0;
+	int usleep_time;
+	int const maximum_retries = 5;
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
+	dp_ctrl_cap_lane_rate_set(ctrl);
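+	/* 0x21: training pattern 1 with scrambling disabled (bit 5) */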
+	dp_ctrl_train_pattern_set(ctrl, 0x21); /* train_1 */
+	dp_ctrl_update_vx_px(ctrl);
+
+	tries = 0;
+	old_v_level = ctrl->link->v_level;
+	while (1) {
+		usleep_time = ctrl->panel->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time * 2);
+
+		if (ctrl->link->clock_recovery(ctrl->link)) {
+			ret = 0;
+			break;
+		}
+
+		if (ctrl->link->v_level == DP_LINK_VOLTAGE_MAX) {
+			ret = -1;
+			break;	/* quit */
+		}
+
+		if (old_v_level == ctrl->link->v_level) {
+			tries++;
+			if (tries >= maximum_retries) {
+				ret = -1;
+				break;	/* quit */
+			}
+		} else {
+			tries = 0;
+			old_v_level = ctrl->link->v_level;
+		}
+
+		ctrl->link->adjust_levels(ctrl->link);
+
+		dp_ctrl_update_vx_px(ctrl);
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (!ctrl)
+		return -EINVAL;
+
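+	/* Step down one link rate: 8.1 -> 5.4 -> 2.7 -> 1.62 Gbps per lane */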
+	switch (ctrl->link->link_rate) {
+	case DP_LINK_RATE_810:
+		ctrl->link->link_rate = DP_LINK_RATE_540;
+		break;
+	case DP_LINK_RATE_540:
+		ctrl->link->link_rate = DP_LINK_RATE_270;
+		break;
+	case DP_LINK_RATE_270:
+		ctrl->link->link_rate = DP_LINK_RATE_162;
+		break;
+	case DP_LINK_RATE_162:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	pr_debug("new rate=%d\n", ctrl->link->link_rate);
+
+	return ret;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+	int usleep_time;
+
+	dp_ctrl_train_pattern_set(ctrl, 0);
+
+	usleep_time = ctrl->panel->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time * 2);
+}
+
+static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
+{
+	int tries = 0;
+	int ret = 0;
+	int usleep_time;
+	char pattern;
+	int const maximum_retries = 5;
+
+	if (ctrl->panel->dpcd.flags & DPCD_TPS3)
+		pattern = 0x03;
+	else
+		pattern = 0x02;
+
+	dp_ctrl_update_vx_px(ctrl);
+	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
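+	/* Request the pattern from the sink with scrambling disabled (bit 5) */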
+	dp_ctrl_train_pattern_set(ctrl, pattern | 0x20);
+
+	do {
+		usleep_time = ctrl->panel->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time * 2);
+
+		if (ctrl->link->channel_equalization(ctrl->link)) {
+			ret = 0;
+			break;
+		}
+
+		if (tries > maximum_retries) {
+			ret = -1;
+			break;
+		}
+		tries++;
+
+		ctrl->link->adjust_levels(ctrl->link);
+
+		dp_ctrl_update_vx_px(ctrl);
+	} while (1);
+
+	return ret;
+}
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	ret = ctrl->aux->ready(ctrl->aux);
+	if (!ret) {
+		pr_err("aux chan NOT ready\n");
+		return ret;
+	}
+
+	ctrl->link->p_level = 0;
+	ctrl->link->v_level = 0;
+
+	dp_ctrl_config_ctrl(ctrl);
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	ret = dp_ctrl_link_train_1(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 1 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 1 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ret = dp_ctrl_link_training_2(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 2 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 2 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+clear:
+	dp_ctrl_clear_training_pattern(ctrl);
+	return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
+{
+	bool mainlink_ready = false;
+	int ret = 0;
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_ON);
+
+	if (ctrl->link->phy_pattern_requested(ctrl->link))
+		goto end;
+
+	if (!train)
+		goto send_video;
+
+	/*
+	 * As part of previous calls, DP controller state might have
+	 * transitioned to PUSH_IDLE. In order to start transmitting a link
+	 * training pattern, we first have to do a DP software reset.
+	 */
+	ctrl->catalog->reset(ctrl->catalog);
+
+	ret = dp_ctrl_link_train(ctrl);
+	if (ret)
+		goto end;
+
+send_video:
+	/*
+	 * Set up transfer unit values and set controller state to send
+	 * video.
+	 */
+	dp_ctrl_setup_tr_unit(ctrl);
+	ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO);
+
+	dp_ctrl_wait4video_ready(ctrl);
+	mainlink_ready = ctrl->catalog->mainlink_ready(ctrl->catalog);
+	pr_debug("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+end:
+	return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+		char *name, u32 rate)
+{
+	u32 num = ctrl->parser->mp[DP_CTRL_PM].num_clk;
+	struct dss_clk *cfg = ctrl->parser->mp[DP_CTRL_PM].clk_config;
+
+	while (num && strcmp(cfg->clk_name, name)) {
+		num--;
+		cfg++;
+	}
+
+	if (num)
+		cfg->rate = rate;
+	else
+		pr_err("%s clock could not be set with rate %d\n", name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	ctrl->power->set_pixel_clk_parent(ctrl->power);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
+		(ctrl->link->link_rate * DP_LINK_RATE_MULTIPLIER) /
+			DP_KHZ_TO_HZ);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_pixel_clk", ctrl->pixel_rate);
+
+	ret = ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, true);
+	if (ret) {
+		pr_err("Unabled to start link clocks\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_disable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
+}
+
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+	struct dp_ctrl_private *ctrl;
+	struct dp_catalog_ctrl *catalog;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return -EINVAL;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->initialized) {
+		pr_debug("host init done already\n");
+		return 0;
+	}
+
+	ctrl->orientation = flip;
+	catalog = ctrl->catalog;
+
+	catalog->reset(ctrl->catalog);
+	catalog->phy_reset(ctrl->catalog);
+	catalog->enable_irq(ctrl->catalog, true);
+
+	ctrl->initialized = true;
+
+	return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @dp_ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (!ctrl->initialized) {
+		pr_debug("host deinit done already\n");
+		return;
+	}
+
+	ctrl->catalog->enable_irq(ctrl->catalog, false);
+	ctrl->catalog->reset(ctrl->catalog);
+
+	/* Make sure DP is disabled before clk disable */
+	wmb();
+
+	dp_ctrl_disable_mainlink_clocks(ctrl);
+
+	ctrl->initialized = false;
+	pr_debug("Host deinitialized successfully\n");
+}
+
+static int dp_ctrl_on_irq(struct dp_ctrl_private *ctrl, bool lt_needed)
+{
+	int ret = 0;
+
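+	/*
+	 * Retry the full link bring-up whenever link training falls back
+	 * to a lower link rate (-EAGAIN).
+	 */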
+	do {
+		if (ret == -EAGAIN)
+			ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+		ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+		if (lt_needed) {
+			/*
+			 * Disable and re-enable the mainlink clock since the
+			 * link clock might have been adjusted as part of the
+			 * link maintenance.
+			 */
+			if (!ctrl->link->phy_pattern_requested(
+					ctrl->link))
+				dp_ctrl_disable_mainlink_clocks(ctrl);
+
+			ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+			if (ret)
+				continue;
+		}
+
+		dp_ctrl_configure_source_params(ctrl);
+
+		reinit_completion(&ctrl->idle_comp);
+
+		ctrl->power_on = true;
+
+		if (ctrl->psm_enabled) {
+			ret = ctrl->link->send_psm_request(ctrl->link, false);
+			if (ret) {
+				pr_err("failed to exit low power mode, rc=%d\n",
+					ret);
+				continue;
+			}
+		}
+
+		ret = dp_ctrl_setup_main_link(ctrl, lt_needed);
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+
+static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		goto link_training;
+
+	ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
+	ctrl->catalog->hpd_config(ctrl->catalog, true);
+
+	ctrl->link->link_rate  = ctrl->panel->get_link_rate(ctrl->panel);
+	ctrl->link->lane_count = ctrl->panel->dpcd.max_lane_count;
+	ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
+
+	pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
+		ctrl->link->link_rate, ctrl->link->lane_count,
+		ctrl->pixel_rate);
+
+	ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+	ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+	if (ret)
+		goto exit;
+
+	reinit_completion(&ctrl->idle_comp);
+
+	dp_ctrl_configure_source_params(ctrl);
+
+	if (ctrl->psm_enabled)
+		ret = ctrl->link->send_psm_request(ctrl->link, false);
+link_training:
+	ctrl->power_on = true;
+
+	while (dp_ctrl_setup_main_link(ctrl, true) == -EAGAIN)
+		pr_debug("MAIN LINK TRAINING RETRY\n");
+
+	ctrl->cont_splash = 0;
+
+	ctrl->power_on = true;
+	pr_debug("End-\n");
+
+exit:
+	return ret;
+}
+
+static int dp_ctrl_off_irq(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("ctrl already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	/* Make sure DP mainlink and audio engines are disabled */
+	wmb();
+
+	complete_all(&ctrl->irq_comp);
+	pr_debug("end\n");
+
+	return 0;
+}
+
+static int dp_ctrl_off_hpd(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("panel already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	ctrl->power_on = false;
+	ctrl->sink_info_read = false;
+
+	pr_debug("DP off done\n");
+
+	return 0;
+}
+
+static int dp_ctrl_on(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_on_irq(ctrl, false);
+	else
+		rc = dp_ctrl_on_hpd(ctrl);
+end:
+	return rc;
+}
+
+static int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_off_irq(ctrl);
+	else
+		rc = dp_ctrl_off_hpd(ctrl);
+end:
+	return rc;
+}
+
+static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	ctrl->catalog->get_interrupt(ctrl->catalog);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO)
+		dp_ctrl_video_ready(ctrl);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT)
+		dp_ctrl_idle_patterns_sent(ctrl);
+}
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+	struct dp_ctrl *dp_ctrl;
+
+	if (!in || !in->dev || !in->panel || !in->aux ||
+	    !in->link || !in->catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	init_completion(&ctrl->idle_comp);
+	init_completion(&ctrl->video_comp);
+	init_completion(&ctrl->irq_comp);
+
+	/* in parameters */
+	ctrl->parser   = in->parser;
+	ctrl->panel    = in->panel;
+	ctrl->power    = in->power;
+	ctrl->aux      = in->aux;
+	ctrl->link     = in->link;
+	ctrl->catalog  = in->catalog;
+
+	dp_ctrl = &ctrl->dp_ctrl;
+
+	/* out parameters */
+	dp_ctrl->init      = dp_ctrl_host_init;
+	dp_ctrl->deinit    = dp_ctrl_host_deinit;
+	dp_ctrl->on        = dp_ctrl_on;
+	dp_ctrl->off       = dp_ctrl_off;
+	dp_ctrl->push_idle = dp_ctrl_push_idle;
+	dp_ctrl->isr       = dp_ctrl_isr;
+
+	return dp_ctrl;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	devm_kfree(ctrl->dev, ctrl);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644
index 0000000..5efe505
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
+	void (*deinit)(struct dp_ctrl *dp_ctrl);
+	int (*on)(struct dp_ctrl *dp_ctrl);
+	int (*off)(struct dp_ctrl *dp_ctrl);
+	void (*push_idle)(struct dp_ctrl *dp_ctrl);
+	void (*isr)(struct dp_ctrl *dp_ctrl);
+};
+
+struct dp_ctrl_in {
+	struct device *dev;
+	struct dp_panel *panel;
+	struct dp_aux *aux;
+	struct dp_link *link;
+	struct dp_parser *parser;
+	struct dp_power *power;
+	struct dp_catalog_ctrl *catalog;
+};
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 0000000..850acbf
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+
+#include "msm_drv.h"
+#include "dp_usbpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+
+static struct dp_display *g_dp_display;
+
+struct dp_display_private {
+	char *name;
+	int irq;
+
+	struct platform_device *pdev;
+	struct dentry *root;
+	struct mutex lock;
+
+	struct dp_usbpd   *usbpd;
+	struct dp_parser  *parser;
+	struct dp_power   *power;
+	struct dp_catalog *catalog;
+	struct dp_aux     *aux;
+	struct dp_link    *link;
+	struct dp_panel   *panel;
+	struct dp_ctrl    *ctrl;
+
+	struct dp_usbpd_cb usbpd_cb;
+	struct dp_display_mode mode;
+	struct dp_display dp_display;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+	{.compatible = "qcom,dp-display"},
+	{}
+};
+
+static irqreturn_t dp_display_irq(int irq, void *dev_id)
+{
+	struct dp_display_private *dp = dev_id;
+
+	if (!dp) {
+		pr_err("invalid data\n");
+		return IRQ_NONE;
+	}
+
+	/* DP controller isr */
+	dp->ctrl->isr(dp->ctrl);
+
+	/* DP aux isr */
+	dp->aux->isr(dp->aux);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t debugfs_dp_info_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	struct dp_display_private *dp = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dp)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", dp->name);
+	len += snprintf(buf + len, (SZ_4K - len),
+			"\tResolution = %dx%d\n",
+			dp->panel->pinfo.h_active,
+			dp->panel->pinfo.v_active);
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static const struct file_operations dp_debug_fops = {
+	.open = simple_open,
+	.read = debugfs_dp_info_read,
+};
+
+static int dp_display_debugfs_init(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir(dp->name, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = PTR_ERR(dir);
+		pr_err("[%s] debugfs create dir failed, rc = %d\n",
+		       dp->name, rc);
+		goto error;
+	}
+
+	file = debugfs_create_file("dp_debug", 0444, dir, dp, &dp_debug_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs create file failed, rc=%d\n",
+		       dp->name, rc);
+		goto error_remove_dir;
+	}
+
+	dp->root = dir;
+	return rc;
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static int dp_display_debugfs_deinit(struct dp_display_private *dp)
+{
+	debugfs_remove(dp->root);
+	return 0;
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+		void *data)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+	struct drm_device *drm;
+	struct msm_drm_private *priv;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	drm = dev_get_drvdata(master);
+	dp = platform_get_drvdata(pdev);
+	if (!drm || !dp) {
+		pr_err("invalid param(s), drm %pK, dp %pK\n",
+				drm, dp);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp->dp_display.drm_dev = drm;
+	priv = drm->dev_private;
+
+	mutex_lock(&dp->lock);
+
+	rc = dp_display_debugfs_init(dp);
+	if (rc) {
+		pr_err("[%s]Debugfs init failed, rc=%d\n", dp->name, rc);
+		goto end;
+	}
+
+	rc = dp->parser->parse(dp->parser);
+	if (rc) {
+		pr_err("device tree parsing failed\n");
+		goto end;
+	}
+
+	rc = dp->power->power_client_init(dp->power, &priv->phandle);
+	if (rc) {
+		pr_err("Power client create failed\n");
+		goto end;
+	}
+end:
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct dp_display_private *dp;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	dp = platform_get_drvdata(pdev);
+	if (!dp) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	mutex_lock(&dp->lock);
+
+	(void)dp->power->power_client_deinit(dp->power);
+
+	(void)dp_display_debugfs_deinit(dp);
+
+	mutex_unlock(&dp->lock);
+}
+
+static const struct component_ops dp_display_comp_ops = {
+	.bind = dp_display_bind,
+	.unbind = dp_display_unbind,
+};
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+	int rc;
+
+	rc = dp->panel->read_dpcd(dp->panel);
+	if (rc)
+		goto end;
+
+	rc = dp->panel->read_edid(dp->panel);
+	if (rc)
+		goto end;
+
+	return 0;
+end:
+	return rc;
+}
+
+static int dp_display_process_hpd_low(struct dp_display_private *dp)
+{
+	return 0;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+	int rc = 0;
+	bool flip = false;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->orientation == ORIENTATION_CC2)
+		flip = true;
+
+	dp->power->init(dp->power, flip);
+	dp->ctrl->init(dp->ctrl, flip);
+	dp->aux->init(dp->aux, dp->parser->aux_cfg);
+	enable_irq(dp->irq);
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+	disable_irq(dp->irq);
+	mutex_unlock(&dp->lock);
+
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->hpd_irq) {
+		if (!dp->link->process_request(dp->link))
+			goto end;
+	}
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+	else
+		dp_display_process_hpd_low(dp);
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct device *dev = &dp->pdev->dev;
+	struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+	struct dp_ctrl_in ctrl_in = {
+		.dev = dev,
+	};
+
+	cb->configure  = dp_display_usbpd_configure_cb;
+	cb->disconnect = dp_display_usbpd_disconnect_cb;
+	cb->attention  = dp_display_usbpd_attention_cb;
+
+	dp->usbpd = dp_usbpd_get(dev, cb);
+	if (IS_ERR(dp->usbpd)) {
+		rc = PTR_ERR(dp->usbpd);
+		pr_err("failed to initialize usbpd, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->parser = dp_parser_get(dp->pdev);
+	if (IS_ERR(dp->parser)) {
+		rc = PTR_ERR(dp->parser);
+		pr_err("failed to initialize parser, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+	if (IS_ERR(dp->catalog)) {
+		rc = PTR_ERR(dp->catalog);
+		pr_err("failed to initialize catalog, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->power = dp_power_get(dp->parser);
+	if (IS_ERR(dp->power)) {
+		rc = PTR_ERR(dp->power);
+		pr_err("failed to initialize power, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->aux = dp_aux_get(dev, &dp->catalog->aux);
+	if (IS_ERR(dp->aux)) {
+		rc = PTR_ERR(dp->aux);
+		pr_err("failed to initialize aux, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->panel = dp_panel_get(dev, dp->aux, &dp->catalog->panel);
+	if (IS_ERR(dp->panel)) {
+		rc = PTR_ERR(dp->panel);
+		pr_err("failed to initialize panel, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->link = dp_link_get(dev, dp->aux);
+	if (IS_ERR(dp->link)) {
+		rc = PTR_ERR(dp->link);
+		pr_err("failed to initialize link, rc = %d\n", rc);
+		goto err;
+	}
+
+	ctrl_in.link = dp->link;
+	ctrl_in.panel = dp->panel;
+	ctrl_in.aux = dp->aux;
+	ctrl_in.power = dp->power;
+	ctrl_in.catalog = &dp->catalog->ctrl;
+	ctrl_in.parser = dp->parser;
+
+	dp->ctrl = dp_ctrl_get(&ctrl_in);
+	if (IS_ERR(dp->ctrl)) {
+		rc = PTR_ERR(dp->ctrl);
+		pr_err("failed to initialize ctrl, rc = %d\n", rc);
+		goto err;
+	}
+err:
+	return rc;
+}
+
+static int dp_display_set_mode(struct dp_display *dp_display,
+		struct dp_display_mode *mode)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->panel->pinfo = mode->timing;
+	dp->panel->init_info(dp->panel);
+error:
+	return rc;
+}
+
+static int dp_display_prepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_enable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+	dp->ctrl->on(dp->ctrl);
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_post_enable(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_pre_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->ctrl->off(dp->ctrl);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->aux->deinit(dp->aux);
+	dp->ctrl->deinit(dp->ctrl);
+	dp->power->deinit(dp->power);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_request_irq(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+	if (!dp->irq) {
+		pr_err("failed to get irq\n");
+		return -EINVAL;
+	}
+
+	rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq,
+		IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+	if (rc < 0) {
+		pr_err("failed to request IRQ%u: %d\n",
+				dp->irq, rc);
+		return rc;
+	}
+	disable_irq(dp->irq);
+
+	return 0;
+}
+
+static int dp_display_unprepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_validate_mode(struct dp_display *dp,
+	struct dp_display_mode *mode)
+{
+	return 0;
+}
+
+static int dp_display_get_modes(struct dp_display *dp,
+	struct dp_display_mode *modes, u32 *count)
+{
+	*count = 1;
+
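+	/* A single fixed 1920x1080@60 mode (148.5 MHz) is reported for now */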
+	if (modes) {
+		modes->timing.h_active = 1920;
+		modes->timing.v_active = 1080;
+		modes->timing.h_back_porch = 148;
+		modes->timing.h_front_porch = 88;
+		modes->timing.h_sync_width = 44;
+		modes->timing.h_active_low = 0;
+		modes->timing.v_back_porch = 36;
+		modes->timing.v_front_porch = 4;
+		modes->timing.v_sync_width = 5;
+		modes->timing.v_active_low = 0;
+		modes->timing.h_skew = 0;
+		modes->timing.refresh_rate = 60;
+		modes->timing.pixel_clk_khz = 148500;
+	}
+
+	return 0;
+}
+
+static int dp_display_detect(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	mutex_init(&dp->lock);
+	dp->pdev = pdev;
+	dp->name = "drm_dp";
+
+	rc = dp_init_sub_modules(dp);
+	if (rc) {
+		devm_kfree(&pdev->dev, dp);
+		return -EPROBE_DEFER;
+	}
+
+	platform_set_drvdata(pdev, dp);
+
+	g_dp_display = &dp->dp_display;
+
+	g_dp_display->enable        = dp_display_enable;
+	g_dp_display->post_enable   = dp_display_post_enable;
+	g_dp_display->pre_disable   = dp_display_pre_disable;
+	g_dp_display->disable       = dp_display_disable;
+	g_dp_display->set_mode      = dp_display_set_mode;
+	g_dp_display->validate_mode = dp_display_validate_mode;
+	g_dp_display->get_modes     = dp_display_get_modes;
+	g_dp_display->detect        = dp_display_detect;
+	g_dp_display->prepare       = dp_display_prepare;
+	g_dp_display->unprepare     = dp_display_unprepare;
+	g_dp_display->request_irq   = dp_request_irq;
+
+	rc = component_add(&pdev->dev, &dp_display_comp_ops);
+	if (rc)
+		pr_err("component add failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int dp_display_get_displays(void **displays, int count)
+{
+	if (!displays) {
+		pr_err("invalid data\n");
+		return -EINVAL;
+	}
+
+	if (count != 1) {
+		pr_err("invalid number of displays\n");
+		return -EINVAL;
+	}
+
+	displays[0] = g_dp_display;
+	return count;
+}
+
+int dp_display_get_num_of_displays(void)
+{
+	return 1;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+	dp_ctrl_put(dp->ctrl);
+	dp_link_put(dp->link);
+	dp_panel_put(dp->panel);
+	dp_aux_put(dp->aux);
+	dp_power_put(dp->power);
+	dp_catalog_put(dp->catalog);
+	dp_parser_put(dp->parser);
+	dp_usbpd_put(dp->usbpd);
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+	struct dp_display_private *dp;
+
+	if (!pdev)
+		return -EINVAL;
+
+	dp = platform_get_drvdata(pdev);
+
+	dp_display_deinit_sub_modules(dp);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, dp);
+
+	return 0;
+}
+
+static struct platform_driver dp_display_driver = {
+	.probe  = dp_display_probe,
+	.remove = dp_display_remove,
+	.driver = {
+		.name = "msm-dp-display",
+		.of_match_table = dp_dt_match,
+	},
+};
+
+static int __init dp_display_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&dp_display_driver);
+	if (ret) {
+		pr_err("driver register failed");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(dp_display_init);
+
+static void __exit dp_display_cleanup(void)
+{
+	platform_driver_unregister(&dp_display_driver);
+}
+module_exit(dp_display_cleanup);
+
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644
index 0000000..e684854
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include <drm/drmP.h>
+
+#include "dp_panel.h"
+
+struct dp_display_mode {
+	struct dp_panel_info timing;
+	u32 capabilities;
+};
+
+struct dp_display {
+	struct drm_device *drm_dev;
+	struct dp_bridge *bridge;
+
+	int (*enable)(struct dp_display *dp_display);
+	int (*post_enable)(struct dp_display *dp_display);
+
+	int (*pre_disable)(struct dp_display *dp_display);
+	int (*disable)(struct dp_display *dp_display);
+
+	int (*set_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*validate_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*get_modes)(struct dp_display *dp_display,
+		struct dp_display_mode *modes, u32 *count);
+
+	int (*detect)(struct dp_display *dp_display);
+
+	int (*prepare)(struct dp_display *dp_display);
+	int (*unprepare)(struct dp_display *dp_display);
+	int (*request_irq)(struct dp_display *dp_display);
+};
+
+int dp_display_get_num_of_displays(void);
+int dp_display_get_displays(void **displays, int count);
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644
index 0000000..0f6e36f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dp_drm.h"
+
+#define to_dp_bridge(x)     container_of((x), struct dp_bridge, base)
+
+static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
+				struct dp_display_mode *dp_mode)
+{
+	memset(dp_mode, 0, sizeof(*dp_mode));
+
+	dp_mode->timing.h_active = drm_mode->hdisplay;
+	dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+	dp_mode->timing.h_sync_width = drm_mode->htotal -
+			(drm_mode->hsync_start + dp_mode->timing.h_back_porch);
+	dp_mode->timing.h_front_porch = drm_mode->hsync_start -
+					 drm_mode->hdisplay;
+	dp_mode->timing.h_skew = drm_mode->hskew;
+
+	dp_mode->timing.v_active = drm_mode->vdisplay;
+	dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+	dp_mode->timing.v_sync_width = drm_mode->vtotal -
+		(drm_mode->vsync_start + dp_mode->timing.v_back_porch);
+
+	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
+					 drm_mode->vdisplay;
+
+	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+	dp_mode->timing.pixel_clk_khz = drm_mode->clock;
+
+	dp_mode->timing.v_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+	dp_mode->timing.h_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC);
+}
+
+static void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
+				struct drm_display_mode *drm_mode)
+{
+	u32 flags = 0;
+
+	memset(drm_mode, 0, sizeof(*drm_mode));
+
+	drm_mode->hdisplay = dp_mode->timing.h_active;
+	drm_mode->hsync_start = drm_mode->hdisplay +
+				dp_mode->timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+			      dp_mode->timing.h_sync_width;
+	drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch;
+	drm_mode->hskew = dp_mode->timing.h_skew;
+
+	drm_mode->vdisplay = dp_mode->timing.v_active;
+	drm_mode->vsync_start = drm_mode->vdisplay +
+				dp_mode->timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+			      dp_mode->timing.v_sync_width;
+	drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch;
+
+	drm_mode->vrefresh = dp_mode->timing.refresh_rate;
+	drm_mode->clock = dp_mode->timing.pixel_clk_khz;
+
+	if (dp_mode->timing.h_active_low)
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PHSYNC;
+
+	if (dp_mode->timing.v_active_low)
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PVSYNC;
+
+	drm_mode->flags = flags;
+
+	drm_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(drm_mode);
+}
+
+static int dp_bridge_attach(struct drm_bridge *dp_bridge)
+{
+	struct dp_bridge *bridge;
+
+	if (!dp_bridge) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	bridge = to_dp_bridge(dp_bridge);
+	pr_debug("[%d] attached\n", bridge->id);
+
+	return 0;
+}
+
+static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	/* By this point mode should have been validated through mode_fixup */
+	rc = dp->set_mode(dp, &bridge->dp_mode);
+	if (rc) {
+		pr_err("[%d] failed to perform a mode set, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->prepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display prepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->enable(dp);
+	if (rc) {
+		pr_err("[%d] DP display enable failed, rc=%d\n",
+		       bridge->id, rc);
+		dp->unprepare(dp);
+	}
+}
+
+static void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->post_enable(dp);
+	if (rc)
+		pr_err("[%d] DP display post enable failed, rc=%d\n",
+		       bridge->id, rc);
+}
+
+static void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->pre_disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display pre disable failed, rc=%d\n",
+		       bridge->id, rc);
+	}
+}
+
+static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display disable failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->unprepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display unprepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+}
+
+static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
+	convert_to_dp_mode(adjusted_mode, &bridge->dp_mode);
+}
+
+static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	int rc = 0;
+	bool ret = true;
+	struct dp_display_mode dp_mode;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		ret = false;
+		goto end;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	convert_to_dp_mode(mode, &dp_mode);
+
+	rc = dp->validate_mode(dp, &dp_mode);
+	if (rc) {
+		pr_err("[%d] mode is not valid, rc=%d\n", bridge->id, rc);
+		ret = false;
+	} else {
+		convert_to_drm_mode(&dp_mode, adjusted_mode);
+	}
+end:
+	return ret;
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+	.attach       = dp_bridge_attach,
+	.mode_fixup   = dp_bridge_mode_fixup,
+	.pre_enable   = dp_bridge_pre_enable,
+	.enable       = dp_bridge_enable,
+	.disable      = dp_bridge_disable,
+	.post_disable = dp_bridge_post_disable,
+	.mode_set     = dp_bridge_mode_set,
+};
+
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct dp_display *dp_display = display;
+
+	if (!info || !dp_display)
+		return -EINVAL;
+
+	return 0;
+}
+
+int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
+	struct msm_display_topology *topology, u32 max_mixer_width)
+{
+	const u32 dual_lm = 2;
+	const u32 single_lm = 1;
+	const u32 single_intf = 1;
+	const u32 no_enc = 0;
+
+	if (!drm_mode || !topology || !max_mixer_width) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
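+	/* Use two layer mixers when the mode exceeds a single mixer's width */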
+	topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
+							dual_lm : single_lm;
+	topology->num_enc = no_enc;
+	topology->num_intf = single_intf;
+
+	return 0;
+}
+
+int dp_connector_get_info(struct msm_display_info *info, void *data)
+{
+	struct dp_display *display = data;
+
+	if (!info || !display) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+
+	info->is_connected = true;
+	info->frame_rate = 60;
+	info->width_mm = 160;
+	info->height_mm = 90;
+	info->max_width = 1920;
+	info->max_height = 1080;
+	info->vtotal = 1125;
+	info->is_primary = true;
+	info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+	info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+
+	return 0;
+}
+
+enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!conn || !display)
+		return status;
+
+	/* get display dp_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = dp_connector_get_info(&info, display);
+	if (rc) {
+		pr_err("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+					      connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	conn->display_info.width_mm = info.width_mm;
+	conn->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	u32 count = 0;
+	u32 size = 0;
+	struct dp_display_mode *modes;
+	struct drm_display_mode drm_mode;
+	struct dp_display *dp;
+	int rc, i;
+
+	if (!connector || !display || sde_connector_get_panel(connector))
+		goto end;
+
+	dp = display;
+
+	rc = dp->get_modes(dp, NULL, &count);
+	if (rc) {
+		pr_err("failed to get num of modes, rc=%d\n", rc);
+		goto end;
+	}
+
+	size = count * sizeof(*modes);
+	modes = kzalloc(size,  GFP_KERNEL);
+	if (!modes) {
+		count = 0;
+		goto end;
+	}
+
+	rc = dp->get_modes(dp, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		count = 0;
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_display_mode *m;
+
+		memset(&drm_mode, 0x0, sizeof(drm_mode));
+		convert_to_drm_mode(&modes[i], &drm_mode);
+		m = drm_mode_duplicate(connector->dev, &drm_mode);
+		if (!m) {
+			pr_err("failed to add mode %ux%u\n",
+			       drm_mode.hdisplay,
+			       drm_mode.vdisplay);
+			count = -ENOMEM;
+			goto error;
+		}
+		m->width_mm = connector->display_info.width_mm;
+		m->height_mm = connector->display_info.height_mm;
+		drm_mode_probed_add(connector, m);
+	}
+error:
+	kfree(modes);
+end:
+	pr_debug("MODE COUNT =%d\n\n", count);
+	return count;
+}
+
+int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct drm_device *dev;
+	struct dp_display *display = data;
+	struct msm_drm_private *priv = NULL;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	dev = display->drm_dev;
+	bridge->display = display;
+	bridge->base.funcs = &dp_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	priv = dev->dev_private;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		pr_err("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	rc = display->request_irq(display);
+	if (rc) {
+		pr_err("request_irq failed, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	display->bridge = bridge;
+
+	return 0;
+error_free_bridge:
+	kfree(bridge);
+error:
+	return rc;
+}
+
+void dp_drm_bridge_deinit(void *data)
+{
+	struct dp_display *display = data;
+	struct dp_bridge *bridge = display->bridge;
+
+	if (bridge && bridge->base.encoder)
+		bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
+
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_OK;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 0000000..bef3758
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct dp_bridge {
+	struct drm_bridge base;
+	u32 id;
+
+	struct dp_display *display;
+	struct dp_display_mode dp_mode;
+};
+
+/**
+ * dp_connector_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dp_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display);
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+/**
+ * dp_connector_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: zero on success
+ */
+int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
+		struct msm_display_topology *topology,
+		u32 max_mixer_width);
+
+int dp_connector_get_info(struct msm_display_info *info, void *display);
+
+int dp_drm_bridge_init(void *display,
+	struct drm_encoder *encoder);
+
+void dp_drm_bridge_deinit(void *display);
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644
index 0000000..e9955a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -0,0 +1,1809 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_LINK_ENUM_STR(x)		#x
+
+enum dp_lane_count {
+	DP_LANE_COUNT_1	= 1,
+	DP_LANE_COUNT_2	= 2,
+	DP_LANE_COUNT_4	= 4,
+};
+
+enum phy_test_pattern {
+	PHY_TEST_PATTERN_NONE,
+	PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING,
+	PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT,
+	PHY_TEST_PATTERN_PRBS7,
+	PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN,
+	PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN,
+};
+
+enum dynamic_range {
+	DP_DYNAMIC_RANGE_RGB_VESA = 0x00,
+	DP_DYNAMIC_RANGE_RGB_CEA = 0x01,
+	DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum test_video_pattern {
+	DP_TEST_VIDEO_PATTERN_NONE = 0x00,
+	DP_TEST_VIDEO_PATTERN_COLOR_RAMPS = 0x01,
+	DP_TEST_VIDEO_PATTERN_BW_VERT_LINES = 0x02,
+	DP_TEST_VIDEO_PATTERN_COLOR_SQUARE = 0x03,
+};
+
+enum test_bit_depth {
+	DP_TEST_BIT_DEPTH_6 = 0x00,
+	DP_TEST_BIT_DEPTH_8 = 0x01,
+	DP_TEST_BIT_DEPTH_10 = 0x02,
+	DP_TEST_BIT_DEPTH_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum dp_link_response {
+	TEST_ACK			= 0x1,
+	TEST_NACK			= 0x2,
+	TEST_EDID_CHECKSUM_WRITE	= 0x4,
+};
+
+enum audio_sample_rate {
+	AUDIO_SAMPLE_RATE_32_KHZ	= 0x00,
+	AUDIO_SAMPLE_RATE_44_1_KHZ	= 0x01,
+	AUDIO_SAMPLE_RATE_48_KHZ	= 0x02,
+	AUDIO_SAMPLE_RATE_88_2_KHZ	= 0x03,
+	AUDIO_SAMPLE_RATE_96_KHZ	= 0x04,
+	AUDIO_SAMPLE_RATE_176_4_KHZ	= 0x05,
+	AUDIO_SAMPLE_RATE_192_KHZ	= 0x06,
+};
+
+enum audio_pattern_type {
+	AUDIO_TEST_PATTERN_OPERATOR_DEFINED	= 0x00,
+	AUDIO_TEST_PATTERN_SAWTOOTH		= 0x01,
+};
+
+struct dp_link_request {
+	u32 test_requested;
+	u32 test_link_rate;
+	u32 test_lane_count;
+	u32 phy_test_pattern_sel;
+	u32 test_video_pattern;
+	u32 test_bit_depth;
+	u32 test_dyn_range;
+	u32 test_h_total;
+	u32 test_v_total;
+	u32 test_h_start;
+	u32 test_v_start;
+	u32 test_hsync_pol;
+	u32 test_hsync_width;
+	u32 test_vsync_pol;
+	u32 test_vsync_width;
+	u32 test_h_width;
+	u32 test_v_height;
+	u32 test_rr_d;
+	u32 test_rr_n;
+	u32 test_audio_sampling_rate;
+	u32 test_audio_channel_count;
+	u32 test_audio_pattern_type;
+	u32 test_audio_period_ch_1;
+	u32 test_audio_period_ch_2;
+	u32 test_audio_period_ch_3;
+	u32 test_audio_period_ch_4;
+	u32 test_audio_period_ch_5;
+	u32 test_audio_period_ch_6;
+	u32 test_audio_period_ch_7;
+	u32 test_audio_period_ch_8;
+	u32 response;
+};
+
+struct dp_link_sink_count {
+	u32 count;
+	bool cp_ready;
+};
+
+struct dp_link_status {
+	u8 lane_01_status;
+	u8 lane_23_status;
+	u8 interlane_align_done;
+	u8 downstream_port_status_changed;
+	u8 link_status_updated;
+	u8 port_0_in_sync;
+	u8 port_1_in_sync;
+	u8 req_voltage_swing[4];
+	u8 req_pre_emphasis[4];
+};
+
+struct dp_link_private {
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_link dp_link;
+
+	struct dp_link_request request;
+	struct dp_link_sink_count sink_count;
+	struct dp_link_status link_status;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(enum test_bit_depth tbd)
+{
+	u32 bpp;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		bpp = 18;
+		break;
+	case DP_TEST_BIT_DEPTH_8:
+		bpp = 24;
+		break;
+	case DP_TEST_BIT_DEPTH_10:
+		bpp = 30;
+		break;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		bpp = 0;
+	}
+
+	return bpp;
+}
+
+static char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_NONE);
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING);
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+		return DP_LINK_ENUM_STR(
+			PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
+	case PHY_TEST_PATTERN_PRBS7:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_PRBS7);
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN);
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_test_pattern(u32 pattern)
+{
+	switch (pattern) {
+	case AUDIO_TEST_PATTERN_OPERATOR_DEFINED:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED);
+	case AUDIO_TEST_PATTERN_SAWTOOTH:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_sample_rate(u32 rate)
+{
+	switch (rate) {
+	case AUDIO_SAMPLE_RATE_32_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ);
+	case AUDIO_SAMPLE_RATE_44_1_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ);
+	case AUDIO_SAMPLE_RATE_48_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ);
+	case AUDIO_SAMPLE_RATE_88_2_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ);
+	case AUDIO_SAMPLE_RATE_96_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ);
+	case AUDIO_SAMPLE_RATE_176_4_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ);
+	case AUDIO_SAMPLE_RATE_192_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ);
+	default:
+		return "unknown";
+	}
+}
+
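+/**
+ * dp_link_get_period() - read one TEST_AUDIO_PERIOD_CH_x value from the DPCD
+ * @link: Display Port Driver data
+ * @addr: DPCD address of the channel's test audio period register
+ *
+ * Returns the 4-bit period value on success, or -EINVAL if the AUX read
+ * fails or the value exceeds the maximum audio period.
+ */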
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	u32 const param_len = 0x1;
+	u32 const max_audio_period = 0xA;
+
+	/* TEST_AUDIO_PERIOD_CH_XX */
+	rlen = link->aux->read(link->aux, addr, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read test_audio_period (0x%x)\n", addr);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	data = *bp;
+
+	/* Period - Bits 3:0 */
+	data = data & 0xF;
+	if ((int)data > max_audio_period) {
+		pr_err("invalid test_audio_period_ch_1 = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = data;
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+	int ret = 0;
+	int const test_audio_period_ch_1_addr = 0x273;
+	int const test_audio_period_ch_2_addr = 0x274;
+	int const test_audio_period_ch_3_addr = 0x275;
+	int const test_audio_period_ch_4_addr = 0x276;
+	int const test_audio_period_ch_5_addr = 0x277;
+	int const test_audio_period_ch_6_addr = 0x278;
+	int const test_audio_period_ch_7_addr = 0x279;
+	int const test_audio_period_ch_8_addr = 0x27A;
+	struct dp_link_request *req = &link->request;
+
+	/* TEST_AUDIO_PERIOD_CH_1 (Byte 0x273) */
+	ret = dp_link_get_period(link, test_audio_period_ch_1_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_1 = ret;
+	pr_debug("test_audio_period_ch_1 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_2 (Byte 0x274) */
+	ret = dp_link_get_period(link, test_audio_period_ch_2_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_2 = ret;
+	pr_debug("test_audio_period_ch_2 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+	ret = dp_link_get_period(link, test_audio_period_ch_3_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_3 = ret;
+	pr_debug("test_audio_period_ch_3 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_4 (Byte 0x276) */
+	ret = dp_link_get_period(link, test_audio_period_ch_4_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_4 = ret;
+	pr_debug("test_audio_period_ch_4 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_5 (Byte 0x277) */
+	ret = dp_link_get_period(link, test_audio_period_ch_5_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_5 = ret;
+	pr_debug("test_audio_period_ch_5 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_6 (Byte 0x278) */
+	ret = dp_link_get_period(link, test_audio_period_ch_6_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_6 = ret;
+	pr_debug("test_audio_period_ch_6 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_7 (Byte 0x279) */
+	ret = dp_link_get_period(link, test_audio_period_ch_7_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_7 = ret;
+	pr_debug("test_audio_period_ch_7 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_8 (Byte 0x27A) */
+	ret = dp_link_get_period(link, test_audio_period_ch_8_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_8 = ret;
+	pr_debug("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_pattern_type_addr = 0x272;
+	int const max_audio_pattern_type = 0x1;
+
+	/* Read the requested audio pattern type (Byte 0x272). */
+	rlen = link->aux->read(link->aux, test_audio_pattern_type_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Audio Pattern Type - Bits 7:0 */
+	if ((int)data > max_audio_pattern_type) {
+		pr_err("invalid audio pattern type = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_pattern_type = data;
+	pr_debug("audio pattern type = %s\n",
+			dp_link_get_audio_test_pattern(data));
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_mode_addr = 0x271;
+	int const max_audio_sampling_rate = 0x6;
+	int const max_audio_channel_count = 0x8;
+	int sampling_rate = 0x0;
+	int channel_count = 0x0;
+
+	/* Read the requested audio mode (Byte 0x271). */
+	rlen = link->aux->read(link->aux, test_audio_mode_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Sampling Rate - Bits 3:0 */
+	sampling_rate = data & 0xF;
+	if (sampling_rate > max_audio_sampling_rate) {
+		pr_err("sampling rate (0x%x) greater than max (0x%x)\n",
+				sampling_rate, max_audio_sampling_rate);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Channel Count - Bits 7:4 */
+	channel_count = ((data & 0xF0) >> 4) + 1;
+	if (channel_count > max_audio_channel_count) {
+		pr_err("channel_count (0x%x) greater than max (0x%x)\n",
+				channel_count, max_audio_channel_count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_sampling_rate = sampling_rate;
+	link->request.test_audio_channel_count = channel_count;
+	pr_debug("sampling_rate = %s, channel_count = 0x%x\n",
+		dp_link_get_audio_sample_rate(sampling_rate), channel_count);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_parse_audio_pattern_params() - parses audio pattern parameters from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the audio test pattern parameters.
+ */
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+
+	ret = dp_link_parse_audio_mode(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_pattern_type(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_video_pattern_valid() - validates the video pattern
+ * @pattern: video pattern requested by the sink
+ *
+ * Returns true if the requested video pattern is supported.
+ */
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+	switch (pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_video_pattern_to_string(u32 test_video_pattern)
+{
+	switch (test_video_pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_NONE);
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_RAMPS);
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_BW_VERT_LINES);
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_SQUARE);
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_dynamic_range_valid() - validates the dynamic range
+ * @dr: the dynamic range value to be checked
+ *
+ * Returns true if the dynamic range value is supported.
+ */
+static bool dp_link_is_dynamic_range_valid(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_dynamic_range_to_string(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA);
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA);
+	case DP_DYNAMIC_RANGE_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+	/* DP_TEST_BIT_DEPTH_UNKNOWN is treated as invalid */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+	case DP_TEST_BIT_DEPTH_8:
+	case DP_TEST_BIT_DEPTH_10:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_bit_depth_to_string(u32 tbd)
+{
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6);
+	case DP_TEST_BIT_DEPTH_8:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8);
+	case DP_TEST_BIT_DEPTH_10:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10);
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
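+/*
+ * The test timing parameters below are stored in the DPCD as 2-byte,
+ * big-endian quantities. dp_link_parse_timing_params1() returns the full
+ * 16-bit value, dp_link_parse_timing_params2() splits bit 7 of the first
+ * byte (e.g. a sync polarity) from the remaining 15-bit value, and
+ * dp_link_parse_timing_params3() reads a single byte.
+ */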
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+	int const addr, int const len, u32 *val)
+{
+	u8 *bp;
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/* Read the 2-byte timing parameter at the given DPCD address. */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val = bp[1] | (bp[0] << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+	int const addr, int const len, u32 *val1, u32 *val2)
+{
+	u8 *bp;
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/* Read the 2-byte timing parameter at the given DPCD address. */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val1 = (bp[0] & BIT(7)) >> 7;
+	*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+	int const addr, u32 *val)
+{
+	u8 *bp;
+	u32 len = 1;
+	int rlen;
+
+	/* Read the 1-byte timing parameter at the given DPCD address. */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < 1) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+	*val = bp[0];
+
+	return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the test video pattern and the test
+ * bit depth requested by the sink, and if the parsed values are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+	int rlen;
+	u8 *bp;
+	u8 data;
+	u32 dyn_range;
+	int const param_len = 0x1;
+	int const test_video_pattern_addr = 0x221;
+	int const test_misc_addr = 0x232;
+
+	/* Read the requested video link pattern (Byte 0x221). */
+	rlen = link->aux->read(link->aux, test_video_pattern_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link video pattern\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	if (!dp_link_is_video_pattern_valid(data)) {
+		pr_err("invalid link video pattern = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_video_pattern = data;
+	pr_debug("link video pattern = 0x%x (%s)\n",
+		link->request.test_video_pattern,
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
+	rlen = link->aux->read(link->aux, test_misc_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link bit depth\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Dynamic Range */
+	dyn_range = (data & BIT(3)) >> 3;
+	if (!dp_link_is_dynamic_range_valid(dyn_range)) {
+		pr_err("invalid link dynamic range = 0x%x", dyn_range);
+		ret = -EINVAL;
+		goto exit;
+	}
+	link->request.test_dyn_range = dyn_range;
+	pr_debug("link dynamic range = 0x%x (%s)\n",
+		link->request.test_dyn_range,
+		dp_link_dynamic_range_to_string(
+			link->request.test_dyn_range));
+
+	/* Color bit depth */
+	data &= (BIT(5) | BIT(6) | BIT(7));
+	data >>= 5;
+	if (!dp_link_is_bit_depth_valid(data)) {
+		pr_err("invalid link bit depth = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_bit_depth = data;
+	pr_debug("link bit depth = 0x%x (%s)\n",
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_string(link->request.test_bit_depth));
+
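+	/*
+	 * Test timing block (DPCD 0x222 - 0x234): H/V total, start, sync
+	 * polarity and width, active width/height and the refresh rate
+	 * numerator/denominator requested by the sink.
+	 */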
+	/* resolution timing params */
+	ret = dp_link_parse_timing_params1(link, 0x222, 2,
+			&link->request.test_h_total);
+	if (ret) {
+		pr_err("failed to parse test_h_total (0x222)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_TOTAL = %d\n", link->request.test_h_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x224, 2,
+			&link->request.test_v_total);
+	if (ret) {
+		pr_err("failed to parse test_v_total (0x224)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_TOTAL = %d\n", link->request.test_v_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x226, 2,
+			&link->request.test_h_start);
+	if (ret) {
+		pr_err("failed to parse test_h_start (0x226)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_START = %d\n", link->request.test_h_start);
+
+	ret = dp_link_parse_timing_params1(link, 0x228, 2,
+			&link->request.test_v_start);
+	if (ret) {
+		pr_err("failed to parse test_v_start (0x228)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_START = %d\n", link->request.test_v_start);
+
+	ret = dp_link_parse_timing_params2(link, 0x22A, 2,
+			&link->request.test_hsync_pol,
+			&link->request.test_hsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22A)\n");
+		goto exit;
+	}
+	pr_debug("TEST_HSYNC_POL = %d\n", link->request.test_hsync_pol);
+	pr_debug("TEST_HSYNC_WIDTH = %d\n", link->request.test_hsync_width);
+
+	ret = dp_link_parse_timing_params2(link, 0x22C, 2,
+			&link->request.test_vsync_pol,
+			&link->request.test_vsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22C)\n");
+		goto exit;
+	}
+	pr_debug("TEST_VSYNC_POL = %d\n", link->request.test_vsync_pol);
+	pr_debug("TEST_VSYNC_WIDTH = %d\n", link->request.test_vsync_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x22E, 2,
+			&link->request.test_h_width);
+	if (ret) {
+		pr_err("failed to parse test_h_width (0x22E)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_WIDTH = %d\n", link->request.test_h_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x230, 2,
+			&link->request.test_v_height);
+	if (ret) {
+		pr_err("failed to parse test_v_height (0x230)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_HEIGHT = %d\n", link->request.test_v_height);
+
+	ret = dp_link_parse_timing_params3(link, 0x233,
+		&link->request.test_rr_d);
+	link->request.test_rr_d &= BIT(0);
+	if (ret) {
+		pr_err("failed to parse test_rr_d (0x233)\n");
+		goto exit;
+	}
+	pr_debug("TEST_REFRESH_DENOMINATOR = %d\n", link->request.test_rr_d);
+
+	ret = dp_link_parse_timing_params3(link, 0x234,
+		&link->request.test_rr_n);
+	if (ret) {
+		pr_err("failed to parse test_rr_n (0x234)\n");
+		goto exit;
+	}
+	pr_debug("TEST_REFRESH_NUMERATOR = %d\n", link->request.test_rr_n);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_link_rate_valid() - validates the link rate
+ * @link_rate: link rate requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static bool dp_link_is_link_rate_valid(u32 link_rate)
+{
+	return ((link_rate == DP_LINK_RATE_162) ||
+		(link_rate == DP_LINK_RATE_270) ||
+		(link_rate == DP_LINK_RATE_540) ||
+		(link_rate == DP_LINK_RATE_810));
+}
+
+/**
+ * dp_link_is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static bool dp_link_is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == DP_LANE_COUNT_1) ||
+		(lane_count == DP_LANE_COUNT_2) ||
+		(lane_count == DP_LANE_COUNT_4);
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if these parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int ret = 0;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_link_rate_addr = 0x219;
+	int const test_lane_count_addr = 0x220;
+
+	/* Read the requested link rate (Byte 0x219). */
+	rlen = link->aux->read(link->aux, test_link_rate_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link rate\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	if (!dp_link_is_link_rate_valid(data)) {
+		pr_err("invalid link rate = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_link_rate = data;
+	pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
+
+	/* Read the requested lane count (Byte 0x220). */
+	rlen = link->aux->read(link->aux, test_lane_count_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read lane count\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+	data &= 0x1F;
+
+	if (!dp_link_is_lane_count_valid(data)) {
+		pr_err("invalid lane count = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_lane_count = data;
+	pr_debug("lane count = 0x%x\n", link->request.test_lane_count);
+exit:
+	return ret;
+}
+
+static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+	case PHY_TEST_PATTERN_PRBS7:
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test pattern parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const phy_test_pattern_addr = 0x248;
+	int ret = 0;
+
+	rlen = link->aux->read(link->aux, phy_test_pattern_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read phy link pattern\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	link->request.phy_test_pattern_sel = data;
+
+	pr_debug("phy_test_pattern_sel = %s\n",
+			dp_link_get_phy_test_pattern(data));
+
+	if (!dp_link_is_phy_test_pattern_supported(data))
+		ret = -EINVAL;
+end:
+	return ret;
+}
+
+static char *dp_link_get_test_name(u32 test_requested)
+{
+	switch (test_requested) {
+	case TEST_LINK_TRAINING: return DP_LINK_ENUM_STR(TEST_LINK_TRAINING);
+	case TEST_VIDEO_PATTERN: return DP_LINK_ENUM_STR(TEST_VIDEO_PATTERN);
+	case PHY_TEST_PATTERN:	 return DP_LINK_ENUM_STR(PHY_TEST_PATTERN);
+	case TEST_EDID_READ:	 return DP_LINK_ENUM_STR(TEST_EDID_READ);
+	case TEST_AUDIO_PATTERN: return DP_LINK_ENUM_STR(TEST_AUDIO_PATTERN);
+	default:		 return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test requested by the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+	return (link == TEST_VIDEO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_VIDEO_PATTERN)) ||
+		(link == TEST_AUDIO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_AUDIO_DISABLED_VIDEO));
+}
+
+/**
+ * dp_link_is_test_supported() - checks if test requested by sink is supported
+ * @test_requested: test requested by the sink
+ *
+ * Returns true if the requested test is supported.
+ */
+static bool dp_link_is_test_supported(u32 test_requested)
+{
+	return (test_requested == TEST_LINK_TRAINING) ||
+		(test_requested == TEST_EDID_READ) ||
+		(test_requested == PHY_TEST_PATTERN) ||
+		dp_link_is_video_audio_test_requested(test_requested);
+}
+
+/**
+ * dp_link_parse_request() - parses test request parameters from the sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	u32 const param_len = 0x1;
+	u32 const device_service_irq_addr = 0x201;
+	u32 const test_request_addr = 0x218;
+	u8 buf[4];
+
+	/*
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+	 */
+	rlen = link->aux->read(link->aux, device_service_irq_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("device service irq vector = 0x%x\n", data);
+
+	if (!(data & BIT(1))) {
+		pr_debug("no link requested\n");
+		goto end;
+	}
+
+	/*
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+	 */
+	rlen = link->aux->read(link->aux, test_request_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	if (!dp_link_is_test_supported(data)) {
+		pr_debug("link 0x%x not supported\n", data);
+		goto end;
+	}
+
+	pr_debug("%s (0x%x) requested\n", dp_link_get_test_name(data), data);
+	link->request.test_requested = data;
+
+	if (link->request.test_requested == PHY_TEST_PATTERN) {
+		ret = dp_link_parse_phy_test_params(link);
+		if (ret)
+			goto end;
+		ret = dp_link_parse_link_training_params(link);
+	}
+
+	if (link->request.test_requested == TEST_LINK_TRAINING)
+		ret = dp_link_parse_link_training_params(link);
+
+	if (dp_link_is_video_audio_test_requested(
+			link->request.test_requested)) {
+		ret = dp_link_parse_video_pattern_params(link);
+		if (ret)
+			goto end;
+
+		ret = dp_link_parse_audio_pattern_params(link);
+	}
+end:
+	/* clear the test request IRQ */
+	buf[0] = 1;
+	link->aux->write(link->aux, test_request_addr, 1, AUX_NATIVE, buf);
+
+	/*
+	 * Send a TEST_ACK if all test parameters are valid, otherwise send
+	 * a TEST_NACK.
+	 */
+	if (ret)
+		link->request.response = TEST_NACK;
+	else
+		link->request.response = TEST_ACK;
+
+	return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static void dp_link_parse_sink_count(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const sink_count_addr = 0x200;
+
+	rlen = link->aux->read(link->aux, sink_count_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read sink count\n");
+		return;
+	}
+
+	data = *bp;
+
+	/* SINK_COUNT: bit 7 holds bit 6 of the count, bits 5:0 the low bits */
+	link->sink_count.count = ((data & BIT(7)) >> 1) | (data & 0x3F);
+	/* bit 6: CP_READY */
+	link->sink_count.cp_ready = data & BIT(6);
+
+	pr_debug("sink_count = 0x%x, cp_ready = 0x%x\n",
+		link->sink_count.count, link->sink_count.cp_ready);
+}
+
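+/**
+ * dp_link_link_status_read() - read and unpack the DPCD link status block
+ * @link: Display Port Driver data
+ *
+ * Reads the 6 link status bytes starting at DPCD 0x202 and caches the lane
+ * status, interlane align and IRQ bits, sink port sync status and the
+ * per-lane voltage swing and pre-emphasis adjust requests.
+ */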
+static int dp_link_link_status_read(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen, ret = 0;
+	int const addr = 0x202;
+	int const len = 6;
+	struct dp_link_status *sp;
+
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("dp aux read failed\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	sp = &link->link_status;
+
+	data = *bp++; /* byte 0x202 */
+	sp->lane_01_status = data; /* lane 0, 1 */
+
+	data = *bp++; /* byte 0x203 */
+	sp->lane_23_status = data; /* lane 2, 3 */
+
+	data = *bp++; /* byte 0x204 */
+	sp->interlane_align_done = (data & BIT(0));
+	sp->downstream_port_status_changed = (data & BIT(6));
+	sp->link_status_updated = (data & BIT(7));
+
+	data = *bp++; /* byte 0x205 */
+	sp->port_0_in_sync = (data & BIT(0));
+	sp->port_1_in_sync = (data & BIT(1));
+
+	data = *bp++; /* byte 0x206 */
+	sp->req_voltage_swing[0] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[0] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[1] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[1] = data & 0x03;
+
+	data = *bp++; /* byte 0x207 */
+	sp->req_voltage_swing[2] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[2] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[3] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[3] = data & 0x03;
+
+	return 0;
+error:
+	return ret;
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+	dp_link_parse_sink_count(link);
+	dp_link_parse_request(link);
+	dp_link_link_status_read(link);
+}
+
+static bool dp_link_is_link_training_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested == TEST_LINK_TRAINING);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * link rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_link_training_requested(link))
+		return -EINVAL;
+
+	pr_debug("%s link rate = 0x%x, lane count = 0x%x\n",
+			dp_link_get_test_name(TEST_LINK_TRAINING),
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	return 0;
+}
+
+static bool dp_link_phy_pattern_requested(struct dp_link *dp_link)
+{
+	struct dp_link_private *link = container_of(dp_link,
+			struct dp_link_private, dp_link);
+
+	return (link->request.test_requested == PHY_TEST_PATTERN);
+}
+
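+/**
+ * dp_link_parse_vx_px() - parse the requested voltage swing/pre-emphasis
+ * @link: Display Port Driver data
+ *
+ * Each adjust request byte (DPCD 0x206/0x207) packs two lanes, with two
+ * bits of voltage swing followed by two bits of pre-emphasis per lane.
+ * The lane 0 request is used to update the driver's v_level and p_level.
+ */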
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int const param_len = 0x1;
+	int const addr1 = 0x206;
+	int const addr2 = 0x207;
+	int ret = 0;
+	u32 v0, p0, v1, p1, v2, p2, v3, p3;
+	int rlen;
+
+	pr_debug("\n");
+
+	rlen = link->aux->read(link->aux, addr1, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 0/1\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
+
+	v0 = data & 0x3;
+	data = data >> 2;
+	p0 = data & 0x3;
+	data = data >> 2;
+
+	v1 = data & 0x3;
+	data = data >> 2;
+	p1 = data & 0x3;
+	data = data >> 2;
+
+	rlen = link->aux->read(link->aux, addr2, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 2/3\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
+
+	v2 = data & 0x3;
+	data = data >> 2;
+	p2 = data & 0x3;
+	data = data >> 2;
+
+	v3 = data & 0x3;
+	data = data >> 2;
+	p3 = data & 0x3;
+	data = data >> 2;
+
+	pr_debug("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3);
+	pr_debug("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3);
+
+	/*
+	 * Update the voltage and pre-emphasis levels as per DPCD request
+	 * vector.
+	 */
+	pr_debug("Current: v_level = 0x%x, p_level = 0x%x\n",
+			link->dp_link.v_level, link->dp_link.p_level);
+	pr_debug("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0);
+	link->dp_link.v_level = v0;
+	link->dp_link.p_level = p0;
+
+	pr_debug("Success\n");
+end:
+	return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy test requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy test pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy test pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+		struct dp_link_private *link)
+{
+	u32 test_link_rate = 0, test_lane_count = 0;
+
+	if (!dp_link_phy_pattern_requested(&link->dp_link))
+		return -EINVAL;
+
+	test_link_rate = link->request.test_link_rate;
+	test_lane_count = link->request.test_lane_count;
+
+	if (!dp_link_is_link_rate_valid(test_link_rate) ||
+		!dp_link_is_lane_count_valid(test_lane_count)) {
+		pr_err("Invalid params: link rate = 0x%x, lane count = 0x%x\n",
+				test_link_rate, test_lane_count);
+		return -EINVAL;
+	}
+
+	pr_debug("start\n");
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	dp_link_parse_vx_px(link);
+
+	pr_debug("end\n");
+
+	return 0;
+}
+
+static bool dp_link_is_link_status_updated(struct dp_link_private *link)
+{
+	return link->link_status.link_status_updated;
+}
+
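+/*
+ * Each lane contributes a nibble to the LANEx_y_STATUS registers: bit 0 is
+ * clock recovery done, bit 1 is channel equalization done and bit 2 is
+ * symbol lock. The helpers below build a mask covering the active lanes
+ * (0x7 per lane for channel eq, 0x1 per lane for clock recovery) and
+ * compare it against the cached link status.
+ */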
+static bool dp_link_channel_eq_done(struct dp_link_private *link)
+{
+	u32 mask, data;
+	struct dp_link *dp_link = &link->dp_link;
+
+	pr_debug("\n");
+
+	dp_link_link_status_read(link);
+
+	if (!link->link_status.interlane_align_done) { /* not aligned */
+		pr_err("interlane align failed\n");
+		return false;
+	}
+
+	if (dp_link->lane_count == 1) {
+		mask = 0x7;
+		data = link->link_status.lane_01_status;
+	} else if (dp_link->lane_count == 2) {
+		mask = 0x77;
+		data = link->link_status.lane_01_status;
+	} else {
+		mask = 0x7777;
+		data = link->link_status.lane_23_status;
+		data <<= 8;
+		data |= link->link_status.lane_01_status;
+	}
+
+	data &= mask;
+	pr_debug("data=%x mask=%x\n", data, mask);
+
+	if (data == mask) /* all done */
+		return true;
+
+	return false;
+}
+
+static bool dp_link_clock_recovery_done(struct dp_link_private *link)
+{
+	u32 mask, data;
+	struct dp_link *dp_link = &link->dp_link;
+
+	dp_link_link_status_read(link);
+
+	if (dp_link->lane_count == 1) {
+		mask = 0x01;	/* lane 0 */
+		data = link->link_status.lane_01_status;
+	} else if (dp_link->lane_count == 2) {
+		mask = 0x011; /* lanes 0, 1 */
+		data = link->link_status.lane_01_status;
+	} else {
+		mask = 0x01111; /* lanes 0, 1, 2, 3 */
+		data = link->link_status.lane_23_status;
+		data <<= 8;
+		data |= link->link_status.lane_01_status;
+	}
+
+	data &= mask;
+	pr_debug("data=%x mask=%x\n", data, mask);
+
+	if (data == mask) /* all done */
+		return true;
+
+	return false;
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+	if (!dp_link_is_link_status_updated(link) ||
+	    (dp_link_channel_eq_done(link) &&
+	     dp_link_clock_recovery_done(link)))
+		return -EINVAL;
+
+	pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
+			dp_link_channel_eq_done(link),
+			dp_link_clock_recovery_done(link));
+
+	return 0;
+}
+
+static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
+{
+	return link->link_status.downstream_port_status_changed;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process downstream port changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+	if (!dp_link_is_ds_port_status_changed(link))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_VIDEO_PATTERN)
+		&& !(link->request.test_requested & TEST_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_AUDIO_PATTERN);
+}
+
+/**
+ * dp_link_process_video_pattern_request() - process new video pattern request
+ * @link: Display Port link module's data
+ *
+ * This function will handle a new video pattern request that is initiated by
+ * the sink. This is achieved by first sending a disconnect notification to
+ * the sink followed by a subsequent connect notification to the user modules,
+ * where it is expected that the user modules would draw the required link
+ * pattern.
+ */
+static int dp_link_process_video_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_video_pattern_requested(link))
+		goto end;
+
+	pr_debug("%s: bit depth=%d(%d bpp) pattern=%s\n",
+		dp_link_get_test_name(TEST_VIDEO_PATTERN),
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_bpp(link->request.test_bit_depth),
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	return 0;
+end:
+	return -EINVAL;
+}
+
+/**
+ * dp_link_process_audio_pattern_request() - process new audio pattern request
+ * @link: Display Port link module data
+ *
+ * This function will handle a new audio pattern request that is initiated by
+ * the sink. This is achieved by sending the necessary secondary data packets
+ * to the sink. It is expected that any simultaneous requests for video
+ * patterns will be handled before the audio pattern is sent to the sink.
+ */
+static int dp_link_process_audio_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_audio_pattern_requested(link))
+		return -EINVAL;
+
+	pr_debug("sampling_rate=%s, channel_count=%d, pattern_type=%s\n",
+		dp_link_get_audio_sample_rate(
+			link->request.test_audio_sampling_rate),
+		link->request.test_audio_channel_count,
+		dp_link_get_audio_test_pattern(
+			link->request.test_audio_pattern_type));
+
+	pr_debug("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n",
+		link->request.test_audio_period_ch_1,
+		link->request.test_audio_period_ch_2,
+		link->request.test_audio_period_ch_3,
+		link->request.test_audio_period_ch_4);
+
+	pr_debug("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n",
+		link->request.test_audio_period_ch_5,
+		link->request.test_audio_period_ch_6,
+		link->request.test_audio_period_ch_7,
+		link->request.test_audio_period_ch_8);
+
+	return 0;
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+	link->request = (const struct dp_link_request){ 0 };
+	link->request.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+
+	link->dp_link.test_requested = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+static int dp_link_process_request(struct dp_link *dp_link)
+{
+	int ret = 0;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	pr_debug("start\n");
+
+	dp_link_reset_data(link);
+
+	dp_link_parse_sink_status_field(link);
+
+	ret = dp_link_process_link_training_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_LINK_TRAINING;
+		goto exit;
+	}
+
+	ret = dp_link_process_phy_test_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= PHY_TEST_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_link_status_update(link);
+	if (!ret) {
+		dp_link->test_requested |= LINK_STATUS_UPDATED;
+		goto exit;
+	}
+
+	ret = dp_link_process_ds_port_status_change(link);
+	if (!ret) {
+		dp_link->test_requested |= DS_PORT_STATUS_CHANGED;
+		goto exit;
+	}
+
+	ret = dp_link_process_video_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_VIDEO_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_audio_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_AUDIO_PATTERN;
+		goto exit;
+	}
+
+	pr_debug("done\n");
+exit:
+	return ret;
+}
+
+static u8 *dp_link_get_voltage_swing(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return link->link_status.req_voltage_swing;
+}
+
+static u8 *dp_link_get_pre_emphasis(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return link->link_status.req_pre_emphasis;
+}
+
+static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+	u32 cc;
+	enum dynamic_range dr;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* unless a video pattern CTS test is ongoing, use RGB_VESA */
+	if (dp_link_is_video_pattern_requested(link))
+		dr = link->request.test_dyn_range;
+	else
+		dr = DP_DYNAMIC_RANGE_RGB_VESA;
+
+	/* Only RGB_VESA and RGB_CEA are supported for now */
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		cc = BIT(3);
+		break;
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	default:
+		cc = 0;
+	}
+
+	return cc;
+}
+
+static bool dp_link_clock_recovery(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return dp_link_clock_recovery_done(link);
+}
+
+static bool dp_link_channel_equalization(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return dp_link_channel_eq_done(link);
+}
+
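+/**
+ * dp_link_adjust_levels() - recalculate drive levels from sink requests
+ * @dp_link: Display Port link module data
+ *
+ * Picks the highest voltage swing and pre-emphasis requested across the
+ * active lanes and clamps the pair to the maximum combination supported
+ * before the next link training attempt.
+ */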
+static int dp_link_adjust_levels(struct dp_link *dp_link)
+{
+	int i;
+	int max = 0;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* use the max level across lanes */
+	for (i = 0; i < dp_link->lane_count; i++) {
+		pr_debug("lane=%d req_voltage_swing=%d\n",
+			i, link->link_status.req_voltage_swing[i]);
+		if (max < link->link_status.req_voltage_swing[i])
+			max = link->link_status.req_voltage_swing[i];
+	}
+
+	dp_link->v_level = max;
+
+	/* use the max level across lanes */
+	max = 0;
+	for (i = 0; i < dp_link->lane_count; i++) {
+		pr_debug("lane=%d req_pre_emphasis=%d\n",
+			i, link->link_status.req_pre_emphasis[i]);
+		if (max < link->link_status.req_pre_emphasis[i])
+			max = link->link_status.req_pre_emphasis[i];
+	}
+
+	dp_link->p_level = max;
+
+	/*
+	 * Adjust the voltage swing and pre-emphasis level combination to within
+	 * the allowable range.
+	 */
+	if (dp_link->v_level > DP_LINK_VOLTAGE_MAX) {
+		pr_debug("Requested vSwingLevel=%d, change to %d\n",
+				dp_link->v_level, DP_LINK_VOLTAGE_MAX);
+		dp_link->v_level = DP_LINK_VOLTAGE_MAX;
+	}
+
+	if (dp_link->p_level > DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_MAX);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_MAX;
+	}
+
+	if ((dp_link->p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1)
+			&& (dp_link->v_level == DP_LINK_VOLTAGE_LEVEL_2)) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_LEVEL_1);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1;
+	}
+
+	pr_debug("v_level=%d, p_level=%d\n",
+		dp_link->v_level, dp_link->p_level);
+
+	return 0;
+}
+
+static int dp_link_send_psm_request(struct dp_link *dp_link, bool req)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return 0;
+}
+
+static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+	enum test_bit_depth tbd;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Test bit depth is bit depth per color component
+	 *    2. Assume 3 color components
+	 */
+	switch (bpp) {
+	case 18:
+		tbd = DP_TEST_BIT_DEPTH_6;
+		break;
+	case 24:
+		tbd = DP_TEST_BIT_DEPTH_8;
+		break;
+	case 30:
+		tbd = DP_TEST_BIT_DEPTH_10;
+		break;
+	default:
+		tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+		break;
+	}
+
+	return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
+{
+	int rc = 0;
+	struct dp_link_private *link;
+	struct dp_link *dp_link;
+
+	if (!dev || !aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	link->dev   = dev;
+	link->aux   = aux;
+
+	dp_link = &link->dp_link;
+
+	dp_link->process_request        = dp_link_process_request;
+	dp_link->get_voltage_swing      = dp_link_get_voltage_swing;
+	dp_link->get_test_bits_depth    = dp_link_get_test_bits_depth;
+	dp_link->get_pre_emphasis       = dp_link_get_pre_emphasis;
+	dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
+	dp_link->clock_recovery         = dp_link_clock_recovery;
+	dp_link->channel_equalization   = dp_link_channel_equalization;
+	dp_link->adjust_levels          = dp_link_adjust_levels;
+	dp_link->send_psm_request       = dp_link_send_psm_request;
+	dp_link->phy_pattern_requested  = dp_link_phy_pattern_requested;
+
+	return dp_link;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_link_put(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link)
+		return;
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	devm_kfree(link->dev, link);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644
index 0000000..de10e9a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+enum dp_link_voltage_level {
+	DP_LINK_VOLTAGE_LEVEL_0	= 0,
+	DP_LINK_VOLTAGE_LEVEL_1	= 1,
+	DP_LINK_VOLTAGE_LEVEL_2	= 2,
+	DP_LINK_VOLTAGE_MAX	= DP_LINK_VOLTAGE_LEVEL_2,
+};
+
+enum dp_link_preemaphasis_level {
+	DP_LINK_PRE_EMPHASIS_LEVEL_0	= 0,
+	DP_LINK_PRE_EMPHASIS_LEVEL_1	= 1,
+	DP_LINK_PRE_EMPHASIS_LEVEL_2	= 2,
+	DP_LINK_PRE_EMPHASIS_MAX	= DP_LINK_PRE_EMPHASIS_LEVEL_2,
+};
+
+enum test_type {
+	UNKNOWN_TEST		  = 0,
+	TEST_LINK_TRAINING	  = 0x01,
+	TEST_VIDEO_PATTERN	  = 0x02,
+	TEST_EDID_READ		  = 0x04,
+	PHY_TEST_PATTERN	  = 0x08,
+	TEST_AUDIO_PATTERN	  = 0x20,
+	TEST_AUDIO_DISABLED_VIDEO = 0x40,
+};
+
+enum status_update {
+	LINK_STATUS_UPDATED    = 0x100,
+	DS_PORT_STATUS_CHANGED = 0x200,
+};
+
+struct dp_link {
+	u32 test_requested;
+
+	u32 lane_count;
+	u32 link_rate;
+	u32 v_level;
+	u32 p_level;
+
+	u8 *(*get_voltage_swing)(struct dp_link *dp_link);
+	u8 *(*get_pre_emphasis)(struct dp_link *dp_link);
+	u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
+	int (*process_request)(struct dp_link *dp_link);
+	int (*get_colorimetry_config)(struct dp_link *dp_link);
+	int (*adjust_levels)(struct dp_link *dp_link);
+	int (*send_psm_request)(struct dp_link *dp_link, bool req);
+	bool (*clock_recovery)(struct dp_link *dp_link);
+	bool (*channel_equalization)(struct dp_link *dp_link);
+	bool (*phy_pattern_requested)(struct dp_link *dp_link);
+};
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: handle to the dp aux module
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
+
+/**
+ * dp_link_put() - releases the dp test module's resources
+ *
+ * @dp_link: an instance of dp_link module
+ *
+ */
+void dp_link_put(struct dp_link *dp_link);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 0000000..f9616c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_panel.h"
+
+#define DP_LINK_RATE_MULTIPLIER	27000000
+
+struct dp_panel_private {
+	struct device *dev;
+	struct dp_panel dp_panel;
+	struct dp_aux *aux;
+	struct dp_catalog_panel *catalog;
+};
+
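+/**
+ * dp_panel_read_dpcd() - read and parse the sink receiver capabilities
+ * @dp_panel: Display Port panel module data
+ *
+ * Reads the first 16 bytes of the DPCD receiver capability field and
+ * caches the DPCD revision, maximum link rate and lane count, downstream
+ * port information and training read interval in dp_panel->dpcd.
+ */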
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+	u8 *bp;
+	u8 data;
+	u32 const addr = 0x0;
+	u32 const len = 16;
+	int rlen, rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel_dpcd *cap;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	cap = &dp_panel->dpcd;
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	rlen = panel->aux->read(panel->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen != len) {
+		pr_err("dpcd read failed, rlen=%d\n", rlen);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	memset(cap, 0, sizeof(*cap));
+
+	data = *bp++; /* byte 0 */
+	cap->major = (data >> 4) & 0x0f;
+	cap->minor = data & 0x0f;
+	pr_debug("version: %d.%d\n", cap->major, cap->minor);
+
+	data = *bp++; /* byte 1 */
+	/* 162, 270, 540, 810: link symbol rate, NOT bit rate */
+	cap->max_link_rate = data;
+	pr_debug("link_rate=%d\n", cap->max_link_rate);
+
+	data = *bp++; /* byte 2 */
+	if (data & BIT(7))
+		cap->enhanced_frame++;
+
+	if (data & 0x40) {
+		cap->flags |= DPCD_TPS3;
+		pr_debug("pattern 3 supported\n");
+	} else {
+		pr_debug("pattern 3 not supported\n");
+	}
+
+	data &= 0x0f;
+	cap->max_lane_count = data;
+	pr_debug("lane_count=%d\n", cap->max_lane_count);
+
+	data = *bp++; /* byte 3 */
+	if (data & BIT(0)) {
+		cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
+		pr_debug("max_downspread\n");
+	}
+
+	if (data & BIT(6)) {
+		cap->flags |= DPCD_NO_AUX_HANDSHAKE;
+		pr_debug("NO Link Training\n");
+	}
+
+	data = *bp++; /* byte 4 */
+	cap->num_rx_port = (data & BIT(0)) + 1;
+	pr_debug("rx_ports=%d\n", cap->num_rx_port);
+
+	data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
+	cap->downstream_port.dfp_present = data & BIT(0);
+	cap->downstream_port.dfp_type = data & 0x6;
+	cap->downstream_port.format_conversion = data & BIT(3);
+	cap->downstream_port.detailed_cap_info_available = data & BIT(4);
+	pr_debug("dfp_present = %d, dfp_type = %d\n",
+			cap->downstream_port.dfp_present,
+			cap->downstream_port.dfp_type);
+	pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
+			cap->downstream_port.format_conversion,
+			cap->downstream_port.detailed_cap_info_available);
+
+	bp += 1;	/* Skip Byte 6 */
+	rlen -= 1;
+
+	data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
+	cap->downstream_port.dfp_count = data & 0x7;
+	cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
+	cap->downstream_port.oui_support = data & BIT(7);
+	pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
+			cap->downstream_port.dfp_count,
+			cap->downstream_port.msa_timing_par_ignored);
+	pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
+
+	data = *bp++; /* byte 8 */
+	if (data & BIT(1)) {
+		cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+		pr_debug("edid presented\n");
+	}
+
+	data = *bp++; /* byte 9 */
+	cap->rx_port0_buf_size = (data + 1) * 32;
+	pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
+
+	bp += 2; /* skip 10, 11 port1 capability */
+	rlen -= 2;
+
+	data = *bp++;	/* byte 12 */
+	cap->i2c_speed_ctrl = data;
+	if (cap->i2c_speed_ctrl > 0)
+		pr_debug("i2c_rate=%d\n", cap->i2c_speed_ctrl);
+
+	data = *bp++;	/* byte 13 */
+	cap->scrambler_reset = data & BIT(0);
+	pr_debug("scrambler_reset=%d\n", cap->scrambler_reset);
+
+	if (data & BIT(1))
+		cap->enhanced_frame++;
+
+	pr_debug("enhanced_framing=%d\n", cap->enhanced_frame);
+
+	data = *bp++; /* byte 14 */
+	if (data == 0)
+		cap->training_read_interval = 4000; /* us */
+	else
+		cap->training_read_interval = 4000 * data; /* us */
+	pr_debug("training_interval=%d\n", cap->training_read_interval);
+end:
+	return rc;
+}
+
+/*
+ * edid standard header bytes
+ */
+static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+static bool dp_panel_is_edid_header_valid(u8 *buf)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
+		if (buf[i] != edid_hdr[i])
+			return false;
+	}
+
+	return true;
+}
+
+static int dp_panel_validate_edid(u8 *bp, int len)
+{
+	int i;
+	u8 csum = 0;
+	u32 const size = 128;
+
+	if (len < size) {
+		pr_err("Error: len=%x\n", len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		csum += *bp++;
+
+	if (csum != 0) {
+		pr_err("error: csum=0x%x\n", csum);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dp_panel_read_edid(struct dp_panel *dp_panel)
+{
+	u8 *edid_buf;
+	u32 checksum = 0;
+	int rlen, ret = 0;
+	int edid_blk = 0, blk_num = 0, retries = 10;
+	u32 const segment_addr = 0x30;
+	bool edid_parsing_done = false;
+	struct dp_panel_private *panel;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	ret = panel->aux->ready(panel->aux);
+	if (!ret) {
+		pr_err("aux chan NOT ready\n");
+		ret = -EBUSY;
+		goto end;
+	}
+
+	do {
+		u8 segment;
+
+		/*
+		 * Write the segment first.
+		 * Segment = 0, for blocks 0 and 1
+		 * Segment = 1, for blocks 2 and 3
+		 * Segment = 2, for blocks 4 and 5
+		 * and so on ...
+		 */
+		segment = blk_num >> 1;
+
+		panel->aux->write(panel->aux, segment_addr, 1, AUX_I2C,
+					&segment);
+
+		rlen = panel->aux->read(panel->aux, EDID_START_ADDRESS +
+				(blk_num * EDID_BLOCK_SIZE),
+				EDID_BLOCK_SIZE, AUX_I2C, &edid_buf);
+		if (rlen != EDID_BLOCK_SIZE) {
+			pr_err("invalid edid len: %d\n", rlen);
+			continue;
+		}
+
+		pr_debug("=== EDID data ===\n");
+		print_hex_dump(KERN_DEBUG, "EDID: ", DUMP_PREFIX_NONE, 16, 1,
+			edid_buf, EDID_BLOCK_SIZE, false);
+
+		pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
+
+		if (dp_panel_is_edid_header_valid(edid_buf)) {
+			ret = dp_panel_validate_edid(edid_buf, rlen);
+			if (ret) {
+				pr_err("corrupt edid block detected\n");
+				goto end;
+			}
+
+			if (edid_parsing_done) {
+				blk_num++;
+				continue;
+			}
+
+			dp_panel->edid.ext_block_cnt = edid_buf[0x7E];
+			edid_parsing_done = true;
+			checksum = edid_buf[rlen - 1];
+		} else {
+			edid_blk++;
+			blk_num++;
+		}
+
+		memcpy(dp_panel->edid.buf + (edid_blk * EDID_BLOCK_SIZE),
+			edid_buf, EDID_BLOCK_SIZE);
+
+		if (edid_blk == dp_panel->edid.ext_block_cnt)
+			goto end;
+	} while (retries--);
+end:
+	return ret;
+}
+
+static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	u32 data, total_ver, total_hor;
+	struct dp_catalog_panel *catalog;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+	pinfo = &panel->dp_panel.pinfo;
+
+	pr_debug("width=%d hporch= %d %d %d\n",
+		pinfo->h_active, pinfo->h_back_porch,
+		pinfo->h_front_porch, pinfo->h_sync_width);
+
+	pr_debug("height=%d vporch= %d %d %d\n",
+		pinfo->v_active, pinfo->v_back_porch,
+		pinfo->v_front_porch, pinfo->v_sync_width);
+
+	total_hor = pinfo->h_active + pinfo->h_back_porch +
+		pinfo->h_front_porch + pinfo->h_sync_width;
+
+	total_ver = pinfo->v_active + pinfo->v_back_porch +
+			pinfo->v_front_porch + pinfo->v_sync_width;
+
+	data = total_ver;
+	data <<= 16;
+	data |= total_hor;
+
+	catalog->total = data;
+
+	data = (pinfo->v_back_porch + pinfo->v_sync_width);
+	data <<= 16;
+	data |= (pinfo->h_back_porch + pinfo->h_sync_width);
+
+	catalog->sync_start = data;
+
+	data = pinfo->v_sync_width;
+	data <<= 16;
+	data |= (pinfo->v_active_low << 31);
+	data |= pinfo->h_sync_width;
+	data |= (pinfo->h_active_low << 15);
+
+	catalog->width_blanking = data;
+
+	data = pinfo->v_active;
+	data <<= 16;
+	data |= pinfo->h_active;
+
+	catalog->dp_active = data;
+
+	panel->catalog->timing_cfg(catalog);
+end:
+	return rc;
+}
+
+static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+end:
+	return rc;
+}
+
+static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+{
+	const u32 encoding_factx10 = 8;
+	const u32 ln_to_link_ratio = 10;
+	u32 min_link_rate, remainder = 0;
+	u8 calc_link_rate = 0, lane_cnt;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	lane_cnt = dp_panel->dpcd.max_lane_count;
+	pinfo = &dp_panel->pinfo;
+
+	pinfo->bpp = 24;
+
+	/*
+	 * The max pixel clock supported is 675MHz. The
+	 * calculation below keeps min_link_rate within
+	 * 32-bit limits; any change to this section of
+	 * code must take that limitation into account.
+	 */
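+	/*
+	 * Illustrative example only (values assumed, not read from a sink):
+	 * with pixel_clk_khz = 594000 (4k60), lane_cnt = 4 and bpp = 24,
+	 * 594000000 / (4 * 8 * 10) = 1856250, * 24 = 44550000, and
+	 * (44550000 * 10) / 27000000 = 16.5, rounded up to 17, which
+	 * falls in the DP_LINK_RATE_540 bucket below.
+	 */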
+	min_link_rate = (u32)div_u64(pinfo->pixel_clk_khz * 1000,
+				(lane_cnt * encoding_factx10));
+	min_link_rate /= ln_to_link_ratio;
+	min_link_rate = (min_link_rate * pinfo->bpp);
+	min_link_rate = (u32)div_u64_rem(min_link_rate * 10,
+				DP_LINK_RATE_MULTIPLIER, &remainder);
+
+	/*
+	 * To avoid any fractional values,
+	 * increment the min_link_rate
+	 */
+	if (remainder)
+		min_link_rate += 1;
+	pr_debug("min_link_rate = %d\n", min_link_rate);
+
+	if (min_link_rate <= DP_LINK_RATE_162)
+		calc_link_rate = DP_LINK_RATE_162;
+	else if (min_link_rate <= DP_LINK_RATE_270)
+		calc_link_rate = DP_LINK_RATE_270;
+	else if (min_link_rate <= DP_LINK_RATE_540)
+		calc_link_rate = DP_LINK_RATE_540;
+	else if (min_link_rate <= DP_LINK_RATE_810)
+		calc_link_rate = DP_LINK_RATE_810;
+	else {
+		/* Cap the link rate to the max supported rate */
+		pr_debug("link_rate = %d is unsupported\n", min_link_rate);
+		calc_link_rate = DP_LINK_RATE_810;
+	}
+
+	if (calc_link_rate > dp_panel->dpcd.max_link_rate)
+		calc_link_rate = dp_panel->dpcd.max_link_rate;
+
+	pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
+end:
+	return calc_link_rate;
+}
+
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel *dp_panel;
+
+	if (!dev || !aux || !catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+	if (!panel) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	panel->dev = dev;
+	panel->aux = aux;
+	panel->catalog = catalog;
+
+	dp_panel = &panel->dp_panel;
+
+	dp_panel->edid.buf = devm_kzalloc(dev,
+				EDID_BLOCK_SIZE * 4, GFP_KERNEL);
+
+	dp_panel->init_info = dp_panel_init_panel_info;
+	dp_panel->timing_cfg = dp_panel_timing_cfg;
+	dp_panel->read_edid = dp_panel_read_edid;
+	dp_panel->read_dpcd = dp_panel_read_dpcd;
+	dp_panel->get_link_rate = dp_panel_get_link_rate;
+
+	return dp_panel;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel)
+		return;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	devm_kfree(panel->dev, dp_panel->edid.buf);
+	devm_kfree(panel->dev, panel);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 0000000..5c145eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include "dp_aux.h"
+
+#define DPCD_ENHANCED_FRAME     BIT(0)
+#define DPCD_TPS3               BIT(1)
+#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
+#define DPCD_NO_AUX_HANDSHAKE   BIT(3)
+#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
+
+#define EDID_START_ADDRESS	0x50
+#define EDID_BLOCK_SIZE		0x80
+
+
+#define DP_LINK_RATE_162	6	/* 1.62G = 270M * 6 */
+#define DP_LINK_RATE_270	10	/* 2.70G = 270M * 10 */
+#define DP_LINK_RATE_540	20	/* 5.40G = 270M * 20 */
+#define DP_LINK_RATE_810	30	/* 8.10G = 270M * 30 */
+#define DP_LINK_RATE_MAX	DP_LINK_RATE_810
+
+struct downstream_port_config {
+	/* Byte 02205h */
+	bool dfp_present;
+	u32 dfp_type;
+	bool format_conversion;
+	bool detailed_cap_info_available;
+	/* Byte 02207h */
+	u32 dfp_count;
+	bool msa_timing_par_ignored;
+	bool oui_support;
+};
+
+struct dp_panel_dpcd {
+	u8 major;
+	u8 minor;
+	u8 max_lane_count;
+	u8 num_rx_port;
+	u8 i2c_speed_ctrl;
+	u8 scrambler_reset;
+	u8 enhanced_frame;
+	u32 max_link_rate;  /* per lane, in units of 0.27Gbps: 6, 10, 20, 30 */
+	u32 flags;
+	u32 rx_port0_buf_size;
+	u32 training_read_interval;/* us */
+	struct downstream_port_config downstream_port;
+};
+
+struct dp_panel_edid {
+	u8 *buf;
+	u8 id_name[4];
+	u8 id_product;
+	u8 version;
+	u8 revision;
+	u8 video_intf;	/* dp == 0x5 */
+	u8 color_depth;	/* 6, 8, 10, 12 and 14 bits */
+	u8 color_format;	/* RGB 4:4:4, YCrCb 4:4:4, YCrCb 4:2:2 */
+	u8 dpm;		/* display power management */
+	u8 sync_digital;	/* 1 = digital */
+	u8 sync_separate;	/* 1 = separate */
+	u8 vsync_pol;		/* 0 = negative, 1 = positive */
+	u8 hsync_pol;		/* 0 = negative, 1 = positive */
+	u8 ext_block_cnt;
+};
+
+struct dp_panel_info {
+	u32 h_active;
+	u32 v_active;
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_sync_width;
+	u32 h_active_low;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_sync_width;
+	u32 v_active_low;
+	u32 h_skew;
+	u32 refresh_rate;
+	u32 pixel_clk_khz;
+	u32 bpp;
+};
+
+struct dp_panel {
+	struct dp_panel_dpcd dpcd;
+	struct dp_panel_edid edid;
+	struct dp_panel_info pinfo;
+
+	u32 vic;
+
+	int (*init_info)(struct dp_panel *dp_panel);
+	int (*timing_cfg)(struct dp_panel *dp_panel);
+	int (*read_edid)(struct dp_panel *dp_panel);
+	int (*read_dpcd)(struct dp_panel *dp_panel);
+	u8 (*get_link_rate)(struct dp_panel *dp_panel);
+};
+
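+/*
+ * Illustrative usage sketch (aux and catalog are assumed to be set up by
+ * the caller; error handling trimmed):
+ *
+ *	panel = dp_panel_get(dev, aux, catalog);
+ *	rc = panel->read_dpcd(panel);
+ *	rc = panel->read_edid(panel);
+ *	link_rate = panel->get_link_rate(panel);
+ *	...
+ *	dp_panel_put(panel);
+ */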
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644
index 0000000..722c436
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/of_gpio.h>
+
+#include "dp_parser.h"
+
+static void dp_parser_unmap_io_resources(struct dp_parser *parser)
+{
+	struct dp_io *io = &parser->io;
+
+	msm_dss_iounmap(&io->ctrl_io);
+	msm_dss_iounmap(&io->phy_io);
+	msm_dss_iounmap(&io->ln_tx0_io);
+	msm_dss_iounmap(&io->ln_tx1_io);
+	msm_dss_iounmap(&io->dp_pll_io);
+	msm_dss_iounmap(&io->dp_cc_io);
+	msm_dss_iounmap(&io->qfprom_io);
+	msm_dss_iounmap(&io->hdcp_io);
+}
+
+static int dp_parser_ctrl_res(struct dp_parser *parser)
+{
+	int rc = 0;
+	u32 index;
+	struct platform_device *pdev = parser->pdev;
+	struct device_node *of_node = parser->pdev->dev.of_node;
+	struct dp_io *io = &parser->io;
+
+	rc = of_property_read_u32(of_node, "cell-index", &index);
+	if (rc) {
+		pr_err("cell-index not specified, rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->ctrl_io, "dp_ctrl");
+	if (rc) {
+		pr_err("unable to remap dp io resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->phy_io, "dp_phy");
+	if (rc) {
+		pr_err("unable to remap dp PHY resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->ln_tx0_io, "dp_ln_tx0");
+	if (rc) {
+		pr_err("unable to remap dp TX0 resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->ln_tx1_io, "dp_ln_tx1");
+	if (rc) {
+		pr_err("unable to remap dp TX1 resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_pll_io, "dp_pll");
+	if (rc) {
+		pr_err("unable to remap DP PLL resources\n");
+		goto err;
+	}
+
+	rc = msm_dss_ioremap_byname(pdev, &io->dp_cc_io, "dp_mmss_cc");
+	if (rc) {
+		pr_err("unable to remap dp MMSS_CC resources\n");
+		goto err;
+	}
+
+	if (msm_dss_ioremap_byname(pdev, &io->qfprom_io, "qfprom_physical"))
+		pr_warn("unable to remap dp qfprom resources\n");
+
+	if (msm_dss_ioremap_byname(pdev, &io->hdcp_io, "hdcp_physical"))
+		pr_warn("unable to remap dp hdcp resources\n");
+
+	return 0;
+err:
+	dp_parser_unmap_io_resources(parser);
+	return rc;
+}
+
+static int dp_parser_aux(struct dp_parser *parser)
+{
+	int len = 0, i = 0, rc = 0;
+	struct device_node *of_node = parser->pdev->dev.of_node;
+	const char *data;
+
+	data = of_get_property(of_node, "qcom,aux-cfg-settings", &len);
+	if (!data || (len != AUX_CFG_LEN)) {
+		pr_err("Unable to read DP AUX CFG settings\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < len; i++)
+		parser->aux_cfg[i] = data[i];
+end:
+	return rc;
+}
+
+static int dp_parser_misc(struct dp_parser *parser)
+{
+	int rc = 0;
+	struct device_node *of_node = parser->pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node,
+		"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
+	if (rc)
+		parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+
+	return 0;
+}
+
+static int dp_parser_pinctrl(struct dp_parser *parser)
+{
+	int rc = 0;
+	struct dp_pinctrl *pinctrl = &parser->pinctrl;
+
+	pinctrl->pin = devm_pinctrl_get(&parser->pdev->dev);
+
+	if (IS_ERR_OR_NULL(pinctrl->pin)) {
+		rc = PTR_ERR(pinctrl->pin);
+		pr_err("failed to get pinctrl, rc=%d\n", rc);
+		goto error;
+	}
+
+	pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin,
+					"mdss_dp_active");
+	if (IS_ERR_OR_NULL(pinctrl->state_active)) {
+		rc = PTR_ERR(pinctrl->state_active);
+		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
+		goto error;
+	}
+
+	pinctrl->state_suspend = pinctrl_lookup_state(pinctrl->pin,
+					"mdss_dp_sleep");
+	if (IS_ERR_OR_NULL(pinctrl->state_suspend)) {
+		rc = PTR_ERR(pinctrl->state_suspend);
+		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+static int dp_parser_gpio(struct dp_parser *parser)
+{
+	int i = 0;
+	struct device *dev = &parser->pdev->dev;
+	struct device_node *of_node = dev->of_node;
+	struct dss_module_power *mp = &parser->mp[DP_CORE_PM];
+	static const char * const dp_gpios[] = {
+		"qcom,aux-en-gpio",
+		"qcom,aux-sel-gpio",
+		"qcom,usbplug-cc-gpio",
+	};
+
+	mp->gpio_config = devm_kzalloc(dev,
+		sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL);
+	if (!mp->gpio_config)
+		return -ENOMEM;
+
+	mp->num_gpio = ARRAY_SIZE(dp_gpios);
+
+	for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) {
+		mp->gpio_config[i].gpio = of_get_named_gpio(of_node,
+			dp_gpios[i], 0);
+
+		if (!gpio_is_valid(mp->gpio_config[i].gpio)) {
+			pr_err("%s gpio not specified\n", dp_gpios[i]);
+			return -EINVAL;
+		}
+
+		strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i],
+			sizeof(mp->gpio_config[i].gpio_name));
+
+		mp->gpio_config[i].value = 0;
+	}
+
+	return 0;
+}
+
+static const char *dp_parser_supply_node_name(enum dp_pm_type module)
+{
+	switch (module) {
+	case DP_CORE_PM:	return "qcom,core-supply-entries";
+	case DP_CTRL_PM:	return "qcom,ctrl-supply-entries";
+	case DP_PHY_PM:		return "qcom,phy-supply-entries";
+	default:		return "???";
+	}
+}
+
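+/*
+ * Device tree layout assumed by dp_parser_get_vreg(): each power module has
+ * a supply root node (qcom,core-supply-entries, qcom,ctrl-supply-entries or
+ * qcom,phy-supply-entries) whose child nodes carry the properties
+ * qcom,supply-name, qcom,supply-min-voltage, qcom,supply-max-voltage,
+ * qcom,supply-enable-load and qcom,supply-disable-load.
+ */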
+static int dp_parser_get_vreg(struct dp_parser *parser,
+		enum dp_pm_type module)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	const char *pm_supply_name = NULL;
+	struct device_node *supply_node = NULL;
+	struct device_node *of_node = parser->pdev->dev.of_node;
+	struct device_node *supply_root_node = NULL;
+	struct dss_module_power *mp = &parser->mp[module];
+
+	mp->num_vreg = 0;
+	pm_supply_name = dp_parser_supply_node_name(module);
+	supply_root_node = of_get_child_by_name(of_node, pm_supply_name);
+	if (!supply_root_node) {
+		pr_err("no supply entry present: %s\n", pm_supply_name);
+		goto novreg;
+	}
+
+	mp->num_vreg = of_get_available_child_count(supply_root_node);
+
+	if (mp->num_vreg == 0) {
+		pr_debug("no vreg\n");
+		goto novreg;
+	} else {
+		pr_debug("vreg found. count=%d\n", mp->num_vreg);
+	}
+
+	mp->vreg_config = devm_kzalloc(&parser->pdev->dev,
+		sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string(supply_node,
+			"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("error reading name. rc=%d\n",
+				 rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name,
+			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+		/* vreg-min-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n",
+				rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		/* vreg-max-voltage */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n",
+				rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		/* enable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("error reading enable load. rc=%d\n",
+				rc);
+			goto error;
+		}
+		mp->vreg_config[i].enable_load = tmp;
+
+		/* disable-load */
+		rc = of_property_read_u32(supply_node,
+			"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("error reading disable load. rc=%d\n",
+				rc);
+			goto error;
+		}
+		mp->vreg_config[i].disable_load = tmp;
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d\n",
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].enable_load,
+			mp->vreg_config[i].disable_load
+			);
+		++i;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(&parser->pdev->dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+novreg:
+	mp->num_vreg = 0;
+
+	return rc;
+}
+
+static void dp_parser_put_vreg_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+}
+
+static int dp_parser_regulator(struct dp_parser *parser)
+{
+	int i, rc = 0;
+	struct platform_device *pdev = parser->pdev;
+
+	/* Parse the regulator information */
+	for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
+		rc = dp_parser_get_vreg(parser, i);
+		if (rc) {
+			pr_err("get_dt_vreg_data failed for %s. rc=%d\n",
+				dp_parser_pm_name(i), rc);
+			i--;
+			for (; i >= DP_CORE_PM; i--)
+				dp_parser_put_vreg_data(&pdev->dev,
+					&parser->mp[i]);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static bool dp_parser_check_prefix(const char *clk_prefix, const char *clk_name)
+{
+	return !!strnstr(clk_name, clk_prefix, strlen(clk_name));
+}
+
+static void dp_parser_put_clk_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	if (!mp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (mp->clk_config) {
+		devm_kfree(dev, mp->clk_config);
+		mp->clk_config = NULL;
+	}
+
+	mp->num_clk = 0;
+}
+
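+/*
+ * Clocks listed in the "clock-names" property are routed by prefix: names
+ * starting with "core" are assigned to DP_CORE_PM and names starting with
+ * "ctrl" to DP_CTRL_PM (e.g. "ctrl_link_clk", "ctrl_pixel_clk").
+ */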
+static int dp_parser_init_clk_data(struct dp_parser *parser)
+{
+	int num_clk = 0, i = 0, rc = 0;
+	int core_clk_count = 0, ctrl_clk_count = 0;
+	const char *core_clk = "core";
+	const char *ctrl_clk = "ctrl";
+	const char *clk_name;
+	struct device *dev = &parser->pdev->dev;
+	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+
+	num_clk = of_property_count_strings(dev->of_node, "clock-names");
+	if (num_clk <= 0) {
+		pr_err("no clocks are defined\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		of_property_read_string_index(dev->of_node,
+				"clock-names", i, &clk_name);
+
+		if (dp_parser_check_prefix(core_clk, clk_name))
+			core_clk_count++;
+
+		if (dp_parser_check_prefix(ctrl_clk, clk_name))
+			ctrl_clk_count++;
+	}
+
+	/* Initialize the CORE power module */
+	if (core_clk_count <= 0) {
+		pr_err("no core clocks are defined\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	core_power->num_clk = core_clk_count;
+	core_power->clk_config = devm_kzalloc(dev,
+			sizeof(struct dss_clk) * core_power->num_clk,
+			GFP_KERNEL);
+	if (!core_power->clk_config) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	/* Initialize the CTRL power module */
+	if (ctrl_clk_count <= 0) {
+		pr_err("no ctrl clocks are defined\n");
+		rc = -EINVAL;
+		goto ctrl_clock_error;
+	}
+
+	ctrl_power->num_clk = ctrl_clk_count;
+	ctrl_power->clk_config = devm_kzalloc(dev,
+			sizeof(struct dss_clk) * ctrl_power->num_clk,
+			GFP_KERNEL);
+	if (!ctrl_power->clk_config) {
+		ctrl_power->num_clk = 0;
+		rc = -ENOMEM;
+		goto ctrl_clock_error;
+	}
+
+	return rc;
+
+ctrl_clock_error:
+	dp_parser_put_clk_data(dev, core_power);
+exit:
+	return rc;
+}
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+	int rc = 0, i = 0;
+	int num_clk = 0;
+	int core_clk_index = 0, ctrl_clk_index = 0;
+	int core_clk_count = 0, ctrl_clk_count = 0;
+	const char *clk_name;
+	const char *core_clk = "core";
+	const char *ctrl_clk = "ctrl";
+	struct device *dev = &parser->pdev->dev;
+	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+
+	rc = dp_parser_init_clk_data(parser);
+	if (rc) {
+		pr_err("failed to initialize power data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	core_clk_count = core_power->num_clk;
+	ctrl_clk_count = ctrl_power->num_clk;
+
+	num_clk = core_clk_count + ctrl_clk_count;
+
+	for (i = 0; i < num_clk; i++) {
+		of_property_read_string_index(dev->of_node, "clock-names",
+				i, &clk_name);
+
+		if (dp_parser_check_prefix(core_clk, clk_name) &&
+				core_clk_index < core_clk_count) {
+			struct dss_clk *clk =
+				&core_power->clk_config[core_clk_index];
+			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+			clk->type = DSS_CLK_AHB;
+			core_clk_index++;
+		} else if (dp_parser_check_prefix(ctrl_clk, clk_name) &&
+			   ctrl_clk_index < ctrl_clk_count) {
+			struct dss_clk *clk =
+				&ctrl_power->clk_config[ctrl_clk_index];
+			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+			ctrl_clk_index++;
+
+			if (!strcmp(clk_name, "ctrl_link_clk") ||
+			    !strcmp(clk_name, "ctrl_pixel_clk") ||
+			    !strcmp(clk_name, "ctrl_crypto_clk"))
+				clk->type = DSS_CLK_PCLK;
+			else
+				clk->type = DSS_CLK_AHB;
+		}
+	}
+
+	pr_debug("clock parsing successful\n");
+
+exit:
+	return rc;
+}
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+	int rc = 0;
+
+	if (!parser) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = dp_parser_ctrl_res(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_aux(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_misc(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_clock(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_regulator(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_gpio(parser);
+	if (rc)
+		goto err;
+
+	rc = dp_parser_pinctrl(parser);
+err:
+	return rc;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+	struct dp_parser *parser;
+
+	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+	if (!parser)
+		return ERR_PTR(-ENOMEM);
+
+	parser->parse = dp_parser_parse;
+	parser->pdev = pdev;
+
+	return parser;
+}
+
+void dp_parser_put(struct dp_parser *parser)
+{
+	int i = 0;
+	struct dss_module_power *power = NULL;
+
+	if (!parser) {
+		pr_err("invalid parser module\n");
+		return;
+	}
+
+	power = parser->mp;
+
+	for (i = 0; i < DP_MAX_PM; i++) {
+		struct dss_module_power *mp = &power[i];
+
+		devm_kfree(&parser->pdev->dev, mp->clk_config);
+		devm_kfree(&parser->pdev->dev, mp->vreg_config);
+		devm_kfree(&parser->pdev->dev, mp->gpio_config);
+	}
+
+	devm_kfree(&parser->pdev->dev, parser);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644
index 0000000..fdcdd3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/sde_io_util.h>
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define AUX_CFG_LEN	10
+#define DP_MAX_PIXEL_CLK_KHZ	675000
+
+enum dp_pm_type {
+	DP_CORE_PM,
+	DP_CTRL_PM,
+	DP_PHY_PM,
+	DP_MAX_PM
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+	switch (module) {
+	case DP_CORE_PM:	return "DP_CORE_PM";
+	case DP_CTRL_PM:	return "DP_CTRL_PM";
+	case DP_PHY_PM:		return "DP_PHY_PM";
+	default:		return "???";
+	}
+}
+
+/**
+ * struct dp_display_data  - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node:  reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+	struct device_node *ctrl_node;
+	struct device_node *phy_node;
+	bool is_active;
+	const char *name;
+	const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @ctrl_io: controller's mapped memory address
+ * @phy_io: phy's mapped memory address
+ * @ln_tx0_io: USB-DP lane TX0's mapped memory address
+ * @ln_tx1_io: USB-DP lane TX1's mapped memory address
+ * @dp_cc_io: DP cc's mapped memory address
+ * @qfprom_io: qfprom's mapped memory address
+ * @dp_pll_io: DP PLL mapped memory address
+ * @hdcp_io: hdcp's mapped memory address
+ */
+struct dp_io {
+	struct dss_io_data ctrl_io;
+	struct dss_io_data phy_io;
+	struct dss_io_data ln_tx0_io;
+	struct dss_io_data ln_tx1_io;
+	struct dss_io_data dp_cc_io;
+	struct dss_io_data qfprom_io;
+	struct dss_io_data dp_pll_io;
+	struct dss_io_data hdcp_io;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+	struct pinctrl *pin;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_hpd_active;
+	struct pinctrl_state *state_suspend;
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @io: controller's mapped register related data
+ * @disp_data: controller's display related data
+ * @l_map: DP lane mapping
+ * @aux_cfg: DP AUX configuration settings
+ * @max_pclk_khz: maximum supported pixel clock in KHz
+ * @parse: function to be called by client to parse device tree.
+ */
+struct dp_parser {
+	struct platform_device *pdev;
+	struct dss_module_power mp[DP_MAX_PM];
+	struct dp_pinctrl pinctrl;
+	struct dp_io io;
+	struct dp_display_data disp_data;
+
+	u8 l_map[4];
+	u32 aux_cfg[AUX_CFG_LEN];
+	u32 max_pclk_khz;
+
+	int (*parse)(struct dp_parser *parser);
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ *
+ * This function provides client capability to parse the
+ * device tree and populate the data structures. The data
+ * related to clock, regulators, pin-control and other
+ * can be parsed using this module.
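+ *
+ * Illustrative usage sketch (assumes a probe context with a valid platform
+ * device; error handling trimmed):
+ *
+ *	parser = dp_parser_get(pdev);
+ *	if (!IS_ERR(parser))
+ *		rc = parser->parse(parser);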
+ */
+struct dp_parser *dp_parser_get(struct platform_device *pdev);
+
+/**
+ * dp_parser_put() - cleans the dp_parser module
+ *
+ * @parser: pointer to the parser's data.
+ */
+void dp_parser_put(struct dp_parser *parser);
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
new file mode 100644
index 0000000..60c8966
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include "dp_power.h"
+
+#define DP_CLIENT_NAME_SIZE	20
+
+struct dp_power_private {
+	struct dp_parser *parser;
+	struct platform_device *pdev;
+	struct clk *pixel_clk_rcg;
+	struct clk *pixel_parent;
+
+	struct dp_power dp_power;
+	struct sde_power_client *dp_core_client;
+	struct sde_power_handle *phandle;
+
+	bool core_clks_on;
+	bool link_clks_on;
+};
+
+static int dp_power_regulator_init(struct dp_power_private *power)
+{
+	int rc = 0, i = 0, j = 0;
+	struct platform_device *pdev;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+	pdev = power->pdev;
+
+	for (i = DP_CORE_PM; !rc && (i < DP_MAX_PM); i++) {
+		rc = msm_dss_config_vreg(&pdev->dev,
+			parser->mp[i].vreg_config,
+			parser->mp[i].num_vreg, 1);
+		if (rc) {
+			pr_err("failed to init vregs for %s\n",
+				dp_parser_pm_name(i));
+			for (j = i - 1; j >= DP_CORE_PM; j--) {
+				msm_dss_config_vreg(&pdev->dev,
+				parser->mp[j].vreg_config,
+				parser->mp[j].num_vreg, 0);
+			}
+
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+static void dp_power_regulator_deinit(struct dp_power_private *power)
+{
+	int rc = 0, i = 0;
+	struct platform_device *pdev;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+	pdev = power->pdev;
+
+	for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
+		rc = msm_dss_config_vreg(&pdev->dev,
+			parser->mp[i].vreg_config,
+			parser->mp[i].num_vreg, 0);
+		if (rc)
+			pr_err("failed to deinit vregs for %s\n",
+				dp_parser_pm_name(i));
+	}
+}
+
+static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
+{
+	int rc = 0, i = 0, j = 0;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+
+	for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
+		rc = msm_dss_enable_vreg(
+			parser->mp[i].vreg_config,
+			parser->mp[i].num_vreg, enable);
+		if (rc) {
+			pr_err("failed to '%s' vregs for %s\n",
+					enable ? "enable" : "disable",
+					dp_parser_pm_name(i));
+			if (enable) {
+				for (j = i-1; j >= DP_CORE_PM; j--) {
+					msm_dss_enable_vreg(
+					parser->mp[j].vreg_config,
+					parser->mp[j].num_vreg, 0);
+				}
+			}
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+static int dp_power_pinctrl_set(struct dp_power_private *power, bool active)
+{
+	int rc = -EFAULT;
+	struct pinctrl_state *pin_state;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+
+	if (IS_ERR_OR_NULL(parser->pinctrl.pin))
+		return PTR_ERR(parser->pinctrl.pin);
+
+	pin_state = active ? parser->pinctrl.state_active
+				: parser->pinctrl.state_suspend;
+	if (!IS_ERR_OR_NULL(pin_state)) {
+		rc = pinctrl_select_state(parser->pinctrl.pin,
+				pin_state);
+		if (rc)
+			pr_err("can not set %s pins\n",
+			       active ? "dp_active"
+			       : "dp_sleep");
+	} else {
+		pr_err("invalid '%s' pinstate\n",
+		       active ? "dp_active"
+		       : "dp_sleep");
+	}
+
+	return rc;
+}
+
+static int dp_power_clk_init(struct dp_power_private *power, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *core, *ctrl;
+	struct device *dev;
+
+	core = &power->parser->mp[DP_CORE_PM];
+	ctrl = &power->parser->mp[DP_CTRL_PM];
+
+	dev = &power->pdev->dev;
+
+	if (!core || !ctrl) {
+		pr_err("invalid power_data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (enable) {
+		rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+		if (rc) {
+			pr_err("failed to get %s clk. err=%d\n",
+				dp_parser_pm_name(DP_CORE_PM), rc);
+			goto exit;
+		}
+
+		rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+		if (rc) {
+			pr_err("failed to get %s clk. err=%d\n",
+				dp_parser_pm_name(DP_CTRL_PM), rc);
+			goto ctrl_get_error;
+		}
+
+		power->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
+		if (IS_ERR(power->pixel_clk_rcg)) {
+			pr_debug("Unable to get DP pixel clk RCG\n");
+			power->pixel_clk_rcg = NULL;
+		}
+
+		power->pixel_parent = devm_clk_get(dev, "pixel_parent");
+		if (IS_ERR(power->pixel_parent)) {
+			pr_debug("Unable to get DP pixel RCG parent\n");
+			power->pixel_parent = NULL;
+		}
+	} else {
+		if (power->pixel_parent)
+			devm_clk_put(dev, power->pixel_parent);
+
+		if (power->pixel_clk_rcg)
+			devm_clk_put(dev, power->pixel_clk_rcg);
+
+		msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+		msm_dss_put_clk(core->clk_config, core->num_clk);
+	}
+
+	return rc;
+
+ctrl_get_error:
+	msm_dss_put_clk(core->clk_config, core->num_clk);
+exit:
+	return rc;
+}
+
+static int dp_power_clk_set_rate(struct dp_power_private *power,
+		enum dp_pm_type module, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!power) {
+		pr_err("invalid power data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	mp = &power->parser->mp[module];
+
+	if (enable) {
+		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+		if (rc) {
+			pr_err("failed to set clks rate.\n");
+			goto exit;
+		}
+
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1);
+		if (rc) {
+			pr_err("failed to enable clks\n");
+			goto exit;
+		}
+	} else {
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 0);
+		if (rc) {
+			pr_err("failed to disable clks\n");
+			goto exit;
+		}
+	}
+exit:
+	return rc;
+}
+
+static int dp_power_clk_enable(struct dp_power *dp_power,
+		enum dp_pm_type pm_type, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	mp = &power->parser->mp[pm_type];
+
+	if ((pm_type != DP_CORE_PM) && (pm_type != DP_CTRL_PM)) {
+		pr_err("unsupported power module: %s\n",
+				dp_parser_pm_name(pm_type));
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if ((pm_type == DP_CORE_PM)
+			&& (power->core_clks_on)) {
+			pr_debug("core clks already enabled\n");
+			return 0;
+		}
+
+		if ((pm_type == DP_CTRL_PM)
+			&& (power->link_clks_on)) {
+			pr_debug("links clks already enabled\n");
+			return 0;
+		}
+
+		if ((pm_type == DP_CTRL_PM) && (!power->core_clks_on)) {
+			pr_debug("Need to enable core clks before link clks\n");
+
+			rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+			if (rc) {
+				pr_err("failed to enable clks: %s. err=%d\n",
+					dp_parser_pm_name(DP_CORE_PM), rc);
+				goto error;
+			} else {
+				power->core_clks_on = true;
+			}
+		}
+	}
+
+	rc = dp_power_clk_set_rate(power, pm_type, enable);
+	if (rc) {
+		pr_err("failed to '%s' clks for: %s. err=%d\n",
+			enable ? "enable" : "disable",
+			dp_parser_pm_name(pm_type), rc);
+		goto error;
+	}
+
+	if (pm_type == DP_CORE_PM)
+		power->core_clks_on = enable;
+	else
+		power->link_clks_on = enable;
+
+	pr_debug("%s clocks for %s\n",
+			enable ? "enable" : "disable",
+			dp_parser_pm_name(pm_type));
+	pr_debug("link_clks:%s core_clks:%s\n",
+		power->link_clks_on ? "on" : "off",
+		power->core_clks_on ? "on" : "off");
+error:
+	return rc;
+}
+
+static int dp_power_request_gpios(struct dp_power_private *power)
+{
+	int rc = 0, i;
+	struct device *dev;
+	struct dss_module_power *mp;
+	static const char * const gpio_names[] = {
+		"aux_enable", "aux_sel", "usbplug_cc",
+	};
+
+	if (!power) {
+		pr_err("invalid power data\n");
+		return -EINVAL;
+	}
+
+	dev = &power->pdev->dev;
+	mp = &power->parser->mp[DP_CORE_PM];
+
+	for (i = 0; i < ARRAY_SIZE(gpio_names); i++) {
+		unsigned int gpio = mp->gpio_config[i].gpio;
+
+		if (gpio_is_valid(gpio)) {
+			rc = devm_gpio_request(dev, gpio, gpio_names[i]);
+			if (rc) {
+				pr_err("request %s gpio failed, rc=%d\n",
+					       gpio_names[i], rc);
+				goto error;
+			}
+		}
+	}
+	return 0;
+error:
+	for (i = 0; i < ARRAY_SIZE(gpio_names); i++) {
+		unsigned int gpio = mp->gpio_config[i].gpio;
+
+		if (gpio_is_valid(gpio))
+			gpio_free(gpio);
+	}
+	return rc;
+}
+
+static bool dp_power_find_gpio(const char *gpio1, const char *gpio2)
+{
+	return !!strnstr(gpio1, gpio2, strlen(gpio1));
+}
+
+static void dp_power_set_gpio(struct dp_power_private *power, bool flip)
+{
+	int i;
+	struct dss_module_power *mp = &power->parser->mp[DP_CORE_PM];
+	struct dss_gpio *config = mp->gpio_config;
+
+	for (i = 0; i < mp->num_gpio; i++) {
+		if (dp_power_find_gpio(config->gpio_name, "aux-sel"))
+			config->value = flip;
+
+		if (gpio_is_valid(config->gpio)) {
+			pr_debug("gpio %s, value %d\n", config->gpio_name,
+				config->value);
+
+			if (dp_power_find_gpio(config->gpio_name, "aux-en") ||
+			    dp_power_find_gpio(config->gpio_name, "aux-sel"))
+				gpio_direction_output(config->gpio,
+					config->value);
+			else
+				gpio_set_value(config->gpio, config->value);
+
+		}
+		config++;
+	}
+}
+
+static int dp_power_config_gpios(struct dp_power_private *power, bool flip,
+					bool enable)
+{
+	int rc = 0, i;
+	struct dss_module_power *mp;
+	struct dss_gpio *config;
+
+	mp = &power->parser->mp[DP_CORE_PM];
+	config = mp->gpio_config;
+
+	if (enable) {
+		rc = dp_power_request_gpios(power);
+		if (rc) {
+			pr_err("gpio request failed\n");
+			return rc;
+		}
+
+		dp_power_set_gpio(power, flip);
+	} else {
+		for (i = 0; i < mp->num_gpio; i++) {
+			gpio_set_value(config[i].gpio, 0);
+			gpio_free(config[i].gpio);
+		}
+	}
+
+	return 0;
+}
+
+static int dp_power_client_init(struct dp_power *dp_power,
+		struct sde_power_handle *phandle)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+	char dp_client_name[DP_CLIENT_NAME_SIZE];
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return -EINVAL;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	rc = dp_power_regulator_init(power);
+	if (rc) {
+		pr_err("failed to init regulators\n");
+		goto error_power;
+	}
+
+	rc = dp_power_clk_init(power, true);
+	if (rc) {
+		pr_err("failed to init clocks\n");
+		goto error_clk;
+	}
+
+	power->phandle = phandle;
+	snprintf(dp_client_name, DP_CLIENT_NAME_SIZE, "dp_core_client");
+	power->dp_core_client = sde_power_client_create(phandle,
+			dp_client_name);
+	if (IS_ERR_OR_NULL(power->dp_core_client)) {
+		pr_err("[%s] client creation failed for DP\n", dp_client_name);
+		rc = -EINVAL;
+		goto error_client;
+	}
+	return 0;
+
+error_client:
+	dp_power_clk_init(power, false);
+error_clk:
+	dp_power_regulator_deinit(power);
+error_power:
+	return rc;
+}
+
+static void dp_power_client_deinit(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	sde_power_client_destroy(power->phandle, power->dp_core_client);
+	dp_power_clk_init(power, false);
+	dp_power_regulator_deinit(power);
+}
+
+static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	if (power->pixel_clk_rcg && power->pixel_parent)
+		clk_set_parent(power->pixel_clk_rcg, power->pixel_parent);
+exit:
+	return rc;
+}
+
+static int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	rc = dp_power_regulator_ctrl(power, true);
+	if (rc) {
+		pr_err("failed to enable regulators\n");
+		goto exit;
+	}
+
+	rc = dp_power_pinctrl_set(power, true);
+	if (rc) {
+		pr_err("failed to set pinctrl state\n");
+		goto err_pinctrl;
+	}
+
+	rc = dp_power_config_gpios(power, flip, true);
+	if (rc) {
+		pr_err("failed to enable gpios\n");
+		goto err_gpio;
+	}
+
+	rc = sde_power_resource_enable(power->phandle,
+		power->dp_core_client, true);
+	if (rc) {
+		pr_err("Power resource enable failed\n");
+		goto err_sde_power;
+	}
+
+	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+	if (rc) {
+		pr_err("failed to enable DP core clocks\n");
+		goto err_clk;
+	}
+
+	return 0;
+
+err_clk:
+	sde_power_resource_enable(power->phandle, power->dp_core_client, false);
+err_sde_power:
+	dp_power_config_gpios(power, flip, false);
+err_gpio:
+	dp_power_pinctrl_set(power, false);
+err_pinctrl:
+	dp_power_regulator_ctrl(power, false);
+exit:
+	return rc;
+}
+
+static int dp_power_deinit(struct dp_power *dp_power)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+	rc = sde_power_resource_enable(power->phandle,
+			power->dp_core_client, false);
+	if (rc) {
+		pr_err("Power resource disable failed, rc=%d\n", rc);
+		goto exit;
+	}
+	dp_power_config_gpios(power, false, false);
+	dp_power_pinctrl_set(power, false);
+	dp_power_regulator_ctrl(power, false);
+exit:
+	return rc;
+}
+
+struct dp_power *dp_power_get(struct dp_parser *parser)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+	struct dp_power *dp_power;
+
+	if (!parser) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+	if (!power) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	power->parser = parser;
+	power->pdev = parser->pdev;
+
+	dp_power = &power->dp_power;
+
+	dp_power->init = dp_power_init;
+	dp_power->deinit = dp_power_deinit;
+	dp_power->clk_enable = dp_power_clk_enable;
+	dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
+	dp_power->power_client_init = dp_power_client_init;
+	dp_power->power_client_deinit = dp_power_client_deinit;
+
+	return dp_power;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_power_put(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	if (!dp_power)
+		return;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	devm_kfree(&power->pdev->dev, power);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644
index 0000000..e6e9900
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+#include "sde_power_handle.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @init: initializes the regulators/core clocks/GPIOs/pinctrl
+ * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
+ * @clk_enable: enable/disable the DP clocks
+ * @set_pixel_clk_parent: set the parent of DP pixel clock
+ * @power_client_init: create and register the DP core power client
+ * @power_client_deinit: destroy the DP core power client
+ */
+struct dp_power {
+	int (*init)(struct dp_power *power, bool flip);
+	int (*deinit)(struct dp_power *power);
+	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
+				bool enable);
+	int (*set_pixel_clk_parent)(struct dp_power *power);
+	int (*power_client_init)(struct dp_power *power,
+				struct sde_power_handle *phandle);
+	void (*power_client_deinit)(struct dp_power *power);
+};
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API configures the DisplayPort's power module and provides
+ * methods to be called by the client to control the power related
+ * modules.
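+ *
+ * Illustrative usage sketch (parser and phandle are assumed to be obtained
+ * elsewhere; error handling trimmed):
+ *
+ *	power = dp_power_get(parser);
+ *	rc = power->power_client_init(power, phandle);
+ *	rc = power->init(power, flip);
+ *	...
+ *	rc = power->deinit(power);
+ *	power->power_client_deinit(power);
+ *	dp_power_put(power);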
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+/**
+ * dp_power_put() - release the power related resources
+ *
+ * @power: pointer to the power module's data
+ */
+void dp_power_put(struct dp_power *power);
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 5dcdf46..2d7b174 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -142,7 +142,8 @@ void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id);
+				     u32 vc_id,
+				     struct dsi_rect *roi);
 void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index f187ad1..da7a7c0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -879,14 +879,12 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 	int rc = 0;
 	struct mipi_dsi_packet packet;
 	struct dsi_ctrl_cmd_dma_fifo_info cmd;
+	struct dsi_ctrl_cmd_dma_info cmd_mem;
 	u32 hw_flags = 0;
 	u32 length = 0;
 	u8 *buffer = NULL;
-
-	if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
-		pr_err("Memory DMA is not supported, use FIFO\n");
-		goto error;
-	}
+	u32 cnt = 0;
+	u8 *cmdbuf;
 
 	rc = mipi_dsi_create_packet(&packet, msg);
 	if (rc) {
@@ -894,7 +892,32 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 		goto error;
 	}
 
-	if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+				&packet,
+				&buffer,
+				&length);
+
+		if (rc) {
+			pr_err("[%s] failed to copy message, rc=%d\n",
+					dsi_ctrl->name, rc);
+			goto error;
+		}
+
+		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
+		cmd_mem.length = length;
+		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+			true : false;
+		cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+			true : false;
+		cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+			true : false;
+
+		cmdbuf = (u8 *)(dsi_ctrl->vaddr);
+		for (cnt = 0; cnt < length; cnt++)
+			cmdbuf[cnt] = buffer[cnt];
+
+	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
 		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
 					       &packet,
 					       &buffer,
@@ -920,10 +943,15 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
 		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
 
-	if (flags & DSI_CTRL_CMD_FIFO_STORE)
+	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+		dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+						&cmd_mem,
+						hw_flags);
+	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
 		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
 						      &cmd,
 						      hw_flags);
+	}
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
 		u32 retry = 10;
@@ -1558,7 +1586,6 @@ int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
 
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 {
-	struct dsi_mode_info video_timing;
 	int rc = 0;
 
 	if (!dsi_ctrl) {
@@ -1568,12 +1595,6 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	/* replace video mode width with actual roi width */
-	memcpy(&video_timing, &dsi_ctrl->host_config.video_timing,
-			sizeof(video_timing));
-	video_timing.h_active = dsi_ctrl->roi.w;
-	video_timing.v_active = dsi_ctrl->roi.h;
-
 	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.lane_map);
 
@@ -1586,9 +1607,10 @@ int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 					&dsi_ctrl->host_config.u.cmd_engine);
 
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&video_timing,
-				video_timing.h_active * 3,
-				0x0);
+				&dsi_ctrl->host_config.video_timing,
+				dsi_ctrl->host_config.video_timing.h_active * 3,
+				0x0,
+				&dsi_ctrl->roi);
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
@@ -1690,7 +1712,8 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.video_timing,
 				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0);
+				0x0,
+				NULL);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
@@ -2176,14 +2199,14 @@ int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
 }
 
 /**
-  * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
-  * @dsi_ctrl:		DSI controller handle.
-  * @enable:		enable/disable ULPS.
-  *
-  * ULPS can be enabled/disabled after DSI host engine is turned on.
-  *
-  * Return: error code.
-  */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:		DSI controller handle.
+ * @enable:		enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
 int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
 {
 	int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index f89cb68..7f36fde 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -33,12 +33,15 @@
  * @DSI_CTRL_CMD_DEFER_TRIGGER:    Defer the command trigger to later.
  * @DSI_CTRL_CMD_FIFO_STORE:       Use FIFO for command transfer in place of
  *				   reading data from memory.
+ * @DSI_CTRL_CMD_FETCH_MEMORY:     Fetch command from memory through AXI bus
+ *				   and transfer it.
  */
 #define DSI_CTRL_CMD_READ             0x1
 #define DSI_CTRL_CMD_BROADCAST        0x2
 #define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
 #define DSI_CTRL_CMD_DEFER_TRIGGER    0x8
 #define DSI_CTRL_CMD_FIFO_STORE       0x10
+#define DSI_CTRL_CMD_FETCH_MEMORY     0x20
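+/*
+ * Example flag combination for a deferred broadcast transfer that fetches
+ * its payload from memory (as used by dsi_display_broadcast_cmd()):
+ * DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
+ * DSI_CTRL_CMD_FETCH_MEMORY
+ */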
 
 /**
  * enum dsi_power_state - defines power states for dsi controller.
@@ -188,6 +191,8 @@ struct dsi_ctrl_interrupts {
  * @roi:                 Partial update region of interest.
  *                       Origin is top left of this CTRL.
  * @tx_cmd_buf:          Tx command buffer.
+ * @cmd_buffer_iova:     cmd buffer mapped address.
+ * @vaddr:		 CPU virtual address of cmd buffer.
  * @cmd_buffer_size:     Size of command buffer.
  * @debugfs_root:        Root for debugfs entries.
  */
@@ -221,6 +226,8 @@ struct dsi_ctrl {
 	/* Command tx and rx */
 	struct drm_gem_object *tx_cmd_buf;
 	u32 cmd_buffer_size;
+	u32 cmd_buffer_iova;
+	void *vaddr;
 
 	/* Debug Information */
 	struct dentry *debugfs_root;
@@ -377,14 +384,14 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
 int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
 
 /**
-  * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
-  * @dsi_ctrl:		DSI controller handle.
-  * @enable:		enable/disable ULPS.
-  *
-  * ULPS can be enabled/disabled after DSI host engine is turned on.
-  *
-  * Return: error code.
-  */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:		DSI controller handle.
+ * @enable:		enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
 int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 859d707..bb72807 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -319,7 +319,8 @@ struct dsi_ctrl_hw_ops {
 	void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
 				 struct dsi_mode_info *mode,
 				 u32 h_stride,
-				 u32 vc_id);
+				 u32 vc_id,
+				 struct dsi_rect *roi);
 
 	/**
 	 * ctrl_en() - enable DSI controller engine
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 48c2370..a024c43 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -19,6 +19,7 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
+#include "dsi_panel.h"
 
 #define MMSS_MISC_CLAMP_REG_OFF           0x0014
 #define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
@@ -234,21 +235,36 @@ void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl,
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id)
+				     u32 vc_id,
+				     struct dsi_rect *roi)
 {
-	u32 reg = 0;
 	u32 width_final, stride_final;
+	u32 height_final;
+	u32 stream_total = 0, stream_ctrl = 0;
+	u32 reg_ctrl = 0, reg_ctrl2 = 0;
+
+	if (roi && (!roi->w || !roi->h))
+		return;
 
 	if (mode->dsc_enabled && mode->dsc) {
+		u32 reg = 0;
 		u32 offset = 0;
-		u32 reg_ctrl, reg_ctrl2;
+		int pic_width, this_frame_slices, intf_ip_w;
+		struct msm_display_dsc_info dsc;
+
+		memcpy(&dsc, mode->dsc, sizeof(dsc));
+		pic_width = roi ? roi->w : mode->h_active;
+		this_frame_slices = pic_width / dsc.slice_width;
+		intf_ip_w = this_frame_slices * dsc.slice_width;
+		dsi_dsc_pclk_param_calc(&dsc, intf_ip_w);
 
 		if (vc_id != 0)
 			offset = 16;
 		reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
 		reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
-		width_final = mode->dsc->pclk_per_line;
-		stride_final = mode->dsc->bytes_per_pkt;
+		width_final = dsc.pclk_per_line;
+		stride_final = dsc.bytes_per_pkt;
+		height_final = roi ? roi->h : mode->v_active;
 
 		reg = 0x39 << 8;
 		/*
@@ -258,34 +274,45 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 		 * 2 == 4 pkt
 		 * 3 pkt is not support
 		 */
-		if (mode->dsc->pkt_per_line == 4)
-			reg |= (mode->dsc->pkt_per_line - 2) << 6;
+		if (dsc.pkt_per_line == 4)
+			reg |= (dsc.pkt_per_line - 2) << 6;
 		else
-			reg |= (mode->dsc->pkt_per_line - 1) << 6;
-		reg |= mode->dsc->eol_byte_num << 4;
+			reg |= (dsc.pkt_per_line - 1) << 6;
+		reg |= dsc.eol_byte_num << 4;
 		reg |= 1;
 
 		reg_ctrl &= ~(0xFFFF << offset);
 		reg_ctrl |= (reg << offset);
 		reg_ctrl2 &= ~(0xFFFF << offset);
-		reg_ctrl2 |= (mode->dsc->bytes_in_slice << offset);
+		reg_ctrl2 |= (dsc.bytes_in_slice << offset);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+
+		pr_debug("ctrl %d reg_ctrl 0x%x reg_ctrl2 0x%x\n", ctrl->index,
+				reg_ctrl, reg_ctrl2);
+	} else if (roi) {
+		width_final = roi->w;
+		stride_final = roi->w * 3;
+		height_final = roi->h;
 	} else {
 		width_final = mode->h_active;
 		stride_final = h_stride;
+		height_final = mode->v_active;
 	}
 
-	reg = (stride_final + 1) << 16;
-	reg |= (vc_id & 0x3) << 8;
-	reg |= 0x39; /* packet data type */
+	stream_ctrl = (stride_final + 1) << 16;
+	stream_ctrl |= (vc_id & 0x3) << 8;
+	stream_ctrl |= 0x39; /* packet data type */
 
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
 
-	reg = (mode->v_active << 16) | width_final;
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+	stream_total = (height_final << 16) | width_final;
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
+
+	pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
+			stream_ctrl, stream_total);
 }
 
 /**
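As an aside, the geometry selection in the setup_cmd_stream() change above reduces to picking width/stride/height from the ROI when a non-null ROI is given, and from the full mode otherwise, before packing the stream registers. A minimal userspace sketch of the non-DSC branch and the register packing (the 3-bytes-per-pixel stride and the 0x39 packet data type mirror the patch; everything else is illustrative, not driver code):

#include <stdio.h>

struct rect { unsigned int x, y, w, h; };

static void cmd_stream_geometry(unsigned int h_active, unsigned int v_active,
				unsigned int h_stride, const struct rect *roi,
				unsigned int vc_id,
				unsigned int *stream_ctrl,
				unsigned int *stream_total)
{
	unsigned int width, stride, height;

	if (roi && roi->w && roi->h) {
		/* partial update: geometry comes from the ROI */
		width = roi->w;
		stride = roi->w * 3;	/* assumes RGB888, as in the patch */
		height = roi->h;
	} else {
		/* full frame */
		width = h_active;
		stride = h_stride;
		height = v_active;
	}

	/* stride+1, virtual channel id and packet data type (0x39) */
	*stream_ctrl = ((stride + 1) << 16) | ((vc_id & 0x3) << 8) | 0x39;
	*stream_total = (height << 16) | width;
}

int main(void)
{
	struct rect roi = { 0, 0, 540, 960 };	/* hypothetical half-screen ROI */
	unsigned int ctrl, total;

	cmd_stream_geometry(1080, 1920, 1080 * 3, &roi, 0, &ctrl, &total);
	printf("stream_ctrl 0x%x stream_total 0x%x\n", ctrl, total);
	return 0;
}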
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c2cf2cb..d6a7193 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -16,6 +16,7 @@
 
 #include <linux/list.h>
 #include <linux/of.h>
+#include <linux/err.h>
 
 #include "msm_drv.h"
 #include "dsi_display.h"
@@ -1004,9 +1005,9 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display,
 	int i;
 
 	m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
-		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FETCH_MEMORY);
 	flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
-		 DSI_CTRL_CMD_FIFO_STORE);
+		 DSI_CTRL_CMD_FETCH_MEMORY);
 
 	/*
 	 * 1. Setup commands in FIFO
@@ -1101,8 +1102,8 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
 				 const struct mipi_dsi_msg *msg)
 {
 	struct dsi_display *display = to_dsi_display(host);
-
-	int rc = 0;
+	struct dsi_display_ctrl *display_ctrl;
+	int rc = 0, cnt = 0;
 
 	if (!host || !msg) {
 		pr_err("Invalid params\n");
@@ -1131,6 +1132,44 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
 		goto error_disable_clks;
 	}
 
+	if (display->tx_cmd_buf == NULL) {
+		mutex_lock(&display->drm_dev->struct_mutex);
+		display->tx_cmd_buf = msm_gem_new(display->drm_dev,
+				SZ_4K,
+				MSM_BO_UNCACHED);
+		mutex_unlock(&display->drm_dev->struct_mutex);
+
+		display->cmd_buffer_size = SZ_4K;
+
+		if ((display->tx_cmd_buf) == NULL) {
+			pr_err("failed to allocate gem tx cmd buffer\n");
+			rc = -ENOMEM;
+			goto error_disable_cmd_engine;
+		}
+		rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+					&(display->cmd_buffer_iova));
+		if (rc) {
+			pr_err("failed to get the iova rc %d\n", rc);
+			goto free_gem;
+		}
+
+		display->vaddr =
+			(void *) msm_gem_get_vaddr(display->tx_cmd_buf);
+
+		if (IS_ERR_OR_NULL(display->vaddr)) {
+			pr_err("failed to get va rc %d\n", rc);
+			rc = -EINVAL;
+			goto put_iova;
+		}
+
+		for (cnt = 0; cnt < display->ctrl_count; cnt++) {
+			display_ctrl = &display->ctrl[cnt];
+			display_ctrl->ctrl->cmd_buffer_size = SZ_4K;
+			display_ctrl->ctrl->cmd_buffer_iova =
+						display->cmd_buffer_iova;
+			display_ctrl->ctrl->vaddr = display->vaddr;
+		}
+	}
+
 	if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
 		rc = dsi_display_broadcast_cmd(display, msg);
 		if (rc) {
@@ -1143,13 +1182,19 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
 				msg->ctrl : 0;
 
 		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
-					  DSI_CTRL_CMD_FIFO_STORE);
+					  DSI_CTRL_CMD_FETCH_MEMORY);
 		if (rc) {
 			pr_err("[%s] cmd transfer failed, rc=%d\n",
 			       display->name, rc);
 			goto error_disable_cmd_engine;
 		}
 	}
+	return rc;
+
+put_iova:
+	msm_gem_put_iova(display->tx_cmd_buf, 0);
+free_gem:
+	msm_gem_free_object(display->tx_cmd_buf);
 error_disable_cmd_engine:
 	(void)dsi_display_cmd_engine_disable(display);
 error_disable_clks:
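The new first-transfer path above allocates a 4 KB GEM buffer once, maps its iova and kernel vaddr, and on failure releases only what was already acquired, in reverse order. A standalone sketch of that acquire-in-order / unwind-in-reverse pattern, using plain malloc() stand-ins rather than the msm_gem_* calls (resource names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int setup_cmd_buffer(void **buf, void **mapping)
{
	int rc = 0;

	*buf = malloc(4096);		/* stands in for msm_gem_new() */
	if (!*buf) {
		rc = -1;
		goto fail;
	}

	*mapping = malloc(64);		/* stands in for the iova/vaddr mapping */
	if (!*mapping) {
		rc = -1;
		goto free_buf;
	}

	return 0;			/* success: keep everything */

free_buf:
	free(*buf);			/* undo only what was acquired */
	*buf = NULL;
fail:
	return rc;
}

int main(void)
{
	void *buf = NULL, *mapping = NULL;

	if (setup_cmd_buffer(&buf, &mapping))
		fprintf(stderr, "setup failed\n");
	else
		printf("command buffer ready\n");
	free(mapping);
	free(buf);
	return 0;
}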
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index d2bc7d8..89f31af 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -155,6 +155,10 @@ struct dsi_display {
 	bool ulps_enabled;
 	bool clamp_enabled;
 	bool phy_idle_power_off;
+	struct drm_gem_object *tx_cmd_buf;
+	u32 cmd_buffer_size;
+	u32 cmd_buffer_iova;
+	void *vaddr;
 
 	struct mipi_dsi_host host;
 	struct dsi_bridge    *bridge;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 4c9fbbe..f254af5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -249,4 +249,6 @@ int dsi_panel_update_pps(struct dsi_panel *panel);
 int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
 		struct dsi_rect *roi);
 
+void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
+
 #endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4e0b678..a3a9142 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1271,22 +1271,20 @@ static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
 	return ret;
 }
 
-void msm_send_crtc_notification(struct drm_crtc *crtc,
-				struct drm_event *event, u8 *payload)
+void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+		struct drm_device *dev, struct drm_event *event, u8 *payload)
 {
-	struct drm_device *dev = NULL;
 	struct msm_drm_private *priv = NULL;
 	unsigned long flags;
 	struct msm_drm_event *notify, *node;
 	int len = 0, ret;
 
-	if (!crtc || !event || !event->length || !payload) {
-		DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
-			crtc, event, ((event) ? (event->length) : -1),
+	if (!obj || !event || !event->length || !payload) {
+		DRM_ERROR("err param obj %pK event %pK len %d payload %pK\n",
+			obj, event, ((event) ? (event->length) : -1),
 			payload);
 		return;
 	}
-	dev = crtc->dev;
 	priv = (dev) ? dev->dev_private : NULL;
 	if (!dev || !priv) {
 		DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
@@ -1296,7 +1294,7 @@ void msm_send_crtc_notification(struct drm_crtc *crtc,
 	spin_lock_irqsave(&dev->event_lock, flags);
 	list_for_each_entry(node, &priv->client_event_list, base.link) {
 		if (node->event.type != event->type ||
-			crtc->base.id != node->info.object_id)
+			obj->id != node->info.object_id)
 			continue;
 		len = event->length + sizeof(struct drm_msm_event_resp);
 		if (node->base.file_priv->event_space < len) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d50a185..2cd9aa1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -110,6 +110,8 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_ROT_DST_Y,
 	PLANE_PROP_ROT_DST_W,
 	PLANE_PROP_ROT_DST_H,
+	PLANE_PROP_PREFILL_SIZE,
+	PLANE_PROP_PREFILL_TIME,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_ROTATION,
@@ -710,13 +712,14 @@ enum msm_dsi_encoder_id {
 };
 
 /* *
- * msm_send_crtc_notification - notify user-space clients of crtc events.
- * @crtc: crtc that is generating the event.
+ * msm_mode_object_event_notify - notify user-space clients of drm object
+ *                                events.
+ * @obj: mode object (crtc/connector) that is generating the event.
+ * @dev: drm device that owns the mode object.
  * @event: event that needs to be notified.
  * @payload: payload for the event.
  */
-void msm_send_crtc_notification(struct drm_crtc *crtc,
-		struct drm_event *event, u8 *payload);
+void msm_mode_object_event_nofity(struct drm_mode_object *obj,
+		struct drm_device *dev, struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3061099..acd7af5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -341,7 +341,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 			if (obj->import_attach && mmu->funcs->map_dma_buf) {
 				ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
 						obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL);
+						DMA_BIDIRECTIONAL,
+						msm_obj->flags);
 				if (ret) {
 					DRM_ERROR("Unable to map dma buf\n");
 					return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2cf170d..19c7726 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -23,6 +23,7 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
 struct msm_gem_object {
 	struct drm_gem_object base;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index ee93339..fbf7e7b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -43,7 +43,7 @@ struct msm_mmu_funcs {
 	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
 		enum dma_data_direction dir);
 	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir);
+			struct dma_buf *dma_buf, int dir, u32 flags);
 	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index c279d01..4d45898 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -25,6 +25,7 @@
 #include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 
 #ifndef SZ_4G
@@ -220,14 +221,18 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
 }
 
 static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir)
+			struct dma_buf *dma_buf, int dir, u32 flags)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	unsigned long attrs = 0x0;
 	int ret;
 
-	ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
-			dma_buf);
+	if (flags & MSM_BO_KEEPATTRS)
+		attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+	ret = msm_dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
+			dma_buf, attrs);
 	if (ret != sgt->nents) {
 		DRM_ERROR("dma map sg failed\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index d5207b9..b410302 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -180,7 +180,7 @@ static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
 	struct drm_property_blob *blob = prop_node->blob_ptr;
 
 	if (!blob)
-		return -EINVAL;
+		return 0;
 	drm_property_unreference_blob(blob);
 	prop_node->blob_ptr = NULL;
 	return 0;
@@ -1357,7 +1357,8 @@ static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
-	msm_send_crtc_notification(&crtc->base, &event, (u8 *)&bl);
+	msm_mode_object_event_nofity(&crtc_drm->base, crtc_drm->dev,
+			&event, (u8 *)&bl);
 }
 
 int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6593b47..f13c6c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -59,6 +59,7 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
 	struct dsi_display *display;
 	struct sde_connector *c_conn;
 	int bl_lvl;
+	struct drm_event event;
 
 	brightness = bd->props.brightness;
 
@@ -79,8 +80,13 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
 	if (!bl_lvl && brightness)
 		bl_lvl = 1;
 
-	if (c_conn->ops.set_backlight)
+	if (c_conn->ops.set_backlight) {
+		event.type = DRM_EVENT_SYS_BACKLIGHT;
+		event.length = sizeof(u32);
+		msm_mode_object_event_nofity(&c_conn->base.base,
+				c_conn->base.dev, &event, (u8 *)&brightness);
 		c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+	}
 
 	return 0;
 }
@@ -1165,5 +1171,14 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
 int sde_connector_register_custom_event(struct sde_kms *kms,
 		struct drm_connector *conn_drm, u32 event, bool val)
 {
-	return -EINVAL;
+	int ret = -EINVAL;
+
+	switch (event) {
+	case DRM_EVENT_SYS_BACKLIGHT:
+		ret = 0;
+		break;
+	default:
+		break;
+	}
+	return ret;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 71e64e4..497d0db 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -282,7 +282,7 @@ struct sde_connector {
  * Returns: Pointer to associated private display structure
  */
 #define sde_connector_get_panel(C) \
-	((C) ? to_sde_connector((C))->panel : 0)
+	((C) ? to_sde_connector((C))->panel : NULL)
 
 /**
  * sde_connector_get_encoder - get sde connector's private encoder pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 5adef2d..1b40161 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -428,6 +428,103 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
 	sde_kms->irq_obj.total_irqs = 0;
 }
 
+static void sde_core_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static void sde_core_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_core_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_core_irq_mask,
+	.irq_unmask = sde_core_irq_unmask,
+};
+
+static int sde_core_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != NULL);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_core_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+static const struct irq_domain_ops sde_core_irqdomain_ops = {
+	.map = sde_core_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_core_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+	return 0;
+}
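The mask/unmask callbacks above only toggle one bit per hwirq in irq_controller.enabled_mask; a minimal sketch of that bookkeeping with plain bit operations (the driver uses set_bit()/clear_bit() bracketed by the smp_mb barriers shown above; this is illustrative only):

#include <stdbool.h>
#include <stdio.h>

static unsigned long enabled_mask;

static void core_irq_mask(unsigned int hwirq)
{
	enabled_mask &= ~(1UL << hwirq);	/* disable: clear the hwirq bit */
}

static void core_irq_unmask(unsigned int hwirq)
{
	enabled_mask |= 1UL << hwirq;		/* enable: set the hwirq bit */
}

static bool core_irq_is_enabled(unsigned int hwirq)
{
	return enabled_mask & (1UL << hwirq);
}

int main(void)
{
	core_irq_unmask(3);
	core_irq_mask(3);
	printf("hwirq 3 enabled: %d\n", core_irq_is_enabled(3));
	return 0;
}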
+
 irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
 {
 	/*
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 64f4160..c775f8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -38,6 +38,20 @@ int sde_core_irq_postinstall(struct sde_kms *sde_kms);
 void sde_core_irq_uninstall(struct sde_kms *sde_kms);
 
 /**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
  * sde_core_irq - core IRQ handler
  * @sde_kms:		SDE handle
  * @return:		interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 7671649..448a1e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -42,6 +42,23 @@ enum sde_perf_mode {
 	SDE_PERF_MODE_MAX
 };
 
+/**
+ * enum sde_perf_vote_mode: perf vote mode.
+ * @APPS_RSC_MODE:	Combines the vote for all displays and votes it
+ *                      through the APPS RSC. This is the default mode when
+ *                      the display RSC is not available.
+ * @DISP_RSC_MODE:	Combines the vote for all displays and votes it
+ *                      through the display RSC. This is the default mode
+ *                      when the display RSC is available.
+ * @DISP_RSC_PRIMARY_MODE:	The primary display votes through the display
+ *                      RSC while all other displays vote through the APPS
+ *                      RSC.
+ */
+enum sde_perf_vote_mode {
+	APPS_RSC_MODE,
+	DISP_RSC_MODE,
+	DISP_RSC_PRIMARY_MODE,
+};
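The three vote modes documented above differ mainly in whether a CRTC's bandwidth is aggregated with every other active CRTC or only with CRTCs of the same client type. A small sketch of that aggregation rule, mirroring _is_crtc_client_type_matches() further down (enum values and client types are local to the example):

#include <stdbool.h>
#include <stdio.h>

enum vote_mode { APPS_RSC_MODE, DISP_RSC_MODE, DISP_RSC_PRIMARY_MODE };

static bool aggregate_with(enum vote_mode mode, bool rsc_available,
			   int curr_client_type, int other_client_type)
{
	/* primary-only mode: combine only CRTCs of the same client type */
	if (mode == DISP_RSC_PRIMARY_MODE && rsc_available)
		return curr_client_type == other_client_type;

	/* APPS_RSC_MODE / DISP_RSC_MODE: combine all active displays */
	return true;
}

int main(void)
{
	/* 0 = real-time client, 1 = non-real-time client (illustrative) */
	printf("%d\n", aggregate_with(DISP_RSC_PRIMARY_MODE, true, 0, 1));
	printf("%d\n", aggregate_with(DISP_RSC_MODE, true, 0, 1));
	return 0;
}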
+
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -169,7 +186,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
 
 	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
 
-	if (!threshold) {
+	if (!sde_cstate->bw_control) {
+		SDE_DEBUG("bypass bandwidth check\n");
+	} else if (!threshold) {
 		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("no bandwidth limits specified\n");
 		return -E2BIG;
@@ -182,12 +201,38 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
 	return 0;
 }
 
+static inline bool _is_crtc_client_type_matches(struct drm_crtc *tmp_crtc,
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (!tmp_crtc)
+		return false;
+	else if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+							perf->sde_rsc_available)
+		return curr_client_type == sde_crtc_get_client_type(tmp_crtc);
+	else
+		return true;
+}
+
+static inline enum sde_crtc_client_type _get_sde_client_type(
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+						perf->sde_rsc_available)
+		return curr_client_type;
+	else if (perf->bw_vote_mode != APPS_RSC_MODE && perf->sde_rsc_available)
+		return RT_RSC_CLIENT;
+	else
+		return RT_CLIENT;
+}
+
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		struct drm_crtc *crtc)
 {
 	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
 	struct sde_core_perf_params perf = {0};
-	enum sde_crtc_client_type curr_client_type
+	enum sde_crtc_client_type client_vote, curr_client_type
 					= sde_crtc_get_client_type(crtc);
 	struct drm_crtc *tmp_crtc;
 	struct sde_crtc_state *sde_cstate;
@@ -195,7 +240,8 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
 		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-		    (curr_client_type == sde_crtc_get_client_type(tmp_crtc))) {
+		    _is_crtc_client_type_matches(tmp_crtc, curr_client_type,
+								&kms->perf)) {
 			sde_cstate = to_sde_crtc_state(tmp_crtc->state);
 
 			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
@@ -217,7 +263,8 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		bus_ib_quota = kms->perf.fix_core_ib_vote;
 	}
 
-	switch (curr_client_type) {
+	client_vote = _get_sde_client_type(curr_client_type, &kms->perf);
+	switch (client_vote) {
 	case NRT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
 				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
@@ -246,6 +293,32 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		SDE_ERROR("invalid client type:%d\n", curr_client_type);
 		break;
 	}
+
+	if (kms->perf.bw_vote_mode_updated) {
+		switch (kms->perf.bw_vote_mode) {
+		case DISP_RSC_MODE:
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0);
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0);
+			kms->perf.bw_vote_mode_updated = false;
+			break;
+
+		case APPS_RSC_MODE:
+			sde_cstate = to_sde_crtc_state(crtc->state);
+			if (sde_cstate->rsc_client) {
+				sde_rsc_client_vote(sde_cstate->rsc_client,
+									0, 0);
+				kms->perf.bw_vote_mode_updated = false;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
 }
 
 /**
@@ -349,6 +422,10 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
 	}
 	priv = kms->dev->dev_private;
 
+	/* wake vote update is not required with display rsc */
+	if (kms->perf.bw_vote_mode == DISP_RSC_MODE && stop_req)
+		return;
+
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
@@ -376,6 +453,21 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
 			update_bus = 1;
 		}
 
+		/* display rsc override during solver mode */
+		if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+				get_sde_rsc_current_state(SDE_RSC_INDEX) ==
+							    SDE_RSC_CMD_STATE) {
+			/* update the new bandwidth in all cases */
+			if (params_changed && new->bw_ctl != old->bw_ctl) {
+				old->bw_ctl = new->bw_ctl;
+				old->max_per_pipe_ib = new->max_per_pipe_ib;
+				update_bus = 1;
+			/* reducing the bw vote is not required in solver mode */
+			} else if (!params_changed) {
+				update_bus = 0;
+			}
+		}
+
 		if ((params_changed &&
 				(new->core_clk_rate > old->core_clk_rate)) ||
 				(!params_changed &&
@@ -535,6 +627,10 @@ int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
 			(u32 *)&catalog->perf.max_bw_high);
 	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
 			(u32 *)perf, &sde_core_perf_mode_fops);
+	debugfs_create_u32("bw_vote_mode", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode);
+	debugfs_create_bool("bw_vote_mode_updated", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode_updated);
 	debugfs_create_u64("fix_core_clk_rate", 0644, perf->debugfs_root,
 			&perf->fix_core_clk_rate);
 	debugfs_create_u64("fix_core_ib_vote", 0644, perf->debugfs_root,
@@ -566,7 +662,6 @@ void sde_core_perf_destroy(struct sde_core_perf *perf)
 	sde_core_perf_debugfs_destroy(perf);
 	perf->max_core_clk_rate = 0;
 	perf->core_clk = NULL;
-	mutex_destroy(&perf->perf_lock);
 	perf->clk_name = NULL;
 	perf->phandle = NULL;
 	perf->catalog = NULL;
@@ -590,7 +685,12 @@ int sde_core_perf_init(struct sde_core_perf *perf,
 	perf->phandle = phandle;
 	perf->pclient = pclient;
 	perf->clk_name = clk_name;
-	mutex_init(&perf->perf_lock);
+	perf->sde_rsc_available = is_sde_rsc_available(SDE_RSC_INDEX);
+	/* set default mode */
+	if (perf->sde_rsc_available)
+		perf->bw_vote_mode = DISP_RSC_MODE;
+	else
+		perf->bw_vote_mode = APPS_RSC_MODE;
 
 	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
 	if (!perf->core_clk) {
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 31851be..4a1bdad 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -22,8 +22,6 @@
 #include "sde_power_handle.h"
 
 #define	SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE	320000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA	2000000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA	2000000000
 
 /**
  * struct sde_core_perf_params - definition of performance parameters
@@ -53,7 +51,6 @@ struct sde_core_perf_tune {
  * struct sde_core_perf - definition of core performance context
  * @dev: Pointer to drm device
  * @debugfs_root: top level debug folder
- * @perf_lock: serialization lock for this context
  * @catalog: Pointer to catalog configuration
  * @phandle: Pointer to power handler
  * @pclient: Pointer to power client
@@ -66,11 +63,13 @@ struct sde_core_perf_tune {
  * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
  * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
  * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @bw_vote_mode: apps rsc vs display rsc bandwidth vote mode
+ * @sde_rsc_available: is display rsc available
+ * @bw_vote_mode_updated: bandwidth vote mode update
  */
 struct sde_core_perf {
 	struct drm_device *dev;
 	struct dentry *debugfs_root;
-	struct mutex perf_lock;
 	struct sde_mdss_cfg *catalog;
 	struct sde_power_handle *phandle;
 	struct sde_power_client *pclient;
@@ -83,6 +82,9 @@ struct sde_core_perf {
 	u64 fix_core_clk_rate;
 	u64 fix_core_ib_vote;
 	u64 fix_core_ab_vote;
+	u32 bw_vote_mode;
+	bool sde_rsc_available;
+	bool bw_vote_mode_updated;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index dcf3c08..7d0fad0 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -51,8 +51,12 @@ struct sde_crtc_custom_events {
 			struct sde_irq_callback *irq);
 };
 
+static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
+	bool en, struct sde_irq_callback *ad_irq);
+
 static struct sde_crtc_custom_events custom_events[] = {
-	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt}
+	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
+	{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler}
 };
 
 /* default input fence timeout, in ms */
@@ -608,12 +612,15 @@ static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
 	int i;
 
 	if (!dim_layer->rect.w || !dim_layer->rect.h) {
-		SDE_DEBUG("empty dim layer\n");
+		SDE_DEBUG("empty dim_layer\n");
 		return;
 	}
 
 	cstate = to_sde_crtc_state(crtc->state);
 
+	SDE_DEBUG("dim_layer - flags:%d, stage:%d\n",
+			dim_layer->flags, dim_layer->stage);
+
 	split_dim_layer.stage = dim_layer->stage;
 	split_dim_layer.color_fill = dim_layer->color_fill;
 
@@ -647,9 +654,13 @@ static void _sde_crtc_setup_dim_layer_cfg(struct drm_crtc *crtc,
 		} else {
 			split_dim_layer.rect.x =
 					split_dim_layer.rect.x -
-					cstate->lm_bounds[i].w;
+						cstate->lm_bounds[i].x;
 		}
 
+		SDE_DEBUG("split_dim_layer - LM:%d, rect:{%d,%d,%d,%d}\n",
+			i, split_dim_layer.rect.x, split_dim_layer.rect.y,
+			split_dim_layer.rect.w, split_dim_layer.rect.h);
+
 		lm = mixer[i].hw_lm;
 		mixer[i].mixer_op_mode |= 1 << split_dim_layer.stage;
 		lm->ops.setup_dim_layer(lm, &split_dim_layer);
@@ -854,9 +865,24 @@ static u32 _sde_crtc_get_displays_affected(struct drm_crtc *crtc,
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
-			disp_bitmask |= BIT(i);
+	/* pingpong split: one ROI, one LM, two physical displays */
+	if (crtc_state->is_ppsplit) {
+		u32 lm_split_width = crtc_state->lm_bounds[0].w / 2;
+		struct sde_rect *roi = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi))
+			disp_bitmask = 0;
+		else if ((u32)roi->x + (u32)roi->w <= lm_split_width)
+			disp_bitmask = BIT(0);		/* left only */
+		else if (roi->x >= lm_split_width)
+			disp_bitmask = BIT(1);		/* right only */
+		else
+			disp_bitmask = BIT(0) | BIT(1); /* left and right */
+	} else {
+		for (i = 0; i < sde_crtc->num_mixers; i++) {
+			if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+				disp_bitmask |= BIT(i);
+		}
 	}
 
 	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
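For the pingpong-split branch added above, the affected-displays bitmask is purely a function of the ROI position relative to the panel split. A standalone sketch of that computation (types and values illustrative, not driver code):

#include <stdio.h>

struct rect { unsigned int x, y, w, h; };

static unsigned int ppsplit_disp_bitmask(const struct rect *roi,
					 unsigned int lm_width)
{
	unsigned int split = lm_width / 2;

	if (!roi->w || !roi->h)
		return 0;			/* null ROI: nothing to update */
	if (roi->x + roi->w <= split)
		return 1 << 0;			/* left only */
	if (roi->x >= split)
		return 1 << 1;			/* right only */
	return (1 << 0) | (1 << 1);		/* ROI spans the split: both */
}

int main(void)
{
	struct rect roi = { 600, 0, 200, 1920 };	/* hypothetical ROI */

	printf("affected displays 0x%x\n", ppsplit_disp_bitmask(&roi, 1080));
	return 0;
}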
@@ -877,9 +903,6 @@ static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	if (sde_crtc->num_mixers == 1)
-		return 0;
-
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
 		SDE_ERROR("%s: unsupported number of mixers: %d\n",
 				sde_crtc->name, sde_crtc->num_mixers);
@@ -887,9 +910,41 @@ static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc,
 	}
 
 	/*
-	 * On certain HW, ROIs must be centered on the split between LMs,
-	 * and be of equal width.
+	 * If using pingpong split: one ROI, one LM, two physical displays
+	 * then the ROI must be centered on the panel split boundary and
+	 * be of equal width across the split.
 	 */
+	if (crtc_state->is_ppsplit) {
+		u16 panel_split_width;
+		u32 display_mask;
+
+		roi[0] = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi[0]))
+			return 0;
+
+		display_mask = _sde_crtc_get_displays_affected(crtc, state);
+		if (display_mask != (BIT(0) | BIT(1)))
+			return 0;
+
+		panel_split_width = crtc_state->lm_bounds[0].w / 2;
+		if (roi[0]->x + roi[0]->w / 2 != panel_split_width) {
+			SDE_ERROR("%s: roi x %d w %d split %d\n",
+					sde_crtc->name, roi[0]->x, roi[0]->w,
+					panel_split_width);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * On certain HW, if using 2 LM, ROIs must be split evenly between the
+	 * LMs and be of equal width.
+	 */
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
 	roi[0] = &crtc_state->lm_roi[0];
 	roi[1] = &crtc_state->lm_roi[1];
 
@@ -1177,6 +1232,69 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	_sde_crtc_program_lm_output_roi(crtc);
 }
 
+static void _sde_crtc_swap_mixers_for_right_partial_update(
+		struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_encoder *drm_enc;
+	bool is_right_only;
+	bool encoder_in_dsc_merge = false;
+
+	if (!crtc || !crtc->state)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	if (sde_crtc->num_mixers != CRTC_DUAL_MIXERS)
+		return;
+
+	drm_for_each_encoder(drm_enc, crtc->dev) {
+		if (drm_enc->crtc == crtc &&
+				sde_encoder_is_dsc_merge(drm_enc)) {
+			encoder_in_dsc_merge = true;
+			break;
+		}
+	}
+
+	/*
+	 * For a right-only partial update with DSC merge, we swap LM0 & LM1.
+	 * There are two reasons for this:
+	 * - On 8996, there is a DSC HW requirement that in DSC merge mode
+	 *   the left DSC must be used; the right DSC cannot be used alone.
+	 *   For a right-only partial update, this means swapping the layer
+	 *   mixers to map the left LM to the right INTF. On later HW this
+	 *   requirement was relaxed.
+	 * - In DSC merge mode, the physical encoder has already registered
+	 *   PP0 as the master; to switch to right-only we would have to
+	 *   reprogram it to be driven by PP1 instead.
+	 * To support both cases, we prefer the mixer swap solution.
+	 */
+	if (!encoder_in_dsc_merge)
+		return;
+
+	is_right_only = sde_kms_rect_is_null(&cstate->lm_roi[0]) &&
+			!sde_kms_rect_is_null(&cstate->lm_roi[1]);
+
+	if (is_right_only && !sde_crtc->mixers_swapped) {
+		/* right-only update swap mixers */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = true;
+	} else if (!is_right_only && sde_crtc->mixers_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = false;
+	}
+
+	SDE_DEBUG("%s: right_only %d swapped %d, mix0->lm%d, mix1->lm%d\n",
+			sde_crtc->name, is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+	SDE_EVT32(DRMID(crtc), is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+}
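The swap above is a small state machine: swap when the update becomes right-only, swap back when it stops being right-only. A standalone sketch of that decision (types illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct rect { unsigned int x, y, w, h; };

static bool rect_is_null(const struct rect *r)
{
	return !r->w || !r->h;
}

/* Returns true when the two mixers should be exchanged for this frame. */
static bool update_mixer_swap(bool dsc_merge, const struct rect *lm_roi0,
			      const struct rect *lm_roi1, bool *swapped)
{
	bool is_right_only;

	if (!dsc_merge)
		return false;

	is_right_only = rect_is_null(lm_roi0) && !rect_is_null(lm_roi1);

	if (is_right_only == *swapped)
		return false;		/* already in the desired state */

	*swapped = is_right_only;	/* swap in, or swap back */
	return true;
}

int main(void)
{
	struct rect left = { 0, 0, 0, 0 }, right = { 540, 0, 540, 960 };
	bool swapped = false;

	if (update_mixer_swap(true, &left, &right, &swapped))
		printf("swap mixers, swapped=%d\n", swapped);
	return 0;
}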
+
 /**
  * _sde_crtc_blend_setup - configure crtc mixers
  * @crtc: Pointer to drm crtc structure
@@ -1222,6 +1340,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
 			lm->ops.clear_dim_layer(lm);
 	}
 
+	_sde_crtc_swap_mixers_for_right_partial_update(crtc);
+
 	/* initialize stage cfg */
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
 
@@ -1530,26 +1650,28 @@ static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
 {
 	struct sde_drm_dim_layer_v1 dim_layer_v1;
 	struct sde_drm_dim_layer_cfg *user_cfg;
+	struct sde_hw_dim_layer *dim_layer;
 	u32 count, i;
 
 	if (!cstate) {
 		SDE_ERROR("invalid cstate\n");
 		return;
 	}
+	dim_layer = cstate->dim_layer;
 
 	if (!usr_ptr) {
-		SDE_DEBUG("dim layer data removed\n");
+		SDE_DEBUG("dim_layer data removed\n");
 		return;
 	}
 
 	if (copy_from_user(&dim_layer_v1, usr_ptr, sizeof(dim_layer_v1))) {
-		SDE_ERROR("failed to copy dim layer data\n");
+		SDE_ERROR("failed to copy dim_layer data\n");
 		return;
 	}
 
 	count = dim_layer_v1.num_layers;
-	if (!count || (count > SDE_MAX_DIM_LAYERS)) {
-		SDE_ERROR("invalid number of Dim Layers:%d", count);
+	if (count > SDE_MAX_DIM_LAYERS) {
+		SDE_ERROR("invalid number of dim_layers:%d\n", count);
 		return;
 	}
 
@@ -1557,22 +1679,31 @@ static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
 	cstate->num_dim_layers = count;
 	for (i = 0; i < count; i++) {
 		user_cfg = &dim_layer_v1.layer_cfg[i];
-		cstate->dim_layer[i].flags = user_cfg->flags;
-		cstate->dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].rect.x = user_cfg->rect.x1;
-		cstate->dim_layer[i].rect.y = user_cfg->rect.y1;
-		cstate->dim_layer[i].rect.w = user_cfg->rect.x2 -
-						user_cfg->rect.x1 + 1;
-		cstate->dim_layer[i].rect.h = user_cfg->rect.y2 -
-						user_cfg->rect.y1 + 1;
+		dim_layer[i].flags = user_cfg->flags;
+		dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].color_fill = (struct sde_mdss_color) {
+		dim_layer[i].rect.x = user_cfg->rect.x1;
+		dim_layer[i].rect.y = user_cfg->rect.y1;
+		dim_layer[i].rect.w = user_cfg->rect.x2 - user_cfg->rect.x1;
+		dim_layer[i].rect.h = user_cfg->rect.y2 - user_cfg->rect.y1;
+
+		dim_layer[i].color_fill = (struct sde_mdss_color) {
 				user_cfg->color_fill.color_0,
 				user_cfg->color_fill.color_1,
 				user_cfg->color_fill.color_2,
 				user_cfg->color_fill.color_3,
 		};
+
+		SDE_DEBUG("dim_layer[%d] - flags:%d, stage:%d\n",
+				i, dim_layer[i].flags, dim_layer[i].stage);
+		SDE_DEBUG(" rect:{%d,%d,%d,%d}, color:{%d,%d,%d,%d}\n",
+				dim_layer[i].rect.x, dim_layer[i].rect.y,
+				dim_layer[i].rect.w, dim_layer[i].rect.h,
+				dim_layer[i].color_fill.color_0,
+				dim_layer[i].color_fill.color_1,
+				dim_layer[i].color_fill.color_2,
+				dim_layer[i].color_fill.color_3);
 	}
 }
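The conversion above now treats the user rectangle's x2/y2 as exclusive end coordinates, i.e. w = x2 - x1 rather than x2 - x1 + 1. A minimal sketch of that conversion (illustrative types only):

#include <stdio.h>

struct user_rect { unsigned int x1, y1, x2, y2; };
struct hw_rect { unsigned int x, y, w, h; };

static struct hw_rect dim_layer_rect(const struct user_rect *u)
{
	struct hw_rect r = {
		.x = u->x1,
		.y = u->y1,
		.w = u->x2 - u->x1,	/* x2 is exclusive */
		.h = u->y2 - u->y1,	/* y2 is exclusive */
	};

	return r;
}

int main(void)
{
	struct user_rect u = { 0, 0, 1080, 100 };
	struct hw_rect r = dim_layer_rect(&u);

	printf("rect {%u,%u,%u,%u}\n", r.x, r.y, r.w, r.h);
	return 0;
}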
 
@@ -1696,6 +1827,23 @@ static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
+static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state)
+{
+	int i;
+	struct sde_crtc_state *cstate;
+
+	cstate = to_sde_crtc_state(state);
+
+	cstate->is_ppsplit = false;
+	for (i = 0; i < cstate->num_connectors; i++) {
+		struct drm_connector *conn = cstate->connectors[i];
+
+		if (sde_connector_get_topology_name(conn) ==
+				SDE_RM_TOPOLOGY_PPSPLIT)
+			cstate->is_ppsplit = true;
+	}
+}
+
 static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -1760,6 +1908,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
 
 	if (!sde_crtc->num_mixers) {
 		_sde_crtc_setup_mixers(crtc);
+		_sde_crtc_setup_is_ppsplit(crtc->state);
 		_sde_crtc_setup_lm_bounds(crtc, crtc->state);
 	}
 
@@ -1987,7 +2136,13 @@ static void _sde_crtc_vblank_enable_nolock(
 	dev = crtc->dev;
 
 	if (enable) {
-		if (_sde_crtc_power_enable(sde_crtc, true))
+		int ret;
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
+		ret = _sde_crtc_power_enable(sde_crtc, true);
+		mutex_lock(&sde_crtc->crtc_lock);
+		if (ret)
 			return;
 
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
@@ -2008,7 +2163,11 @@ static void _sde_crtc_vblank_enable_nolock(
 
 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
 		_sde_crtc_power_enable(sde_crtc, false);
+		mutex_lock(&sde_crtc->crtc_lock);
 	}
 }
 
@@ -2022,6 +2181,8 @@ static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
 	struct sde_crtc *sde_crtc;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct drm_event event;
+	u32 power_on = 0;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
@@ -2040,13 +2201,18 @@ static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
 
 	mutex_lock(&sde_crtc->crtc_lock);
 
+	event.type = DRM_EVENT_CRTC_POWER;
+	event.length = sizeof(u32);
 	/*
 	 * Update CP on suspend/resume transitions
 	 */
-	if (enable && !sde_crtc->suspend)
+	if (enable && !sde_crtc->suspend) {
 		sde_cp_crtc_suspend(crtc);
-	else if (!enable && sde_crtc->suspend)
+		power_on = 0;
+	} else if (!enable && sde_crtc->suspend) {
 		sde_cp_crtc_resume(crtc);
+		power_on = 1;
+	}
 
 	/*
 	 * If the vblank refcount != 0, release a power reference on suspend
@@ -2059,7 +2225,8 @@ static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
 		_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
 
 	sde_crtc->suspend = enable;
-
+	msm_mode_object_event_nofity(&crtc->base, crtc->dev, &event,
+			(u8 *)&power_on);
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
@@ -2474,6 +2641,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
 
+	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
 	 /* get plane state for all drm planes associated with crtc state */
@@ -2496,9 +2664,10 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		/* check dim layer stage with every plane */
 		for (i = 0; i < cstate->num_dim_layers; i++) {
 			if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
-				SDE_ERROR("plane%d/dimlayer in same stage:%d\n",
-						plane->base.id,
-						cstate->dim_layer[i].stage);
+				SDE_ERROR(
+					"plane:%d/dim_layer:%i-same stage:%d\n",
+					plane->base.id, i,
+					cstate->dim_layer[i].stage);
 				rc = -EINVAL;
 				goto end;
 			}
@@ -2743,19 +2912,19 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 			CRTC_PROP_CORE_CLK);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"rot_prefill_bw", 0, 0, U64_MAX,
@@ -2769,16 +2938,18 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
 
-	if (catalog->has_dim_layer) {
-		msm_property_install_volatile_range(&sde_crtc->property_info,
-			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
-	}
-
 	msm_property_install_volatile_range(&sde_crtc->property_info,
 		"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
 
 	sde_kms_info_reset(info);
 
+	if (catalog->has_dim_layer) {
+		msm_property_install_volatile_range(&sde_crtc->property_info,
+			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
+		sde_kms_info_add_keyint(info, "dim_layer_v1_max_layers",
+				SDE_MAX_DIM_LAYERS);
+	}
+
 	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
 	sde_kms_info_add_keyint(info, "max_linewidth",
 			catalog->max_mixer_width);
@@ -2879,6 +3050,12 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
 			case CRTC_PROP_ROI_V1:
 				ret = _sde_crtc_set_roi_v1(state, (void *)val);
 				break;
+			case CRTC_PROP_CORE_AB:
+			case CRTC_PROP_CORE_IB:
+			case CRTC_PROP_MEM_AB:
+			case CRTC_PROP_MEM_IB:
+				cstate->bw_control = true;
+				break;
 			default:
 				/* nothing to do */
 				break;
@@ -2978,6 +3155,7 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 	struct drm_display_mode *mode;
 	struct drm_framebuffer *fb;
 	struct drm_plane_state *state;
+	struct sde_crtc_state *cstate;
 
 	int i, out_width;
 
@@ -2986,6 +3164,7 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 
 	sde_crtc = s->private;
 	crtc = &sde_crtc->base;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	mutex_lock(&sde_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
@@ -3010,6 +3189,23 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 
 	seq_puts(s, "\n");
 
+	for (i = 0; i < cstate->num_dim_layers; i++) {
+		struct sde_hw_dim_layer *dim_layer = &cstate->dim_layer[i];
+
+		seq_printf(s, "\tdim_layer[%d] stage:%d flags:%d\n",
+				i, dim_layer->stage, dim_layer->flags);
+		seq_printf(s, "\tdst_x:%d dst_y:%d dst_w:%d dst_h:%d\n",
+				dim_layer->rect.x, dim_layer->rect.y,
+				dim_layer->rect.w, dim_layer->rect.h);
+		seq_printf(s,
+			"\tcolor_0:%d color_1:%d color_2:%d color_3:%d\n",
+				dim_layer->color_fill.color_0,
+				dim_layer->color_fill.color_1,
+				dim_layer->color_fill.color_2,
+				dim_layer->color_fill.color_3);
+		seq_puts(s, "\n");
+	}
+
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		pstate = to_sde_plane_state(plane->state);
 		state = plane->state;
@@ -3054,6 +3250,11 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 			state->crtc_h);
 		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
 			pstate->multirect_mode, pstate->multirect_index);
+
+		seq_printf(s, "\texcl_rect: x:%4d y:%4d w:%4d h:%4d\n",
+			pstate->excl_rect.x, pstate->excl_rect.y,
+			pstate->excl_rect.w, pstate->excl_rect.h);
+
 		seq_puts(s, "\n");
 	}
 
@@ -3615,3 +3816,9 @@ int sde_crtc_register_custom_event(struct sde_kms *kms,
 
 	return ret;
 }
+
+static int sde_crtc_power_interrupt_handler(struct drm_crtc *crtc_drm,
+	bool en, struct sde_irq_callback *irq)
+{
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 6a22115..4b3c814 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -108,6 +108,8 @@ struct sde_crtc_event {
  * @name          : ASCII description of this crtc
  * @num_ctls      : Number of ctl paths in use
  * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for a right-only
+ *                  partial update, e.g. in the DSC merge case.
  * @mixers        : List of active mixers
  * @event         : Pointer to last received drm vblank event. If there is a
  *                  pending vblank event, this will be non-null.
@@ -147,6 +149,7 @@ struct sde_crtc {
 	/* HW Resources reserved for the crtc */
 	u32 num_ctls;
 	u32 num_mixers;
+	bool mixers_swapped;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
 
 	struct drm_pending_vblank_event *event;
@@ -251,6 +254,8 @@ struct sde_crtc_respool {
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw controlled by bw properties
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -276,7 +281,9 @@ struct sde_crtc_state {
 	enum sde_intf_mode intf_mode;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
+	bool bw_control;
 
+	bool is_ppsplit;
 	struct sde_rect crtc_roi;
 	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index f11ba51..3d48a17 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -123,8 +123,11 @@ enum sde_enc_rc_states {
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
  * @hw_pp		Handle to the pingpong blocks used for the display. No.
- *                      pingpong blocks can be different than num_phys_encs.
+ *			pingpong blocks can be different than num_phys_encs.
  * @hw_dsc:		Array of DSC block handles used for the display.
+ * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
+ *			for right-only partial update cases, such as pingpong
+ *			split, where the virtual pingpong does not generate IRQs
  * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
  * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
@@ -155,6 +158,8 @@ enum sde_enc_rc_states {
  * @topology:                   topology of the display
  * @mode_set_complete:          flag to indicate modeset completion
  * @rsc_cfg:			rsc configuration
+ * @cur_conn_roi:		current connector roi
+ * @prv_conn_roi:		previous connector roi to optimize if unchanged
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -169,6 +174,8 @@ struct sde_encoder_virt {
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 
+	bool intfs_swapped;
+
 	void (*crtc_vblank_cb)(void *);
 	void *crtc_vblank_cb_data;
 
@@ -195,17 +202,51 @@ struct sde_encoder_virt {
 	bool mode_set_complete;
 
 	struct sde_encoder_rsc_config rsc_cfg;
+	struct sde_rect cur_conn_roi;
+	struct sde_rect prv_conn_roi;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
 
-inline bool _sde_is_dsc_enabled(struct sde_encoder_virt *sde_enc)
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
 {
-	struct msm_compression_info *comp_info = &sde_enc->disp_info.comp_info;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_compression_info *comp_info;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	comp_info = &sde_enc->disp_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
+{
+	enum sde_rm_topology_name topology;
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master)
+		return false;
+
+	drm_conn = sde_enc->cur_master->connector;
+	if (!drm_conn)
+		return false;
+
+	topology = sde_connector_get_topology_name(drm_conn);
+	if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
+		return true;
+
+	return false;
+}
+
 static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
 								bool enable)
 {
@@ -320,7 +361,22 @@ void sde_encoder_helper_split_config(
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 	hw_mdptop = phys_enc->hw_mdptop;
-	cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+
+	/*
+	 * Disable the split modes since this encoder will be operating as the
+	 * only encoder, either for the entire use case (e.g. single DSI), or
+	 * for this frame in the case of a left-only/right-only partial update.
+	 */
+	if (phys_enc->split_role == ENC_ROLE_SOLO) {
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+		if (hw_mdptop->ops.setup_pp_split)
+			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+		return;
+	}
+
+	cfg.en = true;
 	cfg.mode = phys_enc->intf_mode;
 	cfg.intf = interface;
 
@@ -334,8 +390,7 @@ void sde_encoder_helper_split_config(
 	else
 		cfg.pp_split_slave = INTF_MAX;
 
-	if (phys_enc->split_role != ENC_ROLE_SLAVE) {
-		/* master/solo encoder */
+	if (phys_enc->split_role == ENC_ROLE_MASTER) {
 		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
 
 		if (hw_mdptop->ops.setup_split_pipe)
@@ -555,8 +610,14 @@ static bool _sde_encoder_dsc_ich_reset_override_needed(bool pu_en,
 
 static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
 		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
-		u32 common_mode, bool ich_reset)
+		u32 common_mode, bool ich_reset, bool enable)
 {
+	if (!enable) {
+		if (hw_pp->ops.disable_dsc)
+			hw_pp->ops.disable_dsc(hw_pp);
+		return;
+	}
+
 	if (hw_dsc->ops.dsc_config)
 		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
 
@@ -570,9 +631,27 @@ static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
 		hw_pp->ops.enable_dsc(hw_pp);
 }
 
+static void _sde_encoder_get_connector_roi(
+		struct sde_encoder_virt *sde_enc,
+		struct sde_rect *merged_conn_roi)
+{
+	struct drm_connector *drm_conn;
+	struct sde_connector_state *c_state;
+
+	if (!sde_enc || !merged_conn_roi)
+		return;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
+	if (!drm_conn || !drm_conn->state)
+		return;
+
+	c_state = to_sde_connector_state(drm_conn->state);
+	sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
+}
+
 static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode = 0;
@@ -580,22 +659,18 @@ static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 	struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
 
-	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL ||
-						hw_mdp_top == NULL) {
+	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
@@ -608,132 +683,208 @@ static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 		dsc_common_mode = DSC_MODE_VIDEO;
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+		roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
 
 	_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
-			ich_res);
+			ich_res, true);
 
 	return 0;
 }
-static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
-	struct msm_display_dsc_info *dsc =
-		&sde_enc->disp_info.comp_info.dsc_info;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-		l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width * sde_enc->display_num_of_h_tiles;
-	pic_height = dsc->pic_height;
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
-	intf_ip_w = this_frame_slices * dsc->slice_width;
-
-	intf_ip_w /= 2;
-	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
-
-	enc_ip_w = intf_ip_w;
-	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
-
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+	memcpy(&dsc[0], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[1]));
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	/*
+	 * Both DSC encoders use the same picture dimensions, so set the same
+	 * dimensions in both DSC structures.
+	 */
+	_sde_encoder_dsc_update_pic_dim(&dsc[0], roi->w, roi->h);
+	_sde_encoder_dsc_update_pic_dim(&dsc[1], roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc[0].slice_width;
+	intf_ip_w = this_frame_slices * dsc[0].slice_width;
+
+	if (!half_panel_partial_update)
+		intf_ip_w /= 2;
+
+	/*
+	 * In this topology, when both interfaces are active they carry the
+	 * same load, so intf_ip_w is the same for both.
+	 */
+	_sde_encoder_dsc_pclk_param_calc(&dsc[0], intf_ip_w);
+	_sde_encoder_dsc_pclk_param_calc(&dsc[1], intf_ip_w);
+
+	/*
+	 * In this topology there is no DSC merge, so the uncompressed input
+	 * to the encoder and to the interface is the same.
+	 */
+	enc_ip_w = intf_ip_w;
+	_sde_encoder_dsc_initial_line_calc(&dsc[0], enc_ip_w);
+	_sde_encoder_dsc_initial_line_calc(&dsc[1], enc_ip_w);
+
+	/*
+	 * _sde_encoder_dsc_ich_reset_override_needed() must be called only
+	 * after the picture dimensions have been updated via
+	 * _sde_encoder_dsc_update_pic_dim().
+	 */
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, &dsc[0]);
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+			roi->w, roi->h, dsc_common_mode);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		bool active = !!((1 << i) & params->affected_displays);
+
+		SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+				dsc_common_mode, i, active);
+		_sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
+				dsc_common_mode, ich_res, active);
+	}
 
 	return 0;
 }
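For the dual-LM / dual-DSC / dual-interface topology handled above, the per-interface and per-encoder input widths follow directly from the slice math: the slice count is derived by integer division as in the driver, the interface width is halved only when both halves of the panel are being updated, and with no DSC merge the encoder input equals the interface input. A standalone sketch of that arithmetic (illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

static void dsc_widths_dual_intf(int update_width, int slice_width,
				 bool half_panel_partial_update,
				 int *intf_ip_w, int *enc_ip_w)
{
	int slices = update_width / slice_width;

	/* uncompressed width entering the interface(s) */
	*intf_ip_w = slices * slice_width;
	if (!half_panel_partial_update)
		*intf_ip_w /= 2;	/* both interfaces active: each takes half */

	/* no DSC merge in this topology: encoder input equals interface input */
	*enc_ip_w = *intf_ip_w;
}

int main(void)
{
	int intf_w, enc_w;

	dsc_widths_dual_intf(1080, 540, false /* full frame */, &intf_w, &enc_w);
	printf("intf_ip_w %d enc_ip_w %d\n", intf_w, enc_w);
	return 0;
}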
 
-static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-					l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	this_frame_slices = pic_width / dsc->slice_width;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL | DSC_MODE_MULTIPLEX;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
 	/*
-	 * when using 2 encoders for the same stream, no. of slices
-	 * need to be same on both the encoders.
+	 * dsc merge case: when using 2 encoders for the same stream, the
+	 * number of slices needs to be the same on both encoders.
 	 */
 	enc_ip_w = intf_ip_w / 2;
 	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
 
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode |= DSC_MODE_VIDEO;
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, dsc);
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+			roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+			dsc_common_mode, i, params->affected_displays);
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
+			ich_res, true);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
+			ich_res, !half_panel_partial_update);
 
 	return 0;
 }
 
-static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_update_roi(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+	struct drm_display_mode *adj_mode;
+	struct sde_rect roi;
+
+	if (!drm_enc || !drm_enc->crtc || !drm_enc->crtc->state)
+		return -EINVAL;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!sde_enc->cur_master)
+		return -EINVAL;
+
+	adj_mode = &sde_enc->base.crtc->state->adjusted_mode;
+	drm_conn = sde_enc->cur_master->connector;
+
+	_sde_encoder_get_connector_roi(sde_enc, &roi);
+	if (sde_kms_rect_is_null(&roi)) {
+		roi.w = adj_mode->hdisplay;
+		roi.h = adj_mode->vdisplay;
+	}
+
+	memcpy(&sde_enc->prv_conn_roi, &sde_enc->cur_conn_roi,
+			sizeof(sde_enc->prv_conn_roi));
+	memcpy(&sde_enc->cur_conn_roi, &roi, sizeof(sde_enc->cur_conn_roi));
+
+	return 0;
+}
+
+static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	enum sde_rm_topology_name topology;
-	struct drm_connector *drm_conn = sde_enc->phys_encs[0]->connector;
+	struct drm_connector *drm_conn;
 	int ret = 0;
 
+	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
+			!sde_enc->phys_encs[0]->connector)
+		return -EINVAL;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
 	topology = sde_connector_get_topology_name(drm_conn);
 	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
@@ -743,15 +894,19 @@ static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc)
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(&sde_enc->base));
 
+	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
+			&sde_enc->prv_conn_roi))
+		return ret;
+
 	switch (topology) {
 	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
-		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
-		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc, params);
 		break;
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
@@ -1217,7 +1372,6 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	int i = 0;
-	int ret = 0;
 	struct sde_watchdog_te_status te_cfg = { 0 };
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@@ -1252,12 +1406,6 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 				sde_enc->cur_master->hw_mdptop,
 				sde_kms->catalog);
 
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
-	}
-
 	if (hw_mdptop->ops.setup_vsync_sel) {
 		for (i = 0; i < sde_enc->num_phys_encs; i++)
 			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
@@ -1763,6 +1911,65 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
+		struct drm_encoder *drm_enc,
+		unsigned long *affected_displays,
+		int num_active_phys)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *master;
+	enum sde_rm_topology_name topology;
+	bool is_right_only;
+
+	if (!drm_enc || !affected_displays)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	master = sde_enc->cur_master;
+	if (!master || !master->connector)
+		return;
+
+	topology = sde_connector_get_topology_name(master->connector);
+	if (topology != SDE_RM_TOPOLOGY_PPSPLIT)
+		return;
+
+	/*
+	 * For pingpong split, the slave pingpong won't generate IRQs. For
+	 * right-only updates we can't simply swap pingpongs or swap the
+	 * master/slave assignment; we actually have to swap the interfaces
+	 * so that the master physical encoder uses a pingpong/interface
+	 * that generates IRQs on which to wait.
+	 */
+	is_right_only = !test_bit(0, affected_displays) &&
+			test_bit(1, affected_displays);
+
+	if (is_right_only && !sde_enc->intfs_swapped) {
+		/* right-only update: swap interfaces */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = true;
+	} else if (!is_right_only && sde_enc->intfs_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = false;
+	}
+
+	SDE_DEBUG_ENC(sde_enc,
+			"right_only %d swapped %d phys0->intf%d, phys1->intf%d\n",
+			is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0);
+	SDE_EVT32(DRMID(drm_enc), is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0,
+			*affected_displays);
+
+	/* ppsplit always uses the master since the pp slave is invalid for IRQs */
+	if (num_active_phys == 1)
+		*affected_displays = BIT(0);
+}
+
 static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1785,6 +1992,10 @@ static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
 	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
 			params->affected_displays, num_active_phys);
 
+	/* for left/right only update, ppsplit master switches interface */
+	_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
+			&params->affected_displays, num_active_phys);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		enum sde_enc_split_role prv_role, new_role;
 		bool active;
@@ -1814,6 +2025,9 @@ static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
 		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
 				phys->hw_pp->idx - PINGPONG_0, prv_role,
 				phys->split_role, active);
+		SDE_EVT32(DRMID(drm_enc), params->affected_displays,
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active, num_active_phys);
 	}
 }
 
@@ -1892,6 +2106,8 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 
 	_sde_encoder_update_master(drm_enc, params);
 
+	_sde_encoder_update_roi(drm_enc);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
@@ -1899,6 +2115,12 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 					sde_enc->cur_master->connector->base.id,
 					rc);
 	}
+
+	if (sde_encoder_is_dsc_enabled(drm_enc)) {
+		rc = _sde_encoder_dsc_setup(sde_enc, params);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
+	}
 }
 
 void sde_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -2357,6 +2579,9 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
 		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
 		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_DP;
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
 		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
 		intf_type = INTF_WB;
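
For reference, a small worked example of the per-DSC width math in _sde_encoder_dsc_2_lm_2_enc_2_intf() above (the ROI and slice-width numbers are hypothetical, chosen only to illustrate the arithmetic):

	/*
	 * Hypothetical full-frame update on SDE_RM_TOPOLOGY_DUALPIPE_DSC:
	 *   roi->w = 2160, dsc[0].slice_width = 540
	 *
	 *   this_frame_slices = 2160 / 540 = 4
	 *   intf_ip_w         = 4 * 540    = 2160
	 *   !half_panel_partial_update     -> intf_ip_w /= 2 -> 1080
	 *   enc_ip_w          = intf_ip_w  = 1080  (no dsc_merge in this path)
	 *
	 * For a half-panel partial update (one bit set in affected_displays)
	 * the divide-by-two is skipped (the connector ROI is expected to
	 * cover only the updated half) and the final loop programs only the
	 * active DSC/pingpong pair.
	 */
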
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 7292a12..6ef245b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -149,6 +149,20 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 void sde_encoder_virt_restore(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_is_dsc_enabled - check if encoder is in DSC mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC mode
+ */
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC merge mode
+ */
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 572bd9e..7adab09 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -615,7 +615,8 @@ static void _sde_encoder_phys_cmd_pingpong_config(
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -699,6 +700,8 @@ static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 		}
 	}
 
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
@@ -832,15 +835,28 @@ static void sde_encoder_phys_cmd_update_split_role(
 		struct sde_encoder_phys *phys_enc,
 		enum sde_enc_split_role role)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	enum sde_enc_split_role old_role = phys_enc->split_role;
+	struct sde_encoder_phys_cmd *cmd_enc;
+	enum sde_enc_split_role old_role;
+	bool is_ppsplit;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	old_role = phys_enc->split_role;
+	is_ppsplit = _sde_encoder_phys_is_ppsplit(phys_enc);
+
+	phys_enc->split_role = role;
 
 	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
 			old_role, role);
 
-	phys_enc->split_role = role;
-	if (role == ENC_ROLE_SKIP || role == old_role)
+	/*
+	 * ppsplit solo needs to reprogram because the interface may have been
+	 * swapped without the role changing across back-to-back left-only and
+	 * right-only commits.
+	 */
+	if (!(is_ppsplit && role == ENC_ROLE_SOLO) &&
+			(role == old_role || role == ENC_ROLE_SKIP))
 		return;
 
 	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
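
The reworked early return in sde_encoder_phys_cmd_update_split_role() is easy to misread, so here is a derived summary of when the split configuration is actually reprogrammed (it only restates the condition in the hunk, it adds no behaviour):

	/*
	 * Skip when: !(is_ppsplit && role == ENC_ROLE_SOLO) &&
	 *            (role == old_role || role == ENC_ROLE_SKIP)
	 *
	 *   is_ppsplit   new role        vs old role   reprogram?
	 *   yes          ENC_ROLE_SOLO   any           yes (intf may have swapped)
	 *   no           ENC_ROLE_SOLO   same          no
	 *   no           ENC_ROLE_SOLO   changed       yes
	 *   any          ENC_ROLE_SKIP   any           no
	 *   any          other role      changed       yes
	 *   any          other role      same          no
	 */
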
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 28a2b16..385c610 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -21,6 +21,7 @@
 #include "sde_core_irq.h"
 #include "sde_wb.h"
 #include "sde_vbif.h"
+#include "sde_crtc.h"
 
 #define to_sde_encoder_phys_wb(x) \
 	container_of(x, struct sde_encoder_phys_wb, base)
@@ -104,6 +105,48 @@ static void sde_encoder_phys_wb_set_traffic_shaper(
 }
 
 /**
+ * sde_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_set_qos_remap(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_wb *hw_wb;
+	struct drm_crtc *crtc;
+	struct sde_vbif_set_qos_params qos_params;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	crtc = phys_enc->parent->crtc;
+
+	if (!wb_enc->hw_wb || !wb_enc->hw_wb->caps) {
+		SDE_ERROR("invalid writeback hardware\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+	qos_params.xin_id = hw_wb->caps->xin_id;
+	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+	qos_params.num = hw_wb->idx - WB_0;
+	qos_params.is_rt = sde_crtc_get_client_type(crtc) != NRT_CLIENT;
+
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d\n",
+			qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
+}
+
+/**
  * sde_encoder_phys_setup_cdm - setup chroma down block
  * @phys_enc:	Pointer to physical encoder
  * @fb:		Pointer to output framebuffer
@@ -528,6 +571,8 @@ static void sde_encoder_phys_wb_setup(
 
 	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
 
+	sde_encoder_phys_wb_set_qos_remap(phys_enc);
+
 	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
 
 	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 1faa46e2..30e63da 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -285,6 +285,8 @@ enum {
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
+	VBIF_QOS_RT_REMAP,
+	VBIF_QOS_NRT_REMAP,
 	VBIF_PROP_MAX,
 };
 
@@ -512,6 +514,10 @@ static struct sde_prop_type vbif_prop[] = {
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -1049,6 +1055,13 @@ static int sde_sspp_parse_dt(struct device_node *np,
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
 
+		if (sde_cfg->ts_prefill_rev == 1) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+		} else if (sde_cfg->ts_prefill_rev == 2) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+			set_bit(SDE_SSPP_TS_PREFILL_REC1, &sspp->features);
+		}
+
 		sblk->smart_dma_priority =
 			PROP_VALUE_ACCESS(prop_value, SSPP_SMART_DMA, i);
 
@@ -1926,7 +1939,7 @@ static int sde_vbif_parse_dt(struct device_node *np,
 	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[VBIF_PROP_MAX];
-	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	u32 off_count, vbif_len;
 	struct sde_vbif_cfg *vbif;
 
 	if (!sde_cfg) {
@@ -1948,12 +1961,22 @@ static int sde_vbif_parse_dt(struct device_node *np,
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], NULL);
 	if (rc)
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
+			&prop_count[VBIF_QOS_RT_REMAP], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_NRT_REMAP], 1,
+			&prop_count[VBIF_QOS_NRT_REMAP], NULL);
 	if (rc)
 		goto end;
 
@@ -2048,6 +2071,63 @@ static int sde_vbif_parse_dt(struct device_node *np,
 				vbif->dynamic_ot_rd_tbl.count ||
 				vbif->dynamic_ot_wr_tbl.count)
 			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+
+		vbif->qos_rt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_RT_REMAP];
+		SDE_DEBUG("qos_rt_tbl.npriority_lvl=%u\n",
+				vbif->qos_rt_tbl.npriority_lvl);
+		if (vbif->qos_rt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_rt_tbl.priority_lvl = kcalloc(
+				vbif->qos_rt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_rt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_rt_tbl.npriority_lvl) {
+			vbif->qos_rt_tbl.npriority_lvl = 0;
+			vbif->qos_rt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos rt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_rt_tbl.npriority_lvl; j++) {
+			vbif->qos_rt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_RT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_rt_tbl.priority_lvl[j]);
+		}
+
+		vbif->qos_nrt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_NRT_REMAP];
+		SDE_DEBUG("qos_nrt_tbl.npriority_lvl=%u\n",
+				vbif->qos_nrt_tbl.npriority_lvl);
+
+		if (vbif->qos_nrt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_nrt_tbl.priority_lvl = kcalloc(
+				vbif->qos_nrt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_nrt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_nrt_tbl.npriority_lvl) {
+			vbif->qos_nrt_tbl.npriority_lvl = 0;
+			vbif->qos_nrt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos nrt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_nrt_tbl.npriority_lvl; j++) {
+			vbif->qos_nrt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_NRT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_nrt_tbl.priority_lvl[j]);
+		}
+
+		if (vbif->qos_rt_tbl.npriority_lvl ||
+				vbif->qos_nrt_tbl.npriority_lvl)
+			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
 	}
 
 end:
@@ -2510,11 +2590,17 @@ static int _sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		/* update msm8998 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 25;
+		sde_cfg->vbif_qos_nlvl = 4;
+		sde_cfg->ts_prefill_rev = 1;
 		break;
 	case SDE_HW_VER_400:
-		/* update msm8998 and sdm845 target here */
+		/* update sdm845 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
 		break;
 	default:
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -2549,6 +2635,8 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 	for (i = 0; i < sde_cfg->vbif_count; i++) {
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+		kfree(sde_cfg->vbif[i].qos_rt_tbl.priority_lvl);
+		kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
 	}
 
 	kfree(sde_cfg->dma_formats);
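
As a derived note on the remap-table parsing added above: the tables are only accepted when their length matches the target's vbif_qos_nlvl (the device-tree property names are the ones registered in vbif_prop[]):

	/*
	 * qcom,sde-vbif-qos-rt-remap / qcom,sde-vbif-qos-nrt-remap must carry
	 * exactly vbif_qos_nlvl u32 entries:
	 *
	 *   msm8998 targets:            vbif_qos_nlvl = 4 -> 4 entries
	 *   sdm845 (SDE_HW_VER_400):    vbif_qos_nlvl = 8 -> 8 entries
	 *
	 * Any other non-zero length is rejected ("invalid qos rt/nrt table")
	 * and treated as absent; SDE_VBIF_QOS_REMAP is set only if at least
	 * one of the two tables parsed successfully.
	 */
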
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index cfb1b67..e24192b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -111,6 +111,8 @@ enum {
  * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
  * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
  * @SDE_SSPP_SBUF,           SSPP support inline stream buffer
+ * @SDE_SSPP_TS_PREFILL,     Supports prefill with traffic shaper
+ * @SDE_SSPP_TS_PREFILL_REC1, Supports prefill with traffic shaper multirec
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -130,6 +132,8 @@ enum {
 	SDE_SSPP_SMART_DMA_V1,
 	SDE_SSPP_SMART_DMA_V2,
 	SDE_SSPP_SBUF,
+	SDE_SSPP_TS_PREFILL,
+	SDE_SSPP_TS_PREFILL_REC1,
 	SDE_SSPP_MAX
 };
 
@@ -258,10 +262,12 @@ enum {
 /**
  * VBIF sub-blocks and features
  * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @SDE_VBIF_QOS_REMAP        VBIF supports QoS priority remap
  * @SDE_VBIF_MAX              maximum value
  */
 enum {
 	SDE_VBIF_QOS_OTLIM = 0x1,
+	SDE_VBIF_QOS_REMAP,
 	SDE_VBIF_MAX
 };
 
@@ -653,6 +659,16 @@ struct sde_vbif_dynamic_ot_tbl {
 };
 
 /**
+ * struct sde_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      number of priority levels
+ * @priority_lvl       pointer to array of priority levels in ascending order
+ */
+struct sde_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
  * struct sde_vbif_cfg - information of VBIF blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -662,6 +678,8 @@ struct sde_vbif_dynamic_ot_tbl {
  * @xin_halt_timeout   maximum time (in usec) for xin to halt
  * @dynamic_ot_rd_tbl  dynamic OT read configuration table
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
  */
 struct sde_vbif_cfg {
 	SDE_HW_BLK_INFO;
@@ -670,6 +688,8 @@ struct sde_vbif_cfg {
 	u32 xin_halt_timeout;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct sde_vbif_qos_tbl qos_rt_tbl;
+	struct sde_vbif_qos_tbl qos_nrt_tbl;
 };
 /**
  * struct sde_reg_dma_cfg - information of lut dma blocks
@@ -746,6 +766,8 @@ struct sde_perf_cfg {
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
  * @wb_formats         Supported formats for wb
+ * @vbif_qos_nlvl      number of vbif QoS priority levels
+ * @ts_prefill_rev     prefill traffic shaper feature revision
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -765,6 +787,8 @@ struct sde_mdss_cfg {
 	bool has_sbuf;
 	u32 sbuf_headroom;
 	bool has_idle_pc;
+	u32 vbif_qos_nlvl;
+	u32 ts_prefill_rev;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
index 28479ab..8f7764d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_proc_common_v4.h
@@ -21,6 +21,7 @@
 #define GAMUT_MAP_EN BIT(1)
 #define GAMUT_EN BIT(0)
 #define GAMUT_MODE_13B_OFF 640
+#define GAMUT_MODE_5_OFF 1248
 
 enum {
 	gamut_mode_17 = 0,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 24f16c6..9c96b5e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -986,7 +986,7 @@ static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
 			sde_intr_set[reg_idx].status_off) &
 					sde_irq_map[irq_idx].irq_mask;
 	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[irq_idx].clr_off,
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
 				intr_status);
 
 	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 1f17378..be83afe 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -116,7 +116,7 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
 	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
 	p->hsync_skew - 1;
 
-	if (ctx->cap->type == INTF_EDP) {
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
 		display_v_start += p->hsync_pulse_width + p->h_back_porch;
 		display_v_end -= p->h_front_porch;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 7780c5b..fedc72c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -71,7 +71,7 @@ static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
 	if (stage == SDE_STAGE_BASE)
 		rc = -EINVAL;
 	else if (stage <= sblk->maxblendstages)
-		rc = sblk->blendstage_base[stage - 1];
+		rc = sblk->blendstage_base[stage - SDE_STAGE_0];
 	else
 		rc = -EINVAL;
 
@@ -198,7 +198,7 @@ static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	int stage_off;
-	u32 val = 0;
+	u32 val = 0, alpha = 0;
 
 	stage_off = _stage_offset(ctx, dim_layer->stage);
 	if (stage_off < 0) {
@@ -206,13 +206,13 @@ static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
 		return;
 	}
 
-	val = (dim_layer->color_fill.color_1 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_0 & 0xFFF);
+	alpha = dim_layer->color_fill.color_3 & 0xFF;
+	val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
+			((dim_layer->color_fill.color_0 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);
 
-	val = 0;
-	val = (dim_layer->color_fill.color_3 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_2 & 0xFFF);
+	val = (alpha << 4) << 16 |
+			((dim_layer->color_fill.color_2 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);
 
 	val = dim_layer->rect.h << 16 | dim_layer->rect.w;
@@ -222,9 +222,14 @@ static void sde_hw_lm_setup_dim_layer(struct sde_hw_mixer *ctx,
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);
 
 	val = BIT(16); /* enable dim layer */
+	val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
 	if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
 		val |= BIT(17);
+	else
+		val &= ~BIT(17);
 	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
+	val = (alpha << 16) | (0xff - alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
 }
 
 static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
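
To make the new dim-layer register packing concrete, here is a worked example with hypothetical 8-bit color components (the register names follow the writes in the hunk above):

	/*
	 * Hypothetical dim layer fill: color_0 = 0x80, color_1 = 0x40,
	 * color_2 = 0xC0, color_3 (alpha) = 0x80.
	 *
	 *   alpha                    = 0x80
	 *   LM_FG_COLOR_FILL_COLOR_0 = ((0x40 << 2) & 0xFFF) << 16 |
	 *                              ((0x80 << 2) & 0xFFF)        = 0x01000200
	 *   LM_FG_COLOR_FILL_COLOR_1 = (0x80 << 4) << 16 |
	 *                              ((0xC0 << 2) & 0xFFF)        = 0x08000300
	 *   LM_BLEND0_CONST_ALPHA    = (0x80 << 16) | (0xff - 0x80) = 0x0080007f
	 */
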
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 5719c51..0dcbb7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -347,10 +347,9 @@ static int sde_gamut_get_mode_info(struct drm_msm_3d_gamut *payload,
 		break;
 	case GAMUT_3D_MODE_5:
 		*tbl_len = GAMUT_3D_MODE5_TBL_SZ * sizeof(u32) * 2;
-		*tbl_off = 0;
+		*tbl_off = GAMUT_MODE_5_OFF;
 		*scale_off = GAMUT_SCALEB_OFFSET_OFF;
 		*opcode = gamut_mode_5 << 2;
-		*opcode |= GAMUT_MAP_EN;
 		break;
 	case GAMUT_3D_MODE_13:
 		*tbl_len = GAMUT_3D_MODE13_TBL_SZ * sizeof(u32) * 2;
@@ -364,7 +363,6 @@ static int sde_gamut_get_mode_info(struct drm_msm_3d_gamut *payload,
 		*scale_off = (*opcode == gamut_mode_13a) ?
 			GAMUT_SCALEA_OFFSET_OFF : GAMUT_SCALEB_OFFSET_OFF;
 		*opcode <<= 2;
-		*opcode |= GAMUT_MAP_EN;
 		break;
 	default:
 		rc = -EINVAL;
@@ -377,6 +375,45 @@ static int sde_gamut_get_mode_info(struct drm_msm_3d_gamut *payload,
 	return rc;
 }
 
+static void dspp_3d_gamutv4_off(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct sde_reg_dma_kickoff_cfg kick_off;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 op_mode = 0;
+	struct sde_hw_reg_dma_ops *dma_ops;
+	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+	int rc;
+
+	dma_ops = sde_reg_dma_get_ops();
+	dma_ops->reset_reg_dma_buf(dspp_buf[GAMUT][ctx->idx]);
+
+	REG_DMA_INIT_OPS(dma_write_cfg, dspp_mapping[ctx->idx], GAMUT,
+			dspp_buf[GAMUT][ctx->idx]);
+
+	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0);
+	rc = dma_ops->setup_payload(&dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("write decode select failed ret %d\n", rc);
+		return;
+	}
+
+	REG_DMA_SETUP_OPS(dma_write_cfg,
+		ctx->cap->sblk->gamut.base,
+		&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0);
+	rc = dma_ops->setup_payload(&dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("opmode write single reg failed ret %d\n", rc);
+		return;
+	}
+
+	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+	kick_off.last_command = hw_cfg->last_feature;
+	rc = dma_ops->kick_off(&kick_off);
+	if (rc)
+		DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
 void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg)
 {
 	struct drm_msm_3d_gamut *payload;
@@ -394,7 +431,7 @@ void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg)
 	op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut.base);
 	if (!hw_cfg->payload) {
 		DRM_DEBUG_DRIVER("disable gamut feature\n");
-		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gamut.base, 0);
+		dspp_3d_gamutv4_off(ctx, cfg);
 		return;
 	}
 
@@ -436,7 +473,7 @@ void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg)
 		}
 		REG_DMA_SETUP_OPS(dma_write_cfg,
 		    ctx->cap->sblk->gamut.base + GAMUT_LOWER_COLOR_OFF,
-		    &payload->col[i][0].c0, tbl_len,
+		    &payload->col[i][0].c2_c1, tbl_len,
 		    REG_BLK_WRITE_MULTIPLE, 2, 0);
 		rc = dma_ops->setup_payload(&dma_write_cfg);
 		if (rc) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index d15b804..d5f03a6a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -563,6 +563,10 @@ static int sde_hw_rot_commit(struct sde_hw_rot *hw, struct sde_hw_rot_cmd *data,
 	case SDE_HW_ROT_CMD_COMMIT:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_COMMIT;
 		break;
+	case SDE_HW_ROT_CMD_START:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_START;
+		priv_handle = data->priv_handle;
+		break;
 	case SDE_HW_ROT_CMD_CLEANUP:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_CLEANUP;
 		priv_handle = data->priv_handle;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index a4f5b49..e490052 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -24,11 +24,13 @@ struct sde_hw_rot;
  * enum sde_hw_rot_cmd_type - type of rotator hardware command
  * @SDE_HW_ROT_CMD_VALIDATE: validate rotator command; do not commit
  * @SDE_HW_ROT_CMD_COMMIT: commit/execute rotator command
+ * @SDE_HW_ROT_CMD_START: mdp is ready to start
  * @SDE_HW_ROT_CMD_CLEANUP: cleanup rotator command after it is done
  */
 enum sde_hw_rot_cmd_type {
 	SDE_HW_ROT_CMD_VALIDATE,
 	SDE_HW_ROT_CMD_COMMIT,
+	SDE_HW_ROT_CMD_START,
 	SDE_HW_ROT_CMD_CLEANUP,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a1f5cee..694d267 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -79,11 +79,16 @@
 #define SSPP_SW_PIX_EXT_C3_LR              0x120
 #define SSPP_SW_PIX_EXT_C3_TB              0x124
 #define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
 #define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
 #define SSPP_EXCL_REC_SIZE                 0x1B4
 #define SSPP_EXCL_REC_XY                   0x1B8
 #define SSPP_VIG_OP_MODE                   0x0
 #define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
 
 /* SSPP_QOS_CTRL */
 #define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
@@ -186,6 +191,9 @@
 #define VIG_CSC_10_EN          BIT(0)
 #define CSC_10BIT_OFFSET       4
 
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
 static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
@@ -1041,6 +1049,51 @@ static void sde_hw_sspp_get_sbuf_status(struct sde_hw_pipe *ctx,
 	status->rd_ptr[1] = val & 0xffff;
 }
 
+static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_ts_cfg *cfg,
+		enum sde_sspp_multirect_index index)
+{
+	u32 idx;
+	u32 ts_offset, ts_prefill_offset;
+	u32 ts_count = 0, ts_bytes = 0;
+	const struct sde_sspp_cfg *cap;
+
+	if (!ctx || !cfg || !ctx->cap)
+		return;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	cap = ctx->cap;
+
+	if (index == SDE_SSPP_RECT_0 &&
+			test_bit(SDE_SSPP_TS_PREFILL, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_PREFILL;
+	} else if (index == SDE_SSPP_RECT_1 &&
+			test_bit(SDE_SSPP_TS_PREFILL_REC1, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER_REC1;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_REC1_PREFILL;
+	} else {
+		return;
+	}
+
+	if (cfg->time) {
+		ts_bytes = mult_frac(TS_CLK * 1000000ULL, cfg->size,
+				cfg->time);
+		if (ts_bytes > SSPP_TRAFFIC_SHAPER_BPC_MAX)
+			ts_bytes = SSPP_TRAFFIC_SHAPER_BPC_MAX;
+	}
+
+	if (ts_bytes) {
+		ts_count = DIV_ROUND_UP_ULL(cfg->size, ts_bytes);
+		ts_bytes |= BIT(31) | BIT(27);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, ts_offset, ts_bytes);
+	SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
+}
+
 static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
@@ -1062,6 +1115,9 @@ static void _setup_layer_ops(struct sde_hw_pipe *c,
 		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
 	}
 
+	if (test_bit(SDE_SSPP_TS_PREFILL, &features))
+		c->ops.setup_ts_prefill = sde_hw_sspp_setup_ts_prefill;
+
 	if (test_bit(SDE_SSPP_CSC, &features) ||
 		test_bit(SDE_SSPP_CSC_10BIT, &features))
 		c->ops.setup_csc = sde_hw_sspp_setup_csc;
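
The prefill traffic-shaper programming reduces to a clamp plus a ceiling division; a small worked example with hypothetical cfg values (no register-bit semantics beyond what the code shows are asserted here):

	/*
	 * Hypothetical sde_hw_pipe_ts_cfg: size = 2048 and a time value for
	 * which mult_frac(TS_CLK * 1000000ULL, size, time) exceeds 0xFF:
	 *
	 *   ts_bytes clamps to SSPP_TRAFFIC_SHAPER_BPC_MAX   = 255
	 *   ts_count = DIV_ROUND_UP_ULL(2048, 255)           = 9
	 *   ts_bytes is then OR'd with BIT(31) | BIT(27) before the write.
	 *
	 * With cfg->time == 0 (or cfg->size == 0) both registers are written
	 * as 0, disabling the shaper for that rectangle.
	 */
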
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 1b81e54..010b363 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -343,6 +343,16 @@ struct sde_hw_pipe_sc_cfg {
 };
 
 /**
+ * struct sde_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct sde_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
  * Maximum number of stream buffer plane
  */
 #define SDE_PIPE_SBUF_PLANE_NUM	2
@@ -554,6 +564,16 @@ struct sde_hw_sspp_ops {
 	 */
 	void (*get_sbuf_status)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_sbuf_status *status);
+
+	/**
+	 * setup_ts_prefill - setup prefill traffic shaper
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to traffic shaper configuration
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_ts_cfg *cfg,
+			enum sde_sspp_multirect_index index);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index bd212e2..19f999e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -216,10 +216,12 @@ static void sde_hw_setup_vsync_sel(struct sde_hw_mdp *mdp,
 
 	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
 	for (i = 0; i < cfg->pp_count; i++) {
+		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
 		if (watchdog_te)
-			reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+			reg |= 0xF << pp_offset[pp_idx];
 		else
-			reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+			reg &= ~(0xF << pp_offset[pp_idx]);
 	}
 
 	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 9cb4494..faf25c7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -80,12 +80,12 @@ struct sde_danger_safe_status {
  * struct sde_watchdog_te_status - configure watchdog timer to generate TE
  * @pp_count: number of ping pongs active
  * @frame_rate: Display frame rate
- * @ppnumber: base address of ping pong info
+ * @ppnumber: ping pong index array
  */
 struct sde_watchdog_te_status {
 	u32 pp_count;
 	u32 frame_rate;
-	u32 ppnumber[];
+	u32 ppnumber[PINGPONG_MAX];
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 048ec47..9b9763a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -33,6 +33,8 @@
 #define VBIF_OUT_WR_LIM_CONF0		0x00D4
 #define VBIF_XIN_HALT_CTRL0		0x0200
 #define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
 
 static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
 		u32 xin_id, bool rd, u32 limit)
@@ -104,6 +106,35 @@ static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
 	return (reg_val & BIT(xin_id)) ? true : false;
 }
 
+static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
+
+	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
 static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
 		unsigned long cap)
 {
@@ -111,6 +142,8 @@ static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
 	ops->get_limit_conf = sde_hw_get_limit_conf;
 	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
 	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = sde_hw_set_qos_remap;
 }
 
 static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
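
A quick worked example of the register indexing in sde_hw_set_qos_remap() above, using hypothetical xin/level values:

	/*
	 * xin_id = 10, level = 3:
	 *
	 *   reg_high  = ((10 & 0x8) >> 3) * 4 + (3 * 8) = 4 + 24 = 28
	 *   reg_shift = (10 & 0x7) * 4                  = 8
	 *
	 * Each priority level is 8 bytes apart from VBIF_XINL_QOS_RP_REMAP_000
	 * (and _LVL_REMAP_000), xins 0-7 and 8-15 use adjacent 32-bit words
	 * within a level, and each xin owns a 4-bit field of which the low
	 * 3 bits are written (mask 0x7 << reg_shift).
	 */
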
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index de7fac0..c67738b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,16 @@ struct sde_hw_vbif_ops {
 	 */
 	bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
 			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct sde_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
 };
 
 struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index e3b658a..eeb7a00 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,90 +49,14 @@ irqreturn_t sde_irq(struct msm_kms *kms)
 	return IRQ_HANDLED;
 }
 
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
-	.name = "sde",
-	.irq_mask = sde_hw_irq_mask,
-	.irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
-		unsigned int irq, irq_hw_number_t hwirq)
-{
-	struct sde_kms *sde_kms;
-	int rc;
-
-	if (!domain || !domain->host_data) {
-		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
-		return -EINVAL;
-	}
-	sde_kms = domain->host_data;
-
-	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
-	rc = irq_set_chip_data(irq, sde_kms);
-
-	return rc;
-}
-
-static const struct irq_domain_ops sde_hw_irqdomain_ops = {
-	.map = sde_hw_irqdomain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
 void sde_irq_preinstall(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct device *dev;
-	struct irq_domain *domain;
 
 	if (!sde_kms->dev || !sde_kms->dev->dev) {
 		pr_err("invalid device handles\n");
 		return;
 	}
-	dev = sde_kms->dev->dev;
-
-	domain = irq_domain_add_linear(dev->of_node, 32,
-			&sde_hw_irqdomain_ops, sde_kms);
-	if (!domain) {
-		pr_err("failed to add irq_domain\n");
-		return;
-	}
-
-	sde_kms->irq_controller.enabled_mask = 0;
-	sde_kms->irq_controller.domain = domain;
 
 	sde_core_irq_preinstall(sde_kms);
 }
@@ -162,9 +86,5 @@ void sde_irq_uninstall(struct msm_kms *kms)
 	}
 
 	sde_core_irq_uninstall(sde_kms);
-
-	if (sde_kms->irq_controller.domain) {
-		irq_domain_remove(sde_kms->irq_controller.domain);
-		sde_kms->irq_controller.domain = NULL;
-	}
+	sde_core_irq_domain_fini(sde_kms);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8cc196a..a7d6ecf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -25,10 +25,13 @@
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 
 #include "dsi_display.h"
 #include "dsi_drm.h"
 #include "sde_wb.h"
+#include "dp_display.h"
+#include "dp_drm.h"
 
 #include "sde_kms.h"
 #include "sde_core_irq.h"
@@ -512,8 +515,28 @@ static int _sde_kms_get_displays(struct sde_kms *sde_kms)
 			wb_display_get_displays(sde_kms->wb_displays,
 					sde_kms->wb_display_count);
 	}
+
+	/* dp */
+	sde_kms->dp_displays = NULL;
+	sde_kms->dp_display_count = dp_display_get_num_of_displays();
+	if (sde_kms->dp_display_count) {
+		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
+				sizeof(void *), GFP_KERNEL);
+		if (!sde_kms->dp_displays) {
+			SDE_ERROR("failed to allocate dp displays\n");
+			goto exit_deinit_dp;
+		}
+		sde_kms->dp_display_count =
+			dp_display_get_displays(sde_kms->dp_displays,
+					sde_kms->dp_display_count);
+	}
 	return 0;
 
+exit_deinit_dp:
+	kfree(sde_kms->dp_displays);
+	sde_kms->dp_display_count = 0;
+	sde_kms->dp_displays = NULL;
+
 exit_deinit_wb:
 	kfree(sde_kms->wb_displays);
 	sde_kms->wb_display_count = 0;
@@ -579,6 +602,14 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		.soft_reset =   NULL,
 		.get_topology = sde_wb_get_topology
 	};
+	static const struct sde_connector_ops dp_ops = {
+		.post_init  = dp_connector_post_init,
+		.detect     = dp_connector_detect,
+		.get_modes  = dp_connector_get_modes,
+		.mode_valid = dp_connector_mode_valid,
+		.get_info   = dp_connector_get_info,
+		.get_topology   = dp_connector_get_topology,
+	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
 	void *display, *connector;
@@ -590,7 +621,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
+				sde_kms->dp_display_count;
 	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
 		max_encoders = ARRAY_SIZE(priv->encoders);
 		SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -679,6 +711,47 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
 			sde_encoder_destroy(encoder);
 		}
 	}
+	/* dp */
+	for (i = 0; i < sde_kms->dp_display_count &&
+			priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dp_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dp_connector_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dp get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("dp encoder init failed %d\n", i);
+			continue;
+		}
+
+		rc = dp_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					NULL,
+					display,
+					&dp_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DisplayPort);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dp %d connector init failed\n", i);
+			dp_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
 
 	return 0;
 }
@@ -744,6 +817,9 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
 	priv = dev->dev_private;
 	catalog = sde_kms->catalog;
 
+	ret = sde_core_irq_domain_add(sde_kms);
+	if (ret)
+		goto fail_irq;
 	/*
 	 * Query for underlying display drivers, and create connectors,
 	 * bridges and encoders for them.
@@ -821,6 +897,8 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
 	return 0;
 fail:
 	_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+	sde_core_irq_domain_fini(sde_kms);
 	return ret;
 }
 
@@ -950,6 +1028,13 @@ static void sde_kms_fbo_destroy(struct sde_kms_fbo *fbo)
 	}
 }
 
+static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
+		uint32_t flags)
+{
+	if (msm_obj)
+		msm_obj->flags |= flags;
+}
+
 struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
 		u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
 {
@@ -1036,10 +1121,13 @@ struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
 			fbo->bo[0] = NULL;
 			goto done;
 		}
+
+		/* insert extra bo flags */
+		sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
 	} else {
 		mutex_lock(&dev->struct_mutex);
 		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
-				MSM_BO_SCANOUT | MSM_BO_WC);
+				MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
 		if (IS_ERR(fbo->bo[0])) {
 			mutex_unlock(&dev->struct_mutex);
 			SDE_ERROR("failed to new gem buffer\n");
@@ -1539,6 +1627,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 		goto perf_err;
 	}
 
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
 	/*
 	 * _sde_kms_drm_obj_init should create the DRM related objects
 	 * i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1564,23 +1660,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
-		rc = PTR_ERR(sde_kms->hw_intr);
-		if (!sde_kms->hw_intr)
-			rc = -EINVAL;
-		SDE_ERROR("hw_intr init failed: %d\n", rc);
-		sde_kms->hw_intr = NULL;
-		goto hw_intr_init_err;
-	}
-
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 	return 0;
 
-hw_intr_init_err:
-	_sde_kms_drm_obj_destroy(sde_kms);
 drm_obj_init_err:
 	sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
 perf_err:
 power_error:
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 1f56d73..d20af9f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -188,6 +188,8 @@ struct sde_kms {
 	void **dsi_displays;
 	int wb_display_count;
 	void **wb_displays;
+	int dp_display_count;
+	void **dp_displays;
 
 	bool has_danger_ctrl;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
index dcc0bd5..b77d64d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -166,7 +166,7 @@ void sde_kms_rect_intersect(const struct sde_rect *r1,
 	r = min((r1->x + r1->w), (r2->x + r2->w));
 	b = min((r1->y + r1->h), (r2->y + r2->h));
 
-	if (r < l || b < t) {
+	if (r <= l || b <= t) {
 		memset(result, 0, sizeof(*result));
 	} else {
 		result->x = l;
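
Note on the relational-operator change above: rectangles that merely touch now intersect to an empty rect. A small example with hypothetical rectangles:

	/*
	 * r1 = {x:0, y:0, w:100, h:100}, r2 = {x:100, y:0, w:50, h:100}:
	 *
	 *   l = max(0, 100) = 100,  r = min(100, 150) = 100  ->  r <= l
	 *
	 * The old "r < l" test fell through to the non-empty branch and
	 * produced a zero-width result at x = 100; with "r <= l" the result
	 * is now memset to an empty rect.
	 */
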
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 3a6de75..463c84e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -585,6 +585,99 @@ static void _sde_plane_set_ot_limit(struct drm_plane *plane,
 	sde_vbif_set_ot_limit(sde_kms, &ot_params);
 }
 
+/**
+ * _sde_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _sde_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
+	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = psde->is_rt_pipe;
+
+	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
+/**
+ * _sde_plane_set_ts_prefill - set prefill with traffic shaper
+ * @plane:	Pointer to drm plane
+ * @pstate:	Pointer to sde plane state
+ */
+static void _sde_plane_set_ts_prefill(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	struct sde_plane *psde;
+	struct sde_hw_pipe_ts_cfg cfg;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	if (!psde->pipe_hw || !psde->pipe_hw->ops.setup_ts_prefill)
+		return;
+
+	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_VBLANK_AMORTIZE);
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.size = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_SIZE);
+	cfg.time = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_TIME);
+
+	SDE_DEBUG("plane%d size:%llu time:%llu\n",
+			plane->base.id, cfg.size, cfg.time);
+	SDE_EVT32(DRMID(plane), cfg.size, cfg.time);
+	psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
+			pstate->multirect_index);
+}
+
 /* helper to update a state's input fence pointer from the property */
 static void _sde_plane_set_input_fence(struct sde_plane *psde,
 		struct sde_plane_state *pstate, uint64_t fd)
@@ -2041,6 +2134,23 @@ static void sde_plane_rot_atomic_update(struct drm_plane *plane,
 }
 
 /**
+ * sde_plane_rot_flush - perform rotator operations required at final flush
+ * @plane: Pointer to drm plane
+ * @pstate: Pointer to sde plane state
+ */
+static void sde_plane_rot_flush(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	if (!plane || !pstate || !pstate->rot.rot_hw ||
+			!pstate->rot.rot_hw->ops.commit)
+		return;
+
+	pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
+			&pstate->rot.rot_cmd,
+			SDE_HW_ROT_CMD_START);
+}
+
+/**
  * sde_plane_rot_destroy_state - destroy state for rotator stage
  * @plane: Pointer to drm plane
  * @state: Pointer to state to be destroyed
@@ -2447,7 +2557,7 @@ static void _sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde,
 		   pstate->excl_rect.h != old_pstate->excl_rect.h ||
 		   pstate->excl_rect.x != old_pstate->excl_rect.x ||
 		   pstate->excl_rect.y != old_pstate->excl_rect.y) {
-		SDE_DEBUG_PLANE(psde, "excl rect updated\n");
+		SDE_DEBUG_PLANE(psde, "excl_rect updated\n");
 		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 	}
 
@@ -2660,6 +2770,9 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
 				(char *)&fmt->base.pixel_format);
 			ret = -EINVAL;
 		}
+		SDE_DEBUG_PLANE(psde, "excl_rect: {%d,%d,%d,%d}\n",
+				pstate->excl_rect.x, pstate->excl_rect.y,
+				pstate->excl_rect.w, pstate->excl_rect.h);
 	}
 
 modeset_update:
@@ -2706,13 +2819,15 @@ static int sde_plane_atomic_check(struct drm_plane *plane,
 void sde_plane_flush(struct drm_plane *plane)
 {
 	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
 
-	if (!plane) {
+	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane\n");
 		return;
 	}
 
 	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
 
 	/*
 	 * These updates have to be done immediately before the plane flush
@@ -2733,7 +2848,10 @@ void sde_plane_flush(struct drm_plane *plane)
 
 	/* flag h/w flush complete */
 	if (plane->state)
-		to_sde_plane_state(plane->state)->pending = false;
+		pstate->pending = false;
+
+	/* signal inline rotator start */
+	sde_plane_rot_flush(plane, pstate);
 }
 
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
@@ -2830,6 +2948,10 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 		case PLANE_PROP_BLEND_OP:
 			/* no special action required */
 			break;
+		case PLANE_PROP_PREFILL_SIZE:
+		case PLANE_PROP_PREFILL_TIME:
+			pstate->dirty |= SDE_PLANE_DIRTY_PERF;
+			break;
 		case PLANE_PROP_ROT_DST_X:
 		case PLANE_PROP_ROT_DST_Y:
 		case PLANE_PROP_ROT_DST_W:
@@ -2985,7 +3107,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 						SDE_PIPE_SC_OP_MODE_OFFLINE;
 				pstate->sc_cfg.rd_en = false;
 				pstate->sc_cfg.rd_scid = 0;
-				pstate->sc_cfg.rd_noallocate = false;
+				pstate->sc_cfg.rd_noallocate = true;
 				pstate->sc_cfg.rd_op_type =
 					SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE;
 			}
@@ -3021,8 +3143,12 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
 	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
 		_sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
 		_sde_plane_set_ot_limit(plane, crtc);
+		if (pstate->dirty & SDE_PLANE_DIRTY_PERF)
+			_sde_plane_set_ts_prefill(plane, pstate);
 	}
 
+	_sde_plane_set_qos_remap(plane);
+
 	/* clear dirty */
 	pstate->dirty = 0x0;
 
@@ -3207,6 +3333,13 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 		msm_property_install_range(&psde->property_info, "color_fill",
 				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
 
+	msm_property_install_range(&psde->property_info,
+			"prefill_size", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_SIZE);
+	msm_property_install_range(&psde->property_info,
+			"prefill_time", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_TIME);
+
 	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
 	if (!info) {
 		SDE_ERROR("failed to allocate info memory\n");
@@ -3480,20 +3613,24 @@ static void _sde_plane_set_excl_rect_v1(struct sde_plane *psde,
 	}
 
 	if (!usr_ptr) {
-		SDE_DEBUG_PLANE(psde, "excl rect data removed\n");
+		SDE_DEBUG_PLANE(psde, "invalid excl_rect user data\n");
 		return;
 	}
 
 	if (copy_from_user(&excl_rect_v1, usr_ptr, sizeof(excl_rect_v1))) {
-		SDE_ERROR_PLANE(psde, "failed to copy excl rect data\n");
+		SDE_ERROR_PLANE(psde, "failed to copy excl_rect data\n");
 		return;
 	}
 
 	/* populate from user space */
 	pstate->excl_rect.x = excl_rect_v1.x1;
 	pstate->excl_rect.y = excl_rect_v1.y1;
-	pstate->excl_rect.w = excl_rect_v1.x2 - excl_rect_v1.x1 + 1;
-	pstate->excl_rect.h = excl_rect_v1.y2 - excl_rect_v1.y1 + 1;
+	pstate->excl_rect.w = excl_rect_v1.x2 - excl_rect_v1.x1;
+	pstate->excl_rect.h = excl_rect_v1.y2 - excl_rect_v1.y1;
+
+	SDE_DEBUG_PLANE(psde, "excl_rect: {%d,%d,%d,%d}\n",
+			pstate->excl_rect.x, pstate->excl_rect.y,
+			pstate->excl_rect.w, pstate->excl_rect.h);
 }
 
 static int sde_plane_atomic_set_property(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 2056a70..47611d1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -95,6 +95,7 @@ struct sde_plane_rot_state {
 #define SDE_PLANE_DIRTY_RECTS	0x1
 #define SDE_PLANE_DIRTY_FORMAT	0x2
 #define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_PERF	0x8
 #define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c0c8248..c675216 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -210,6 +210,61 @@ void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
 	return;
 }
 
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct sde_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!sde_kms || !params || !sde_kms->hw_mdp) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = sde_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		SDE_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		SDE_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
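+	/* Force the client clock on while programming the remap registers */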
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		SDE_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
 #ifdef CONFIG_DEBUG_FS
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index 4b1cb1c..d05c2e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -28,6 +28,22 @@ struct sde_vbif_set_ot_params {
 };
 
 /**
+ * struct sde_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct sde_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
+/**
  * sde_vbif_set_ot_limit - set OT limit for vbif client
  * @sde_kms:	SDE handler
  * @params:	Pointer to OT configuration parameters
@@ -35,6 +51,14 @@ struct sde_vbif_set_ot_params {
 void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
 		struct sde_vbif_set_ot_params *params);
 
+/**
+ * sde_vbif_set_qos_remap - set QoS priority level remap
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params);
+
 #ifdef CONFIG_DEBUG_FS
 int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
new file mode 100644
index 0000000..12165e8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+	RESERVED,
+	AUDIO_DATA_BLOCK,
+	VIDEO_DATA_BLOCK,
+	VENDOR_SPECIFIC_DATA_BLOCK,
+	SPEAKER_ALLOCATION_DATA_BLOCK,
+	VESA_DTC_DATA_BLOCK,
+	RESERVED2,
+	USE_EXTENDED_TAG
+};
+
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == ext_id)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+	return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+sde_cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+sde_cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
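+/* Walk the CEA data blocks: a header byte followed by the block payload */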
+#define sde_for_each_cea_db(cea, i, start, end) \
+for ((i) = (start); \
+(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if (sde_cea_db_tag(db) == blk_id)
+				return db;
+		}
+	}
+	return NULL;
+}
+
+
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	/*
+	 * In the CEA extension block:
+	 * - byte 2 == 4 means no non-DTD/data block collection is present.
+	 * - byte 2 == 0 means no non-DTD/data block collection and no DTD
+	 *   data are present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		if ((offset + block_len <= dbc_offset) &&
+		    (in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+				type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+
+	return NULL;
+}
+
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+		edid_ctrl->edid->mfg_id[1];
+
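+	/* Manufacturer ID packs three letters, five bits each ('A' == 1) */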
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+u32 video_format)
+{
+	u8 cea_mode = 0;
+	struct drm_display_mode *mode;
+
+	/* Need to add Y420 support flag to the modes */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		cea_mode = drm_match_cea_mode(mode);
+		if ((cea_mode != 0) && (cea_mode == video_format)) {
+			SDE_EDID_DEBUG("%s found match for %d ", __func__,
+			video_format);
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+		}
+	}
+}
+
+static void sde_edid_parse_Y420CMDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+	u32 offset = 0;
+	u8 len = 0;
+	u8 svd_len = 0;
+	const u8 *svd = NULL;
+	u32 i = 0, j = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: edid_ctrl is NULL\n", __func__);
+		return;
+	}
+
+	if (!db) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	len = db[0] & 0x1f;
+
+	if (len < 7)
+		return;
+	/* Bytes 3 to L+1 contain the YCbCr 4:2:0 capability bit map */
+	offset += 2;
+
+	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+
+	if (svd) {
+		/*moving to the next byte as vic info begins there*/
+		++svd;
+		svd_len = svd[0] & 0x1f;
+	}
+
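+	/* A set bit marks the matching SVD as YCbCr 4:2:0 capable */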
+	for (i = 0; i < svd_len; i++, j++) {
+		video_format = *(svd + i) & 0x7F;
+		if (db[offset] & (1 << j))
+			sde_edid_set_y420_support(connector, video_format);
+
+		if (j & 0x80) {
+			j = j/8;
+			offset++;
+			if (offset >= len)
+				break;
+		}
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+static void sde_edid_parse_Y420VDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+	u8 len = db[0] & 0x1f;
+	u32 i = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	/* Offset to byte 3 */
+	db += 2;
+	for (i = 0; i < len - 1; i++) {
+		video_format = *(db + i) & 0x7F;
+		/*
+		 * mode was already added in get_modes()
+		 * only need to set the Y420 support flag
+		 */
+		sde_edid_set_y420_support(connector, video_format);
+	}
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_mode_format(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+	const u8 *db = NULL;
+	struct drm_display_mode *mode;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	/* Set YUV mode support flags for YCbcr420VDB */
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_VIDEO_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+	/* Set RGB supported on all modes where YUV is not set */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+	}
+
+
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_CAPABILITY_MAP_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_extract_audio_data_blocks(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size) {
+				SDE_DEBUG("No/Invalid Audio Data Block\n");
+				return;
+			}
+
+			continue;
+		}
+
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb + 1, len);
+		offset = (adb - cea) + 1 + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void _sde_edid_extract_speaker_allocation_data(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len;
+	const u8 *sadb = NULL;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+		SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+		return;
+	}
+
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+	edid_ctrl->sadb_size = len;
+
+	SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+	struct sde_edid_ctrl *edid_ctrl = NULL;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl) {
+		SDE_ERROR("edid_ctrl alloc failed\n");
+		return NULL;
+	}
+	memset((edid_ctrl), 0, sizeof(*edid_ctrl));
+	SDE_EDID_DEBUG("%s -\n", __func__);
+	return edid_ctrl;
+}
+
+void sde_free_edid(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	kfree(edid_ctrl->edid);
+	edid_ctrl->edid = NULL;
+}
+
+void sde_edid_deinit(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	sde_free_edid((void *)&edid_ctrl);
+	kfree(edid_ctrl);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+int _sde_edid_update_modes(struct drm_connector *connector,
+	void *input)
+{
+	int rc = 0;
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (edid_ctrl->edid) {
+		drm_mode_connector_update_edid_property(connector,
+			edid_ctrl->edid);
+
+		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+		sde_edid_set_mode_format(connector, edid_ctrl);
+		SDE_EDID_DEBUG("%s -", __func__);
+		return rc;
+	}
+
+	drm_mode_connector_update_edid_property(connector, NULL);
+	SDE_EDID_DEBUG("%s null edid -", __func__);
+	return rc;
+}
+
+bool sde_detect_hdmi_monitor(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+void sde_get_edid(struct drm_connector *connector,
+				  struct i2c_adapter *adapter, void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	edid_ctrl->edid = drm_get_edid(connector, adapter);
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	if (!edid_ctrl->edid)
+		SDE_ERROR("EDID read failed\n");
+
+	if (edid_ctrl->edid) {
+		sde_edid_extract_vendor_id(edid_ctrl);
+		_sde_edid_extract_audio_data_blocks(edid_ctrl);
+		_sde_edid_extract_speaker_allocation_data(edid_ctrl);
+	}
+	SDE_EDID_DEBUG("%s -\n", __func__);
+};
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
new file mode 100644
index 0000000..1143dc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#define EDID_VENDOR_ID_SIZE     4
+
+#define SDE_CEA_EXT    0x02
+#define SDE_EXTENDED_TAG 0x07
+
+enum extended_data_block_types {
+	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+	HDMI_VIDEO_DATA_BLOCK = 0x04,
+	HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+	INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+/**
+ * struct sde_edid_hdr_data - HDR Static Metadata
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+	u32 eotf;
+	bool metadata_type_one;
+	u32 max_luminance;
+	u32 avg_luminance;
+	u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+	u32 max_pclk_in_hz;
+	bool scdc_present;
+	bool scramble_support; /* scramble support for less than 340Mcsc */
+	bool read_req_support;
+	bool osd_disparity;
+	bool dual_view_support;
+	bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+	struct edid *edid;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+	bool hdr_supported;
+	char vendor_id[EDID_VENDOR_ID_SIZE];
+	struct sde_edid_sink_caps sink_caps;
+	struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - allocate and initialize an edid_ctrl structure.
+ *
+ * Return: handle to sde_edid_ctrl for the client, or NULL on failure.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector:   Handle to the drm_connector.
+ * @adapter:     handle to i2c adapter for DDC read
+ * @edid_ctrl:   Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+struct i2c_adapter *adapter,
+void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - detect HDMI mode.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: true if the connected sink is HDMI, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate EDID modes.
+ * @connector:     Handle to the drm_connector.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: number of modes added to the connector.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+							void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 1e4f6b1..fb7f85c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -333,6 +333,31 @@ static int _sde_power_data_bus_set_quota(
 		return -EINVAL;
 	}
 
+	pdbus->ab_rt = ab_quota_rt;
+	pdbus->ib_rt = ib_quota_rt;
+	pdbus->ab_nrt = ab_quota_nrt;
+	pdbus->ib_nrt = ib_quota_nrt;
+
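+	/* Clamp the requested quotas to the enable/disable floor votes */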
+	if (pdbus->enable) {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+	} else {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+	}
+
 	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
 		new_uc_idx = 0;
 	} else {
@@ -571,19 +596,12 @@ static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus,
 							bool enable)
 {
 	int rc = 0;
-	u64 ab_quota_rt, ab_quota_nrt;
-	u64 ib_quota_rt, ib_quota_nrt;
 
-	ab_quota_rt = ab_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA;
-	ib_quota_rt = ib_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA;
+	pdbus->enable = enable;
 
 	if (pdbus->data_bus_hdl)
-		rc = _sde_power_data_bus_set_quota(pdbus, ab_quota_rt,
-				ab_quota_nrt, ib_quota_rt, ib_quota_nrt);
+		rc = _sde_power_data_bus_set_quota(pdbus, pdbus->ab_rt,
+				pdbus->ab_nrt, pdbus->ib_rt, pdbus->ib_nrt);
 
 	if (rc)
 		pr_err("failed to set data bus vote rc=%d enable:%d\n",
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index da68139..c526b71 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -93,6 +93,11 @@ struct sde_power_client {
  * @bus_channels: number of memory bus channels
  * @curr_bw_uc_idx: current use case index of data bus
  * @ao_bw_uc_idx: active only use case index of data bus
+ * @ab_rt: realtime ab quota
+ * @ib_rt: realtime ib quota
+ * @ab_nrt: non-realtime ab quota
+ * @ib_nrt: non-realtime ib quota
+ * @enable: true if bus is enabled
  */
 struct sde_power_data_bus_handle {
 	struct msm_bus_scale_pdata *data_bus_scale_table;
@@ -102,6 +107,11 @@ struct sde_power_data_bus_handle {
 	u32 bus_channels;
 	u32 curr_bw_uc_idx;
 	u32 ao_bw_uc_idx;
+	u64 ab_rt;
+	u64 ib_rt;
+	u64 ab_nrt;
+	u64 ib_nrt;
+	bool enable;
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 50710cd..7bf2211 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -123,6 +123,7 @@ EXPORT_SYMBOL(sde_rsc_client_create);
 void sde_rsc_client_destroy(struct sde_rsc_client *client)
 {
 	struct sde_rsc_priv *rsc;
+	enum sde_rsc_state state;
 
 	if (!client) {
 		pr_debug("invalid client\n");
@@ -138,9 +139,13 @@ void sde_rsc_client_destroy(struct sde_rsc_client *client)
 		goto end;
 
 	mutex_lock(&rsc->client_lock);
-	if (client->current_state != SDE_RSC_IDLE_STATE)
+	state = client->current_state;
+	mutex_unlock(&rsc->client_lock);
+
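+	/* Update state without client_lock; retake it for list removal below */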
+	if (state != SDE_RSC_IDLE_STATE)
 		sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE,
 								NULL, -1);
+	mutex_lock(&rsc->client_lock);
 	list_del_init(&client->list);
 	mutex_unlock(&rsc->client_lock);
 
@@ -215,6 +220,39 @@ void sde_rsc_unregister_event(struct sde_rsc_event *event)
 }
 EXPORT_SYMBOL(sde_rsc_unregister_event);
 
+bool is_sde_rsc_available(int rsc_index)
+{
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return false;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(is_sde_rsc_available);
+
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	return rsc->current_state;
+}
+EXPORT_SYMBOL(get_sde_rsc_current_state);
+
 static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
 {
@@ -442,7 +480,7 @@ static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 	return rc;
 }
 
-static bool sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
+static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
 {
 	struct sde_rsc_client *client;
 	int rc = STATE_UPDATE_NOT_ALLOWED;
@@ -467,7 +505,7 @@ static bool sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
 	return rc;
 }
 
-static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
+static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 8894fee..cbacbb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -130,7 +130,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
 		poll = false;
 	}
 
-	if (list_empty(&therm->alarm.head) && poll)
+	if (poll)
 		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
 	spin_unlock_irqrestore(&therm->lock, flags);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 	spin_unlock_irqrestore(&fan->lock, flags);
 
 	/* schedule next fan update, if not at target speed already */
-	if (list_empty(&fan->alarm.head) && target != duty) {
+	if (target != duty) {
 		u16 bump_period = fan->bios.bump_period;
 		u16 slow_down_period = fan->bios.slow_down_period;
 		u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
 	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
 	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+	if (percent != (duty * 100)) {
 		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
 			next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c0..9a79e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
 	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+	if (therm->func->temp_get(therm) >= 0)
 		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 07dc82b..f2a86ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 	unsigned long flags;
 	LIST_HEAD(exec);
 
-	/* move any due alarms off the pending list */
+	/* Process pending alarms. */
 	spin_lock_irqsave(&tmr->lock, flags);
 	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
-		if (alarm->timestamp <= nvkm_timer_read(tmr))
-			list_move_tail(&alarm->head, &exec);
+		/* Have we hit the earliest alarm that hasn't gone off? */
+		if (alarm->timestamp > nvkm_timer_read(tmr)) {
+			/* Schedule it.  If we didn't race, we're done. */
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			if (alarm->timestamp > nvkm_timer_read(tmr))
+				break;
+		}
+
+		/* Move to completed list.  We'll drop the lock before
+		 * executing the callback so it can reschedule itself.
+		 */
+		list_move_tail(&alarm->head, &exec);
 	}
 
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&tmr->alarms)) {
-		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
-		tmr->func->alarm_init(tmr, alarm->timestamp);
-	} else {
+	/* Shut down interrupt if no more pending alarms. */
+	if (list_empty(&tmr->alarms))
 		tmr->func->alarm_fini(tmr);
-	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
 
-	/* execute any pending alarm handlers */
+	/* Execute completed callbacks. */
 	list_for_each_entry_safe(alarm, atemp, &exec, head) {
 		list_del_init(&alarm->head);
 		alarm->func(alarm);
@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
 	struct nvkm_alarm *list;
 	unsigned long flags;
 
-	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-	/* append new alarm to list, in soonest-alarm-first order */
+	/* Remove alarm from pending list.
+	 *
+	 * This both protects against the corruption of the list,
+	 * and implements alarm rescheduling/cancellation.
+	 */
 	spin_lock_irqsave(&tmr->lock, flags);
-	if (!nsec) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
+	list_del_init(&alarm->head);
+
+	if (nsec) {
+		/* Insert into pending list, ordered earliest to latest. */
+		alarm->timestamp = nvkm_timer_read(tmr) + nsec;
 		list_for_each_entry(list, &tmr->alarms, head) {
 			if (list->timestamp > alarm->timestamp)
 				break;
 		}
+
 		list_add_tail(&alarm->head, &list->head);
+
+		/* Update HW if this is now the earliest alarm. */
+		list = list_first_entry(&tmr->alarms, typeof(*list), head);
+		if (list == alarm) {
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			/* This shouldn't happen if callers aren't stupid.
+			 *
+			 * Worst case scenario is that it'll take roughly
+			 * 4 seconds for the next alarm to trigger.
+			 */
+			WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+		}
 	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
-
-	/* process pending alarms */
-	nvkm_timer_alarm_trigger(tmr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
 	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nvkm_timer_alarm_trigger(tmr);
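+		/* Ack before running alarms so a re-armed alarm is not lost */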
 		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
 		stat &= ~0x00000001;
 	}
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e5cfd69..dbacb20 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -114,6 +114,7 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_VBIF_GX_RESET_STATUS           0x215
 #define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
 #define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
 #define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
@@ -782,6 +783,7 @@
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
 #define A6XX_GMU_CM3_ITCM_START			0x1B400
 #define A6XX_GMU_CM3_DTCM_START			0x1C400
+#define A6XX_GMU_NMI_CONTROL_STATUS		0x1CBF0
 #define A6XX_GMU_BOOT_SLUMBER_OPTION		0x1CBF8
 #define A6XX_GMU_GX_VOTE_IDX			0x1CBF9
 #define A6XX_GMU_MX_VOTE_IDX			0x1CBFA
@@ -793,6 +795,11 @@
 #define A6XX_GMU_CM3_BOOT_CONFIG		0x1F801
 #define A6XX_GMU_CM3_FW_BUSY			0x1F81A
 #define A6XX_GMU_CM3_FW_INIT_RESULT		0x1F81C
+#define A6XX_GMU_CM3_CFG			0x1F82D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE	0x1F840
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0	0x1F841
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L	0x1F844
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H	0x1F845
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
@@ -802,6 +809,8 @@
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
+#define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
+#define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958
 
 /* HFI registers*/
 #define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
@@ -831,6 +840,10 @@
 #define A6XX_GMU_HOST2GMU_INTR_INFO_3		0x1F99E
 #define A6XX_GMU_GENERAL_7			0x1F9CC
 
+/* ISENSE registers */
+#define A6XX_GMU_ISENSE_CTRL			0x1F95D
+#define A6XX_GPU_CS_ENABLE_REG			0x23120
+
 #define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
 #define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
@@ -839,6 +852,7 @@
 #define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL      0x23B0A
 #define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL       0x23B0B
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK	0x23B0E
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
@@ -851,6 +865,9 @@
 #define A6XX_GMU_AHB_FENCE_RANGE_0		0x23B11
 #define A6XX_GMU_AHB_FENCE_RANGE_1		0x23B12
 
+/* GPUCC registers */
+#define A6XX_GPU_CC_GX_GDSCR                   0x24403
+
 /* GPU RSC sequencer registers */
 #define	A6XX_RSCC_PDC_SEQ_START_ADDR			0x23408
 #define A6XX_RSCC_PDC_MATCH_VALUE_LO			0x23409
@@ -865,6 +882,10 @@
 #define A6XX_RSCC_OVERRIDE_START_ADDR			0x23500
 #define A6XX_RSCC_SEQ_BUSY_DRV0				0x23501
 #define A6XX_RSCC_SEQ_MEM_0_DRV0			0x23580
+#define A6XX_RSCC_TCS0_DRV0_STATUS			0x23746
+#define A6XX_RSCC_TCS1_DRV0_STATUS                      0x238AE
+#define A6XX_RSCC_TCS2_DRV0_STATUS                      0x23A16
+#define A6XX_RSCC_TCS3_DRV0_STATUS                      0x23B7E
 
 /* GPU PDC sequencer registers in AOSS.RPMh domain */
 #define	PDC_GPU_ENABLE_PDC			0x21140
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index bf3a91a..f581cff 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -56,9 +56,6 @@ MODULE_PARM_DESC(swfdetect, "Enable soft fault detection");
 #define DRIVER_VERSION_MAJOR   3
 #define DRIVER_VERSION_MINOR   1
 
-/* Number of times to try hard reset */
-#define NUM_TIMES_RESET_RETRY 5
-
 #define KGSL_LOG_LEVEL_DEFAULT 3
 
 static void adreno_input_work(struct work_struct *work);
@@ -514,8 +511,6 @@ static struct input_handler adreno_input_handler = {
 	.id_table = adreno_input_ids,
 };
 
-static int adreno_soft_reset(struct kgsl_device *device);
-
 /*
  * _soft_reset() - Soft reset GPU
  * @adreno_dev: Pointer to adreno device
@@ -526,7 +521,7 @@ static int adreno_soft_reset(struct kgsl_device *device);
  * all the HW logic, restores GPU registers to default state and
  * flushes out pending VBIF transactions.
  */
-static void _soft_reset(struct adreno_device *adreno_dev)
+static int _soft_reset(struct adreno_device *adreno_dev)
 {
 	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int reg;
@@ -555,6 +550,8 @@ static void _soft_reset(struct adreno_device *adreno_dev)
 
 	if (gpudev->regulator_enable)
 		gpudev->regulator_enable(adreno_dev);
+
+	return 0;
 }
 
 
@@ -1509,8 +1506,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 			PERFCOUNTER_FLAG_KERNEL);
 
 		if (ret) {
-			KGSL_DRV_ERR(device,
-				"Unable to get the perf counters for DCVS\n");
+			WARN_ONCE(1, "Unable to get perf counters for DCVS\n");
 			adreno_dev->perfctr_pwr_lo = 0;
 		}
 	}
@@ -1624,7 +1620,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
  * Power up the GPU and initialize it.  If priority is specified then elevate
  * the thread priority for the duration of the start operation
  */
-static int adreno_start(struct kgsl_device *device, int priority)
+int adreno_start(struct kgsl_device *device, int priority)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int nice = task_nice(current);
@@ -1641,38 +1637,6 @@ static int adreno_start(struct kgsl_device *device, int priority)
 	return ret;
 }
 
-/**
- * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
- * @device: Pointer to the device whose VBIF pipe is to be cleared
- */
-static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	unsigned int val;
-	unsigned long wait_for_vbif;
-	int ret = 0;
-
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
-	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
-	while (1) {
-		adreno_readreg(adreno_dev,
-			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
-		if ((val & mask) == mask)
-			break;
-		if (time_after(jiffies, wait_for_vbif)) {
-			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
-			ret = -ETIMEDOUT;
-			break;
-		}
-	}
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
-	return ret;
-}
-
 static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
 {
 	int i;
@@ -2341,12 +2305,20 @@ bool adreno_hw_isidle(struct adreno_device *adreno_dev)
  * The GPU hardware is reset but we never pull power so we can skip
  * a lot of the standard adreno_stop/adreno_start sequence
  */
-static int adreno_soft_reset(struct kgsl_device *device)
+int adreno_soft_reset(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	int ret;
 
+	if (gpudev->oob_set) {
+		ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+		if (ret)
+			return ret;
+	}
+
 	kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
 	adreno_set_active_ctxs_null(adreno_dev);
 
@@ -2360,7 +2332,15 @@ static int adreno_soft_reset(struct kgsl_device *device)
 	adreno_perfcounter_save(adreno_dev);
 
 	/* Reset the GPU */
-	_soft_reset(adreno_dev);
+	if (gpudev->soft_reset)
+		ret = gpudev->soft_reset(adreno_dev);
+	else
+		ret = _soft_reset(adreno_dev);
+	if (ret) {
+		if (gpudev->oob_clear)
+			gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+		return ret;
+	}
 
 	/* Set the page table back to the default page table */
 	adreno_ringbuffer_set_global(adreno_dev, 0);
@@ -2402,6 +2382,9 @@ static int adreno_soft_reset(struct kgsl_device *device)
 	/* Restore physical performance counter values after soft reset */
 	adreno_perfcounter_restore(adreno_dev);
 
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+
 	return ret;
 }
 
@@ -2980,11 +2963,11 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
 }
 
 static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
-	struct clk *clk)
+	struct clk *clk, bool on)
 {
 	if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
 		ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
-			ADRENO_DEVICE(device), name, clk);
+			ADRENO_DEVICE(device), name, clk, on);
 }
 
 static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 78cecd0..26c5505 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -159,10 +159,12 @@
 #define KGSL_END_OF_PROFILE_IDENTIFIER	0x2DEFADE2
 #define KGSL_PWRON_FIXUP_IDENTIFIER	0x2AFAFAFA
 
+/* Number of times to try hard reset */
+#define NUM_TIMES_RESET_RETRY 5
+
 /* One cannot wait forever for the core to idle, so set an upper limit to the
  * amount of time to wait for the core to go idle
  */
-
 #define ADRENO_IDLE_TIMEOUT (20 * 1000)
 
 #define ADRENO_UCHE_GMEM_BASE	0x100000
@@ -204,6 +206,7 @@ enum adreno_gpurev {
 #define ADRENO_TIMEOUT_FAULT BIT(2)
 #define ADRENO_IOMMU_PAGE_FAULT BIT(3)
 #define ADRENO_PREEMPT_FAULT BIT(4)
+#define ADRENO_GMU_FAULT BIT(5)
 
 #define ADRENO_SPTP_PC_CTRL 0
 #define ADRENO_PPD_CTRL     1
@@ -499,6 +502,7 @@ struct adreno_device {
  * attached and enabled
  * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
  * is in progress
+ * @ADRENO_DEVICE_HARD_RESET - Set if soft reset fails and hard reset is needed
  */
 enum adreno_device_flags {
 	ADRENO_DEVICE_PWRON = 0,
@@ -515,6 +519,7 @@ enum adreno_device_flags {
 	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
 	ADRENO_DEVICE_ISDB_ENABLED = 12,
 	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
+	ADRENO_DEVICE_HARD_RESET = 14,
 };
 
 /**
@@ -846,7 +851,7 @@ struct adreno_gpudev {
 	void (*preemption_schedule)(struct adreno_device *);
 	void (*enable_64bit)(struct adreno_device *);
 	void (*clk_set_options)(struct adreno_device *,
-				const char *, struct clk *);
+				const char *, struct clk *, bool on);
 	void (*llc_configure_gpu_scid)(struct adreno_device *adreno_dev);
 	void (*llc_configure_gpuhtw_scid)(struct adreno_device *adreno_dev);
 	void (*llc_enable_overrides)(struct adreno_device *adreno_dev);
@@ -864,6 +869,8 @@ struct adreno_gpudev {
 	int (*wait_for_gmu_idle)(struct adreno_device *);
 	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
 				unsigned int fsynr1);
+	int (*reset)(struct kgsl_device *, int fault);
+	int (*soft_reset)(struct adreno_device *);
 };
 
 /**
@@ -952,6 +959,8 @@ extern struct adreno_gpudev adreno_a6xx_gpudev;
 extern int adreno_wake_nice;
 extern unsigned int adreno_wake_timeout;
 
+int adreno_start(struct kgsl_device *device, int priority);
+int adreno_soft_reset(struct kgsl_device *device);
 long adreno_ioctl(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, unsigned long arg);
 
@@ -1707,4 +1716,37 @@ static inline void adreno_perfcntr_active_oob_put(
 	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
 }
 
+/**
+ * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
+ * @device: Pointer to the device whose VBIF pipe is to be cleared
+ */
+static inline int adreno_vbif_clear_pending_transactions(
+	struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+	unsigned int val;
+	unsigned long wait_for_vbif;
+	int ret = 0;
+
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
+	/* wait for the transactions to clear */
+	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	while (1) {
+		adreno_readreg(adreno_dev,
+			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
+		if ((val & mask) == mask)
+			break;
+		if (time_after(jiffies, wait_for_vbif)) {
+			KGSL_DRV_ERR(device,
+				"Wait limit reached for VBIF XIN Halt\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
+	return ret;
+}
+
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 6c8b677..314ac85a 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -55,7 +55,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
 	{ adreno_is_a530, a530_vbif },
 	{ adreno_is_a512, a540_vbif },
 	{ adreno_is_a510, a530_vbif },
-	{ adreno_is_a508, a530_vbif },
+	{ adreno_is_a508, a540_vbif },
 	{ adreno_is_a505, a530_vbif },
 	{ adreno_is_a506, a530_vbif },
 };
@@ -1608,11 +1608,15 @@ static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
 }
 
 static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
-	const char *name, struct clk *clk)
+	const char *name, struct clk *clk, bool on)
 {
+	if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
+		!adreno_is_a508(adreno_dev))
+		return;
+
 	/* Handle clock settings for GFX PSCBCs */
-	if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) ||
-		adreno_is_a508(adreno_dev)) {
+	if (on) {
 		if (!strcmp(name, "mem_iface_clk")) {
 			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
 			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
@@ -1620,6 +1624,11 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
 			clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
 			clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
 		}
+	} else {
+		if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		}
 	}
 }
 
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index d04ddb0..314b2d8 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -29,8 +29,6 @@
 #include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
-#define OOB_REQUEST_TIMEOUT	10 /* ms */
-
 #define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
 		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
 
@@ -789,8 +787,10 @@ static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
 	wmb();
 }
 
-#define GMU_START_TIMEOUT 10	/* ms */
-#define GPU_START_TIMEOUT 100	/* ms */
+#define GMU_START_TIMEOUT	10	/* ms */
+#define GPU_START_TIMEOUT	100	/* ms */
+#define GPU_RESET_TIMEOUT	1	/* ms */
+#define GPU_RESET_TIMEOUT_US	10	/* us */
 
 /*
  * timed_poll_check() - polling *gmu* register at given offset until
@@ -813,12 +813,12 @@ static int timed_poll_check(struct kgsl_device *device,
 
 	t = jiffies + msecs_to_jiffies(timeout);
 
-	while (!time_after(jiffies, t)) {
+	do {
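+		/* Sample at least once even if the timeout already expired */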
 		kgsl_gmu_regread(device, offset, &value);
 		if ((value & mask) == expected_ret)
 			return 0;
 		cpu_relax();
-	}
+	} while (!time_after(jiffies, t));
 
 	return -EINVAL;
 }
@@ -952,7 +952,7 @@ static int a6xx_oob_set(struct adreno_device *adreno_dev,
 	int ret = 0;
 
 	if (!kgsl_gmu_isenabled(device))
-		return -ENODEV;
+		return 0;
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);
 
@@ -962,7 +962,9 @@ static int a6xx_oob_set(struct adreno_device *adreno_dev,
 			GPU_START_TIMEOUT,
 			check_mask)) {
 		ret = -ETIMEDOUT;
-		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
+		dev_err(&gmu->pdev->dev,
+			"OOB set timed out, mask %x\n", set_mask);
+		WARN_ON(true);
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1005,6 +1007,7 @@ static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
 #define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
 #define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
 #define SPTPRAC_CTRL_TIMEOUT		10 /* ms */
+#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)
 
 /*
  * a6xx_sptprac_enable() - Power on SPTPRAC
@@ -1045,6 +1048,10 @@ static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
 	if (!gmu->pdev)
 		return;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
 			SPTPRAC_POWEROFF_CTRL_MASK);
 
@@ -1073,7 +1080,7 @@ static int a6xx_hm_enable(struct adreno_device *adreno_dev)
 	ret = regulator_enable(gmu->gx_gdsc);
 	if (ret) {
 		dev_err(&gmu->pdev->dev,
-				"Failed to turn on GPU HM HS\n");
+			"Failed to turn on GPU HM HS\n");
 		return ret;
 	}
 
@@ -1099,6 +1106,10 @@ static int a6xx_hm_disable(struct adreno_device *adreno_dev)
 	if (!regulator_is_enabled(gmu->gx_gdsc))
 		return 0;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	clk_disable_unprepare(pwr->grp_clks[0]);
 
 	clk_set_rate(pwr->grp_clks[0],
@@ -1119,11 +1130,14 @@ static int a6xx_hm_sptprac_enable(struct kgsl_device *device)
 
 	/* If GMU does not control HM we must */
 	if (gmu->idle_level < GPU_HW_IFPC) {
+
 		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
 		if (ret) {
 			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
 			return ret;
 		}
+
+
 	}
 
 	/* If GMU does not control SPTPRAC we must */
@@ -1162,19 +1176,6 @@ static int a6xx_hm_sptprac_disable(struct kgsl_device *device)
 }
 
 /*
- * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
- * @device: Pointer to KGSL device
- * @on: True to turn on or false to turn off
- */
-static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
-{
-	if (on)
-		return a6xx_hm_sptprac_enable(device);
-	else
-		return a6xx_hm_sptprac_disable(device);
-}
-
-/*
  * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
  * @device: Pointer to KGSL device
  *
@@ -1206,6 +1207,8 @@ static int a6xx_gfx_rail_on(struct kgsl_device *device)
 	return ret;
 }
 
+#define GMU_POWER_STATE_SLUMBER 15
+
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -1281,13 +1284,12 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	/* Turn on the HM and SPTP head switches */
-	ret = a6xx_hm_sptprac_control(device, true);
+	ret = a6xx_hm_sptprac_enable(device);
 
 	return ret;
-
 error_rsc:
 	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
 	return -EINVAL;
 }
 
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
@@ -1296,7 +1298,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
 	int val, ret = 0;
 
 	/* Turn off the SPTP and HM head switches */
-	ret = a6xx_hm_sptprac_control(device, false);
+	ret = a6xx_hm_sptprac_disable(device);
 
 	/* RSC sleep sequence */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1339,7 +1341,12 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	int ret, i;
 
-	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
+	switch (boot_state) {
+	case GMU_COLD_BOOT:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
 
 		/* Turn on TCM retention */
 		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
@@ -1347,7 +1354,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
 			_load_gmu_rpmh_ucode(device);
 			/* Turn on the HM and SPTP head switches */
-			ret = a6xx_hm_sptprac_control(device, true);
+			ret = a6xx_hm_sptprac_enable(device);
 			if (ret)
 				return ret;
 		} else {
@@ -1371,10 +1378,19 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 					gmu->load_mode);
 			return -EINVAL;
 		}
-	} else {
+		break;
+	case GMU_WARM_BOOT:
 		ret = a6xx_rpmh_power_on_gpu(device);
 		if (ret)
 			return ret;
+		break;
+	case GMU_RESET:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
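+		/* fall through */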
+	default:
+		break;
 	}
 
 	/* Clear init result to make sure we are getting fresh value */
@@ -1394,8 +1410,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 	if (ret)
 		return ret;
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
-			&& boot_state == GMU_COLD_BOOT) {
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = a6xx_gfx_rail_on(device);
 		if (ret) {
 			a6xx_oob_clear(adreno_dev,
@@ -1425,7 +1440,7 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
 		unsigned int perf_idx, unsigned int bw_idx)
 {
 	struct hfi_dcvs_cmd dcvs_cmd = {
-		.ack_type = ACK_BLOCK,
+		.ack_type = ACK_NONBLOCK,
 		.freq = {
 			.perf_idx = perf_idx,
 			.clkset_opt = OPTION_AT_LEAST,
@@ -1439,10 +1454,6 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
 	union gpu_perf_vote vote;
 	int ret;
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
-		dcvs_cmd.ack_type = ACK_NONBLOCK;
-
 	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);
 
 	vote.fvote = dcvs_cmd.freq;
@@ -1469,43 +1480,6 @@ static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
 	return ret;
 }
 
-/*
- * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
- * @adreno_dev: Pointer to adreno device
- * @mode: requested power mode
- * @arg1: first argument for mode control
- * @arg2: second argument for mode control
- */
-static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
-		unsigned int mode, unsigned int arg1, unsigned int arg2)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = &device->gmu;
-	int ret;
-
-	switch (mode) {
-	case GMU_FW_START:
-		ret = a6xx_gmu_fw_start(device, arg1);
-		break;
-	case GMU_FW_STOP:
-		ret = a6xx_rpmh_power_off_gpu(device);
-		break;
-	case GMU_DCVS_NOHFI:
-		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
-		break;
-	case GMU_NOTIFY_SLUMBER:
-		ret = a6xx_notify_slumber(device);
-		break;
-	default:
-		dev_err(&gmu->pdev->dev,
-				"unsupported GMU power ctrl mode:%d\n", mode);
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
 static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
 {
 	unsigned int reg;
@@ -1585,6 +1559,290 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
 }
 
+#define VBIF_RESET_ACK_TIMEOUT	100
+#define VBIF_RESET_ACK_MASK	0x00f0
+
+static int a6xx_soft_reset(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int reg;
+
+	/*
+	 * For the soft reset case with GMU enabled this part is done
+	 * by the GMU firmware
+	 */
+	if (kgsl_gmu_isenabled(device) &&
+		!test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
+		return 0;
+
+
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
+	/*
+	 * Do a dummy read to get a brief read cycle delay for the
+	 * reset to take effect
+	 */
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
+
+	/* Check VBIF status after reset */
+	if (timed_poll_check(device,
+			A6XX_RBBM_VBIF_GX_RESET_STATUS,
+			VBIF_RESET_ACK_MASK,
+			VBIF_RESET_ACK_TIMEOUT,
+			VBIF_RESET_ACK_MASK))
+		return -ETIMEDOUT;
+
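+	/* Bring SPTPRAC back up after the reset */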
+	a6xx_sptprac_enable(adreno_dev);
+
+	return 0;
+}
+
+#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
+#define A6XX_IDLE_FULL_LLM              BIT(0)
+#define A6XX_WAKEUP_ACK                 BIT(1)
+#define A6XX_IDLE_FULL_ACK              BIT(0)
+#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+static void a6xx_isense_disable(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return;
+
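+	/* Only write the registers if ISENSE was left enabled */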
+	kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
+	if (val) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
+	}
+}
+
+static int a6xx_llm_glm_handshake(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return 0;
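+	/* If the LLM child is still active, request full idle and wait for the ack */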
+
+	kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
+	if (!(val & A6XX_STATE_OF_CHILD)) {
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
+				A6XX_IDLE_FULL_LLM);
+		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
+				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
+				A6XX_IDLE_FULL_ACK)) {
+			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
+{
+	int ret = 0;
+
+	if (!kgsl_gmu_isenabled(device))
+		return ret;
+
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+
+	return ret;
+}
+
+static int a6xx_gmu_suspend(struct kgsl_device *device)
+{
+	/* Max GX clients on A6xx is 2: GMU and KMD */
+	int ret = 0, max_client_num = 2;
+	struct gmu_device *gmu = &device->gmu;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* The following steps apply only if the LM feature is enabled */
+	/* Disable ISENSE if it's on */
+	a6xx_isense_disable(device);
+
+	/* LLM-GLM handshake sequence */
+	a6xx_llm_glm_handshake(device);
+
+	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
+	a6xx_sptprac_disable(adreno_dev);
+
+	/* Disconnect GPU from the bus. Cleared now and reconnected after reset */
+	adreno_vbif_clear_pending_transactions(device);
+	/* Unnecessary: a6xx_soft_reset(adreno_dev); */
+
+	/* Check no outstanding RPMh voting */
+	a6xx_complete_rpmh_votes(device);
+
+	if (gmu->idle_level < GPU_HW_IFPC) {
+		/* HM GDSC is controlled by KGSL */
+		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
+		if (ret)
+			dev_err(&gmu->pdev->dev,
+				"suspend: fail: power off GPU HM\n");
+	} else if (gmu->gx_gdsc) {
+		if (regulator_is_enabled(gmu->gx_gdsc)) {
+			/* Switch gx gdsc control from GMU to CPU.
+			 * Force a non-zero reference count in the clk
+			 * driver so that the next disable call will
+			 * turn off the GDSC.
+			 */
+			ret = regulator_enable(gmu->gx_gdsc);
+			if (ret)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: gx enable\n");
+
+			while (max_client_num) {
+				ret = regulator_disable(gmu->gx_gdsc);
+				if (!regulator_is_enabled(gmu->gx_gdsc))
+					break;
+				max_client_num -= 1;
+			}
+
+			if (!max_client_num)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: cannot disable gx\n");
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
+ * @adreno_dev: Pointer to adreno device
+ * @mode: requested power mode
+ * @arg1: first argument for mode control
+ * @arg2: second argument for mode control
+ */
+static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
+		unsigned int mode, unsigned int arg1, unsigned int arg2)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	int ret;
+
+	switch (mode) {
+	case GMU_FW_START:
+		ret = a6xx_gmu_fw_start(device, arg1);
+		break;
+	case GMU_SUSPEND:
+		ret = a6xx_gmu_suspend(device);
+		break;
+	case GMU_FW_STOP:
+		ret = a6xx_rpmh_power_off_gpu(device);
+		break;
+	case GMU_DCVS_NOHFI:
+		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
+		break;
+	case GMU_NOTIFY_SLUMBER:
+		ret = a6xx_notify_slumber(device);
+		break;
+	default:
+		dev_err(&gmu->pdev->dev,
+				"unsupported GMU power ctrl mode:%d\n", mode);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * a6xx_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @fault: Type of fault. Needed to skip soft reset for MMU fault
+ *
+ * Try to reset the GPU to recover from a fault.  First, try to do a low latency
+ * soft reset.  If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
+ */
+static int a6xx_reset(struct kgsl_device *device, int fault)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int ret = -EINVAL;
+	int i = 0;
+
+	/* Use the regular reset sequence for No GMU */
+	if (!kgsl_gmu_isenabled(device))
+		return adreno_reset(device, fault);
+
+	/* Transition from ACTIVE to RESET state */
+	kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
+
+	/* Try soft reset first */
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT)) {
+		unsigned int acked = 0;
+
+		/* NMI */
+		kgsl_gmu_regwrite(device, A6XX_GMU_NMI_CONTROL_STATUS, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, (1 << 9));
+
+		for (i = 0; i < 10; i++) {
+			kgsl_gmu_regread(device,
+					A6XX_GMU_NMI_CONTROL_STATUS, &acked);
+
+			/* NMI FW ACK received */
+			if (acked == 0x1)
+				break;
+
+			udelay(100);
+		}
+
+		if (acked)
+			ret = adreno_soft_reset(device);
+		if (ret)
+			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+	}
+	if (ret) {
+		/* If soft reset failed/skipped, then pull the power */
+		set_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+		/* Since the device is officially off now, clear the start bit */
+		clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
+
+		/* Keep trying to start the device until it works */
+		for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
+			ret = adreno_start(device, 0);
+			if (!ret)
+				break;
+
+			msleep(20);
+		}
+	}
+
+	clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+
+	if (ret)
+		return ret;
+
+	if (i != 0)
+		KGSL_DRV_WARN(device, "Device hard reset tried %d times\n", i);
+
+	/*
+	 * If active_cnt is non-zero then the system was active before
+	 * going into a reset - put it back in that state
+	 */
+
+	if (atomic_read(&device->active_cnt))
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
+	else
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
+
+	return ret;
+}
+
 static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1671,7 +1929,6 @@ static inline void _reg_rmw(void __iomem *regaddr,
 	wmb();
 }
 
-
 /*
  * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
  * @adreno_dev: The adreno device pointer
@@ -2162,6 +2419,13 @@ static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
 		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
 };
 
+static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
+	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
+};
+
 static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
@@ -2194,6 +2458,8 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
 	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
 };
@@ -2203,6 +2469,30 @@ static struct adreno_perfcounters a6xx_perfcounters = {
 	ARRAY_SIZE(a6xx_perfcounter_groups),
 };
 
+/* Program the GMU power counter to count GPU busy cycles */
+static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+		unsigned int counter)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+	/*
+	 * We have a limited number of power counters. Since we're not using
+	 * the total GPU cycle count, return an error if it is requested.
+	 */
+	if (counter == 0)
+		return -EINVAL;
+
+	if (!device->gmu.pdev)
+		return -ENODEV;
+
+	kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0);
+	kgsl_regrmw(device,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
+	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
+
+	return 0;
+}
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -2293,7 +2583,6 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 				A6XX_GMU_HOST2GMU_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
 				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
-
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
 				A6XX_RBBM_SECVID_TRUST_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
@@ -2325,6 +2614,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
 	.perfcounters = &a6xx_perfcounters,
+	.enable_pwr_counters = a6xx_enable_pwr_counters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
@@ -2337,4 +2627,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
 	.iommu_fault_block = a6xx_iommu_fault_block,
+	.reset = a6xx_reset,
+	.soft_reset = a6xx_soft_reset,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 63dbde0..bca3dd0 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -640,6 +640,9 @@ static size_t a6xx_legacy_snapshot_cluster_dbgahb(struct kgsl_device *device,
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
 	int i, j;
 
+	if (!device->snapshot_legacy)
+		return 0;
+
 	if (remain < sizeof(*header)) {
 		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
 		return 0;
@@ -748,6 +751,9 @@ static size_t a6xx_legacy_snapshot_non_ctx_dbgahb(struct kgsl_device *device,
 	unsigned int read_sel;
 	int i, j;
 
+	if (!device->snapshot_legacy)
+		return 0;
+
 	/* Figure out how many registers we are going to dump */
 	for (i = 0; i < regs->num_sets; i++) {
 		int start = regs->regs[i * 2];
@@ -1258,59 +1264,14 @@ static void a6xx_snapshot_debugbus(struct kgsl_device *device,
 	}
 }
 
-static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
-	struct kgsl_snapshot_registers *regs = priv;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	int count = 0, j, k;
-
-	/* Figure out how many registers we are going to dump */
-	for (j = 0; j < regs->count; j++) {
-		int start = regs->regs[j * 2];
-		int end = regs->regs[j * 2 + 1];
-
-		count += (end - start + 1);
-	}
-
-	if (remain < (count * 8) + sizeof(*header)) {
-		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-		return 0;
-	}
-
-	for (j = 0; j < regs->count; j++) {
-		unsigned int start = regs->regs[j * 2];
-		unsigned int end = regs->regs[j * 2 + 1];
-
-		for (k = start; k <= end; k++) {
-			unsigned int val;
-
-			kgsl_gmu_regread(device, k, &val);
-			*data++ = k;
-			*data++ = val;
-		}
-	}
-
-	header->count = count;
-
-	/* Return the size of the section */
-	return (count * 8) + sizeof(*header);
-}
-
 static void a6xx_snapshot_gmu(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
 {
-	struct kgsl_snapshot_registers gmu_regs = {
-		.regs = a6xx_gmu_registers,
-		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
-	};
-
 	if (!kgsl_gmu_isenabled(device))
 		return;
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
+					ARRAY_SIZE(a6xx_gmu_registers) / 2);
 }
 
 /* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b831d0d..e8b1c67 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2097,7 +2097,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 	/* Turn off all the timers */
 	del_timer_sync(&dispatcher->timer);
 	del_timer_sync(&dispatcher->fault_timer);
-	del_timer_sync(&adreno_dev->preempt.timer);
+	/*
+	 * Deleting an uninitialized timer blocks forever on builds with
+	 * kernel debugging disabled, so skip it if it is not initialized.
+	 */
+	if (adreno_is_preemption_enabled(adreno_dev))
+		del_timer_sync(&adreno_dev->preempt.timer);
 
 	mutex_lock(&device->mutex);
 
@@ -2183,7 +2188,11 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
 		kgsl_process_event_group(device, &hung_rb->events);
 	}
 
-	ret = adreno_reset(device, fault);
+	if (gpudev->reset)
+		ret = gpudev->reset(device, fault);
+	else
+		ret = adreno_reset(device, fault);
+
 	mutex_unlock(&device->mutex);
 	/* if any other fault got in until reset then ignore */
 	atomic_set(&dispatcher->fault, 0);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 9d847ae..bff1fda 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -54,21 +54,10 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
 
 	/* Read always on registers */
 	if (!adreno_is_a3xx(adreno_dev)) {
-		if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev))) {
-			uint32_t val_lo, val_hi;
-
-			adreno_read_gmureg(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &val_lo);
-			adreno_read_gmureg(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &val_hi);
-
-			time->ticks = (val_lo | ((uint64_t)val_hi << 32));
-		} else {
-			adreno_readreg64(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
-				&time->ticks);
-		}
+		adreno_readreg64(adreno_dev,
+			ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+			ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+			&time->ticks);
 
 		/* Mask hi bits as they may be incorrect on some targets */
 		if (ADRENO_GPUREV(adreno_dev) >= 400 &&
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d836cbb..6a39792 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -257,6 +257,13 @@ static void _deferred_put(struct work_struct *work)
 	kgsl_mem_entry_put(entry);
 }
 
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+	if (entry)
+		queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+
 static inline struct kgsl_mem_entry *
 kgsl_mem_entry_create(void)
 {
@@ -266,6 +273,7 @@ kgsl_mem_entry_create(void)
 		kref_init(&entry->refcount);
 		/* put this ref in userspace memory alloc and map ioctls */
 		kref_get(&entry->refcount);
+		INIT_WORK(&entry->work, _deferred_put);
 	}
 
 	return entry;
@@ -1244,7 +1252,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
 	spin_lock(&private->mem_lock);
 	idr_for_each_entry(&private->mem_idr, entry, id) {
 		if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
-			ret = kgsl_mem_entry_get(entry);
+			if (!entry->pending_free)
+				ret = kgsl_mem_entry_get(entry);
 			break;
 		}
 	}
@@ -1877,7 +1886,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -1895,7 +1904,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -1932,8 +1941,7 @@ static void gpuobj_free_fence_func(void *priv)
 {
 	struct kgsl_mem_entry *entry = priv;
 
-	INIT_WORK(&entry->work, _deferred_put);
-	queue_work(kgsl_driver.mem_workqueue, &entry->work);
+	kgsl_mem_entry_put_deferred(entry);
 }
 
 static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1997,7 +2005,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
 	else
 		ret = -EINVAL;
 
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 	return ret;
 }
 
@@ -3377,7 +3385,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
 	if (entry == NULL)
 		return -EINVAL;
 
+	if (!kgsl_mem_entry_set_pend(entry)) {
+		kgsl_mem_entry_put(entry);
+		return -EBUSY;
+	}
+
 	if (entry->memdesc.cur_bindings != 0) {
+		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3386,7 +3400,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -3446,7 +3460,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
 	if (entry == NULL)
 		return -EINVAL;
 
+	if (!kgsl_mem_entry_set_pend(entry)) {
+		kgsl_mem_entry_put(entry);
+		return -EBUSY;
+	}
+
 	if (entry->bind_tree.rb_node != NULL) {
+		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3455,7 +3475,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -4853,7 +4873,7 @@ static int __init kgsl_core_init(void)
 		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
-		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+		WQ_MEM_RECLAIM, 0);
 
 	kgsl_events_init();
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 876b668..ca1f181 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -40,6 +40,8 @@
  * that the KGSL module believes a device is idle (has been inactive	*
  * past its timer) and all system resources are released.  SUSPEND is	*
  * requested by the kernel and will be enforced upon all open devices.	*
+ * RESET indicates that a GPU or GMU hang has occurred. KGSL is	*
+ * taking a snapshot or recovering the GPU from the hang.		*
  */
 
 #define KGSL_STATE_NONE		0x00000000
@@ -49,6 +51,7 @@
 #define KGSL_STATE_SUSPEND	0x00000010
 #define KGSL_STATE_AWARE	0x00000020
 #define KGSL_STATE_SLUMBER	0x00000080
+#define KGSL_STATE_RESET	0x00000100
 
 /**
  * enum kgsl_event_results - result codes passed to an event callback when the
@@ -176,7 +179,7 @@ struct kgsl_functable {
 		unsigned int prelevel, unsigned int postlevel, bool post);
 	void (*regulator_disable_poll)(struct kgsl_device *device);
 	void (*clk_set_options)(struct kgsl_device *device,
-		const char *name, struct clk *clk);
+		const char *name, struct clk *clk, bool on);
 	void (*gpu_model)(struct kgsl_device *device, char *str,
 		size_t bufsz);
 	void (*stop_fault_timer)(struct kgsl_device *device);
@@ -291,6 +294,8 @@ struct kgsl_device {
 
 	/* Use CP Crash dumper to get GPU snapshot*/
 	bool snapshot_crashdumper;
+	/* Use host-side register reads to get GPU snapshot */
+	bool snapshot_legacy;
 
 	struct kobject snapshot_kobj;
 
@@ -527,18 +532,49 @@ static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
 		priv->stats[type].max = priv->stats[type].cur;
 }
 
+static inline bool kgsl_is_register_offset(struct kgsl_device *device,
+				unsigned int offsetwords)
+{
+	return ((offsetwords * sizeof(uint32_t)) < device->reg_len);
+}
+
+static inline bool kgsl_is_gmu_offset(struct kgsl_device *device,
+				unsigned int offsetwords)
+{
+	struct gmu_device *gmu = &device->gmu;
+
+	return (gmu->pdev &&
+		(offsetwords >= gmu->gmu2gpu_offset) &&
+		((offsetwords - gmu->gmu2gpu_offset) * sizeof(uint32_t) <
+			gmu->reg_len));
+}
+
 static inline void kgsl_regread(struct kgsl_device *device,
 				unsigned int offsetwords,
 				unsigned int *value)
 {
-	device->ftbl->regread(device, offsetwords, value);
+	if (kgsl_is_register_offset(device, offsetwords))
+		device->ftbl->regread(device, offsetwords, value);
+	else if (device->ftbl->gmu_regread &&
+			kgsl_is_gmu_offset(device, offsetwords))
+		device->ftbl->gmu_regread(device, offsetwords, value);
+	else {
+		WARN(1, "Out of bounds register read: 0x%x\n", offsetwords);
+		*value = 0;
+	}
 }
 
 static inline void kgsl_regwrite(struct kgsl_device *device,
 				 unsigned int offsetwords,
 				 unsigned int value)
 {
-	device->ftbl->regwrite(device, offsetwords, value);
+	if (kgsl_is_register_offset(device, offsetwords))
+		device->ftbl->regwrite(device, offsetwords, value);
+	else if (device->ftbl->gmu_regwrite &&
+			kgsl_is_gmu_offset(device, offsetwords))
+		device->ftbl->gmu_regwrite(device, offsetwords, value);
+	else
+		WARN(1, "Out of bounds register write: 0x%x\n", offsetwords);
 }
 
 static inline void kgsl_gmu_regread(struct kgsl_device *device,
@@ -565,9 +601,9 @@ static inline void kgsl_regrmw(struct kgsl_device *device,
 {
 	unsigned int val = 0;
 
-	device->ftbl->regread(device, offsetwords, &val);
+	kgsl_regread(device, offsetwords, &val);
 	val &= ~mask;
-	device->ftbl->regwrite(device, offsetwords, val | bits);
+	kgsl_regwrite(device, offsetwords, val | bits);
 }
 
 static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 54659fc..f72b3fa 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -748,6 +748,7 @@ static irqreturn_t gmu_irq_handler(int irq, void *data)
 {
 	struct gmu_device *gmu = data;
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -756,9 +757,12 @@ static irqreturn_t gmu_irq_handler(int irq, void *data)
 			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
 
 	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
-	if (status & GMU_INT_WDOG_BITE)
+	if (status & GMU_INT_WDOG_BITE) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU watchdog expired interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & GMU_INT_HOST_AHB_BUS_ERR)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"AHB bus error interrupt received\n");
@@ -775,6 +779,7 @@ static irqreturn_t hfi_irq_handler(int irq, void *data)
 	struct kgsl_hfi *hfi = data;
 	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -784,9 +789,12 @@ static irqreturn_t hfi_irq_handler(int irq, void *data)
 
 	if (status & HFI_IRQ_MSGQ_MASK)
 		tasklet_hi_schedule(&hfi->tasklet);
-	if (status & HFI_IRQ_CM3_FAULT_MASK)
+	if (status & HFI_IRQ_CM3_FAULT_MASK) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU CM3 fault interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & ~HFI_IRQ_MASK)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"Unhandled HFI interrupts 0x%lx\n",
@@ -850,14 +858,6 @@ static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
 	}
 
 	if (is_gmu) {
-		if (!devm_request_mem_region(&gmu->pdev->dev, res->start,
-					resource_size(res),
-					res->name)) {
-			dev_err(&gmu->pdev->dev,
-				"GMU regs request mem region failed\n");
-			return -ENOMEM;
-		}
-
 		gmu->reg_phys = res->start;
 		gmu->reg_len = resource_size(res);
 		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
@@ -1253,32 +1253,78 @@ static int gmu_disable_gdsc(struct gmu_device *gmu)
 	return ret;
 }
 
+static int gmu_fast_boot(struct kgsl_device *device)
+{
+	int ret;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+
+	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
+		GMU_RESET, 0);
+	if (ret)
+		return ret;
+
+	/* FIXME: enable WD interrupt */
+
+	ret = hfi_start(gmu, GMU_WARM_BOOT);
+	if (ret)
+		return ret;
+
+	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
+
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+
+	return ret;
+}
+
+static int gmu_suspend(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!test_bit(GMU_CLK_ON, &gmu->flags))
+		return 0;
+
+	/* Pending messages in all queues are abandoned */
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+	gmu_irq_disable(device);
+
+	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
+		return -EINVAL;
+
+	gmu_disable_clks(gmu);
+	gmu_disable_gdsc(gmu);
+	return 0;
+}
+
 /* To be called to power on both GPU and GMU */
 int gmu_start(struct kgsl_device *device)
 {
-	int ret = 0;
+	int ret = 0, perf_idx;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct gmu_device *gmu = &device->gmu;
 	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
 
-	if (!kgsl_gmu_isenabled(device))
-		return 0;
+	switch (device->state) {
+	case KGSL_STATE_INIT:
+	case KGSL_STATE_SUSPEND:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
 
-	if (test_bit(GMU_CLK_ON, &gmu->flags))
-		return 0;
-
-	ret = gmu_enable_gdsc(gmu);
-	if (ret)
-		return ret;
-
-	gmu_enable_clks(gmu);
-
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		/* Convert to RPMh frequency index */
-		int perf_idx = gmu->num_gpupwrlevels -
+		perf_idx = gmu->num_gpupwrlevels -
 				pwr->default_pwrlevel - 1;
 
 		/* Vote for 300MHz DDR for GMU to init */
@@ -1305,8 +1351,16 @@ int gmu_start(struct kgsl_device *device)
 		ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
 		if (ret)
 			goto error_gpu;
-	} else {
-		int perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
+
+		msm_bus_scale_client_update_request(gmu->pcl, 0);
+		break;
+
+	case KGSL_STATE_SLUMBER:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
+
+		perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
 
 		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
 				GMU_WARM_BOOT, 0);
@@ -1325,6 +1379,46 @@ int gmu_start(struct kgsl_device *device)
 				goto error_gpu;
 			gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
 		}
+		break;
+
+	case KGSL_STATE_RESET:
+		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv)) {
+			gmu_suspend(device);
+			gmu_enable_gdsc(gmu);
+			gmu_enable_clks(gmu);
+
+			perf_idx = gmu->num_gpupwrlevels -
+				pwr->active_pwrlevel - 1;
+
+			bus_level =
+				pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
+			ret = gpudev->rpmh_gpu_pwrctrl(
+				adreno_dev, GMU_FW_START, GMU_RESET, 0);
+			if (ret)
+				goto error_clks;
+
+			gmu_irq_enable(device);
+
+			ret = hfi_start(gmu, GMU_WARM_BOOT);
+			if (ret)
+				goto error_gpu;
+
+			/* Send DCVS level prior to reset */
+			ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
+			if (ret)
+				goto error_gpu;
+
+			ret = gpudev->oob_set(adreno_dev,
+				OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+
+		} else {
+			ret = gmu_fast_boot(device);
+		}
+		break;
+	default:
+		break;
 	}
 
 	/*
@@ -1332,30 +1426,20 @@ int gmu_start(struct kgsl_device *device)
 	 * In v2, this function call shall move ahead
 	 * of hfi_start() to save power.
 	 */
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
-		msm_bus_scale_client_update_request(gmu->pcl, 0);
-		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-			gpudev->oob_clear(adreno_dev,
-					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
-
-	return 0;
+	return ret;
 
 error_gpu:
 	hfi_stop(gmu);
 	gmu_irq_disable(device);
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 			gpudev->oob_clear(adreno_dev,
 					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
 error_bus:
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 error_clks:
 	gmu_disable_clks(gmu);
@@ -1438,7 +1522,7 @@ void gmu_remove(struct kgsl_device *device)
 
 	if (hfi->hfi_interrupt_num) {
 		devm_free_irq(&gmu->pdev->dev,
-				hfi->hfi_interrupt_num, gmu);
+				hfi->hfi_interrupt_num, hfi);
 		hfi->hfi_interrupt_num = 0;
 	}
 
@@ -1459,8 +1543,6 @@ void gmu_remove(struct kgsl_device *device)
 
 	if (gmu->reg_virt) {
 		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
-		devm_release_mem_region(&gmu->pdev->dev,
-				gmu->reg_phys, gmu->reg_len);
 		gmu->reg_virt = NULL;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 4cfc120..a741beb 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -139,7 +139,7 @@ enum gmu_load_mode {
 enum gmu_pwrctrl_mode {
 	GMU_FW_START,
 	GMU_FW_STOP,
-	GMU_POWER_RESET,
+	GMU_SUSPEND,
 	GMU_DCVS_NOHFI,
 	GMU_NOTIFY_SLUMBER,
 	INVALID_POWER_CTRL
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 30e1d7c..b05e18d 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -573,44 +573,41 @@ int hfi_start(struct gmu_device *gmu, uint32_t boot_state)
 	if (result)
 		return result;
 
-	if (boot_state == GMU_COLD_BOOT) {
-		major = adreno_dev->gpucore->gpmu_major;
-		minor = adreno_dev->gpucore->gpmu_minor;
+	major = adreno_dev->gpucore->gpmu_major;
+	minor = adreno_dev->gpucore->gpmu_minor;
+	result = hfi_get_fw_version(gmu,
+			FW_VERSION(major, minor), &ver);
+	if (result)
+		dev_err(dev, "Failed to get FW version via HFI\n");
 
-		result = hfi_get_fw_version(gmu,
-				FW_VERSION(major, minor), &ver);
-		if (result)
-			dev_err(dev, "Failed to get FW version via HFI\n");
+	gmu->ver = ver;
+	if (major != FW_VER_MAJOR(ver))
+		dev_err(dev, "FW version major %d error (expect %d)\n",
+				FW_VER_MAJOR(ver),
+				adreno_dev->gpucore->gpmu_major);
 
-		gmu->ver = ver;
-		if (major != FW_VER_MAJOR(ver))
-			dev_err(dev, "FW version major %d error (expect %d)\n",
-					FW_VER_MAJOR(ver),
-					adreno_dev->gpucore->gpmu_major);
+	if (minor > FW_VER_MINOR(ver))
+		dev_err(dev, "FW version minor %d error (expect %d)\n",
+				FW_VER_MINOR(ver),
+				adreno_dev->gpucore->gpmu_minor);
 
-		if (minor > FW_VER_MINOR(ver))
-			dev_err(dev, "FW version minor %d error (expect %d)\n",
-					FW_VER_MINOR(ver),
-					adreno_dev->gpucore->gpmu_minor);
+	result = hfi_send_perftbl(gmu);
+	if (result)
+		return result;
 
-		result = hfi_send_perftbl(gmu);
-		if (result)
-			return result;
+	result = hfi_send_bwtbl(gmu);
+	if (result)
+		return result;
 
-		result = hfi_send_bwtbl(gmu);
-		if (result)
-			return result;
-
-		/*
-		 * FW is not ready for LM configuration
-		 * without powering on GPU.
-		 */
-		/*
-		 * result = hfi_send_lmconfig(gmu);
-		 * if (result)
-		 * return result;
-		 */
-	}
+	/*
+	 * FW is not ready for LM configuration
+	 * without powering on GPU.
+	 */
+	/*
+	 * result = hfi_send_lmconfig(gmu);
+	 * if (result)
+	 * return result;
+	 */
 
 	set_bit(GMU_HFI_ON, &gmu->flags);
 	return 0;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7811079..4dd7b8e 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -156,9 +156,6 @@ static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr,
 		*ab = pwr->bus_ab_mbytes;
 	else
 		*ab = (pwr->bus_percent_ab * max_bw) / 100;
-
-	if (*ab > ib)
-		*ab = ib;
 }
 
 /**
@@ -2052,10 +2049,6 @@ static int _get_clocks(struct kgsl_device *device)
 
 			if (!strcmp(name, "isense_clk"))
 				pwr->isense_clk_indx = i;
-
-			if (device->ftbl->clk_set_options)
-				device->ftbl->clk_set_options(device, name,
-					pwr->grp_clks[i]);
 			break;
 		}
 	}
@@ -2480,6 +2473,22 @@ static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
 }
 
+static void
+kgsl_pwrctrl_clk_set_options(struct kgsl_device *device, bool on)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	for (i = 0; i < KGSL_MAX_CLKS; i++) {
+		if (pwr->grp_clks[i] == NULL)
+			continue;
+
+		if (device->ftbl->clk_set_options)
+			device->ftbl->clk_set_options(device, clocks[i],
+				pwr->grp_clks[i], on);
+	}
+}
+
 /**
  * _init() - Get the GPU ready to start, but don't turn anything on
  * @device - Pointer to the kgsl_device struct
@@ -2493,6 +2502,8 @@ static int _init(struct kgsl_device *device)
 		/* Force power on to do the stop */
 		status = kgsl_pwrctrl_enable(device);
 	case KGSL_STATE_ACTIVE:
+		/* fall through */
+	case KGSL_STATE_RESET:
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		del_timer_sync(&device->idle_timer);
 		kgsl_pwrscale_midframe_timer_cancel(device);
@@ -2527,6 +2538,7 @@ static int _wake(struct kgsl_device *device)
 		device->ftbl->resume(device);
 		/* fall through */
 	case KGSL_STATE_SLUMBER:
+		kgsl_pwrctrl_clk_set_options(device, true);
 		status = device->ftbl->start(device,
 				device->pwrctrl.superfast);
 		device->pwrctrl.superfast = false;
@@ -2563,6 +2575,7 @@ static int _wake(struct kgsl_device *device)
 				device->pwrctrl.interval_timeout);
 		break;
 	case KGSL_STATE_AWARE:
+		kgsl_pwrctrl_clk_set_options(device, true);
 		/* Enable state before turning on irq */
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -2595,6 +2608,11 @@ _aware(struct kgsl_device *device)
 	int status = 0;
 
 	switch (device->state) {
+	case KGSL_STATE_RESET:
+		if (!kgsl_gmu_isenabled(device))
+			break;
+		status = gmu_start(device);
+		break;
 	case KGSL_STATE_INIT:
 		status = kgsl_pwrctrl_enable(device);
 		break;
@@ -2645,6 +2663,7 @@ _nap(struct kgsl_device *device)
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
 		/* fallthrough */
 	case KGSL_STATE_SLUMBER:
+	case KGSL_STATE_RESET:
 		break;
 	case KGSL_STATE_AWARE:
 		KGSL_PWR_WARN(device,
@@ -2681,6 +2700,7 @@ _slumber(struct kgsl_device *device)
 		status = kgsl_pwrctrl_enable(device);
 		device->ftbl->suspend_context(device);
 		device->ftbl->stop(device);
+		kgsl_pwrctrl_clk_set_options(device, false);
 		kgsl_pwrctrl_disable(device);
 		kgsl_pwrscale_sleep(device);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
@@ -2787,6 +2807,9 @@ int kgsl_pwrctrl_change_state(struct kgsl_device *device, int state)
 		break;
 	case KGSL_STATE_SUSPEND:
 		status = _suspend(device);
+		break;
+	case KGSL_STATE_RESET:
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET);
 		break;
 	default:
 		KGSL_PWR_INFO(device, "bad state request 0x%x\n", state);
@@ -2838,6 +2860,8 @@ const char *kgsl_pwrstate_to_str(unsigned int state)
 		return "SUSPEND";
 	case KGSL_STATE_SLUMBER:
 		return "SLUMBER";
+	case KGSL_STATE_RESET:
+		return "RESET";
 	default:
 		break;
 	}
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 40d239c..7cbda72 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -875,6 +875,25 @@ static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%lu\n", timestamp);
 }
 
+static ssize_t snapshot_legacy_show(struct kgsl_device *device, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_legacy);
+}
+
+static ssize_t snapshot_legacy_store(struct kgsl_device *device,
+	const char *buf, size_t count)
+{
+	unsigned int val = 0;
+	int ret;
+
+	ret = kgsl_sysfs_store(buf, &val);
+
+	if (!ret && device)
+		device->snapshot_legacy = (bool)val;
+
+	return (ssize_t) ret < 0 ? ret : count;
+}
+
 static struct bin_attribute snapshot_attr = {
 	.attr.name = "dump",
 	.attr.mode = 0444,
@@ -894,6 +913,8 @@ static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
 static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
 static SNAPSHOT_ATTR(snapshot_crashdumper, 0644, snapshot_crashdumper_show,
 	snapshot_crashdumper_store);
+static SNAPSHOT_ATTR(snapshot_legacy, 0644, snapshot_legacy_show,
+	snapshot_legacy_store);
 
 static ssize_t snapshot_sysfs_show(struct kobject *kobj,
 	struct attribute *attr, char *buf)
@@ -975,6 +996,7 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
 	device->snapshot_faultcount = 0;
 	device->force_panic = 0;
 	device->snapshot_crashdumper = 1;
+	device->snapshot_legacy = 0;
 
 	ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
 		&device->dev->kobj, "snapshot");
@@ -1000,6 +1022,12 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
 
 	ret  = sysfs_create_file(&device->snapshot_kobj,
 			&attr_snapshot_crashdumper.attr);
+	if (ret)
+		goto done;
+
+	ret  = sysfs_create_file(&device->snapshot_kobj,
+			&attr_snapshot_legacy.attr);
+
 done:
 	return ret;
 }
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 622ccbc..989af91 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -23,6 +23,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
 #include <linux/qcom-geni-se.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
@@ -87,17 +88,31 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 	u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
 	u32 tx_stat = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
 	u32 rx_stat = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
+	u32 dm_tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
+	u32 dm_rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
+	u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
 	struct i2c_msg *cur = gi2c->cur;
 
 	dev_dbg(gi2c->dev,
 		"got i2c irq:%d, stat:0x%x, tx stat:0x%x, rx stat:0x%x\n",
 		irq, m_stat, tx_stat, rx_stat);
-	if (!cur || m_stat & SE_I2C_ERR) {
-		dev_err(gi2c->dev, "i2c txn err");
-		writel_relaxed(0, (gi2c->base + SE_GENI_TX_WATERMARK_REG));
+	if (!cur || (m_stat & SE_I2C_ERR) || (dm_tx_st & TX_SBE) ||
+		    (dm_rx_st & RX_SBE)) {
+		dev_err(gi2c->dev, "i2c err:st:0x%x, dm_t: 0x%x, dm_r: 0x%x\n",
+				   m_stat, dm_tx_st, dm_rx_st);
+		if (!dma)
+			writel_relaxed(0, (gi2c->base +
+					   SE_GENI_TX_WATERMARK_REG));
 		gi2c->err = -EIO;
 		goto irqret;
 	}
+
+	if (dma) {
+		dev_dbg(gi2c->dev, "i2c dma tx:0x%x, dma rx:0x%x\n", dm_tx_st,
+			dm_rx_st);
+		goto irqret;
+	}
+
 	if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
 		(m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
 		u32 rxcnt = rx_stat & RX_FIFO_WC_MSK;
@@ -112,10 +127,11 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 				cur->buf[i] = (u8) ((temp >> (p * 8)) & 0xff);
 			gi2c->cur_rd = i;
 			if (gi2c->cur_rd == cur->len) {
-				dev_dbg(gi2c->dev, "i:%d,read 0x%x\n", i, temp);
+				dev_dbg(gi2c->dev, "FIFO i:%d,read 0x%x\n",
+					i, temp);
 				break;
 			}
-			dev_dbg(gi2c->dev, "i: %d, read 0x%x\n", i, temp);
+			dev_dbg(gi2c->dev, "FIFO i: %d, read 0x%x\n", i, temp);
 		}
 	} else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
 					!(cur->flags & I2C_M_RD)) {
@@ -128,9 +144,9 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 				temp |= (((u32)(cur->buf[i]) << (p * 8)));
 			writel_relaxed(temp, gi2c->base + SE_GENI_TX_FIFOn);
 			gi2c->cur_wr = i;
-			dev_dbg(gi2c->dev, "i:%d,wrote 0x%x\n", i, temp);
+			dev_dbg(gi2c->dev, "FIFO i:%d,wrote 0x%x\n", i, temp);
 			if (gi2c->cur_wr == cur->len) {
-				dev_dbg(gi2c->dev, "i2c bytes done writing\n");
+				dev_dbg(gi2c->dev, "FIFO i2c bytes done writing\n");
 				writel_relaxed(0,
 				(gi2c->base + SE_GENI_TX_WATERMARK_REG));
 				break;
@@ -138,15 +154,25 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
 		}
 	}
 irqret:
-	writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
-	/* Ensure all writes are done before returning from ISR. */
-	wmb();
-	/* if this is err with done-bit not set, handle that thr' timeout. */
-	if (m_stat & M_CMD_DONE_EN) {
-		dev_dbg(gi2c->dev, "i2c irq: err:%d, stat:0x%x\n",
-							gi2c->err, m_stat);
-		complete(&gi2c->xfer);
+	if (m_stat)
+		writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
+
+	if (dma) {
+		if (dm_tx_st)
+			writel_relaxed(dm_tx_st, gi2c->base +
+				       SE_DMA_TX_IRQ_CLR);
+		if (dm_rx_st)
+			writel_relaxed(dm_rx_st, gi2c->base +
+				       SE_DMA_RX_IRQ_CLR);
+		/* Ensure all writes are done before returning from ISR. */
+		wmb();
 	}
+	/* An error without the done bit set is handled via the timeout. */
+	if (m_stat & M_CMD_DONE_EN)
+		complete(&gi2c->xfer);
+	else if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
+		complete(&gi2c->xfer);
+
 	return IRQ_HANDLED;
 }
 
@@ -175,11 +201,21 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		int stretch = (i < (num - 1));
 		u32 m_param = 0;
 		u32 m_cmd = 0;
+		dma_addr_t tx_dma = 0;
+		dma_addr_t rx_dma = 0;
+		enum se_xfer_mode mode = FIFO_MODE;
 
 		m_param |= (stretch ? STOP_STRETCH : 0);
 		m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
 
 		gi2c->cur = &msgs[i];
+		mode = msgs[i].len > 32 ? SE_DMA : FIFO_MODE;
+		ret = geni_se_select_mode(gi2c->base, mode);
+		if (ret) {
+			dev_err(gi2c->dev, "%s: Error mode init %d:%d:%d\n",
+				__func__, mode, i, msgs[i].len);
+			break;
+		}
 		if (msgs[i].flags & I2C_M_RD) {
 			dev_dbg(gi2c->dev,
 				"READ,n:%d,i:%d len:%d, stretch:%d\n",
@@ -188,22 +224,41 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 				       gi2c->base, SE_I2C_RX_TRANS_LEN);
 			m_cmd = I2C_READ;
 			geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
+			if (mode == SE_DMA) {
+				ret = geni_se_rx_dma_prep(gi2c->wrapper_dev,
+							gi2c->base, msgs[i].buf,
+							msgs[i].len, &rx_dma);
+				if (ret)
+					mode = FIFO_MODE;
+			}
+			if (mode == FIFO_MODE)
+				geni_se_select_mode(gi2c->base, mode);
 		} else {
 			dev_dbg(gi2c->dev,
-				"WRITE:n:%d,i%d len:%d, stretch:%d\n",
-					num, i, msgs[i].len, stretch);
+				"WRITE:n:%d,i:%d len:%d, stretch:%d, m_param:0x%x\n",
+					num, i, msgs[i].len, stretch, m_param);
 			geni_write_reg(msgs[i].len, gi2c->base,
 						SE_I2C_TX_TRANS_LEN);
 			m_cmd = I2C_WRITE;
 			geni_setup_m_cmd(gi2c->base, m_cmd, m_param);
-			/* Get FIFO IRQ */
-			geni_write_reg(1, gi2c->base, SE_GENI_TX_WATERMARK_REG);
+			if (mode == SE_DMA) {
+				ret = geni_se_tx_dma_prep(gi2c->wrapper_dev,
+							gi2c->base, msgs[i].buf,
+							msgs[i].len, &tx_dma);
+				if (ret)
+					mode = FIFO_MODE;
+			}
+			if (mode == FIFO_MODE) {
+				geni_se_select_mode(gi2c->base, mode);
+				/* Get FIFO IRQ */
+				geni_write_reg(1, gi2c->base,
+						SE_GENI_TX_WATERMARK_REG);
+			}
 		}
 		/* Ensure FIFO write go through before waiting for Done evet */
 		mb();
 		timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
 		if (!timeout) {
-			dev_err(gi2c->dev, "Timed out\n");
 			gi2c->err = -ETIMEDOUT;
 			gi2c->cur = NULL;
 			geni_abort_m_cmd(gi2c->base);
@@ -211,9 +266,24 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 		}
 		gi2c->cur_wr = 0;
 		gi2c->cur_rd = 0;
+		if (mode == SE_DMA) {
+			if (gi2c->err) {
+				if (msgs[i].flags != I2C_M_RD)
+					writel_relaxed(1, gi2c->base +
+							SE_DMA_TX_FSM_RST);
+				else
+					writel_relaxed(1, gi2c->base +
+							SE_DMA_RX_FSM_RST);
+				wait_for_completion_timeout(&gi2c->xfer, HZ);
+			}
+			geni_se_rx_dma_unprep(gi2c->wrapper_dev, rx_dma,
+					      msgs[i].len);
+			geni_se_tx_dma_unprep(gi2c->wrapper_dev, tx_dma,
+					      msgs[i].len);
+		}
+		ret = gi2c->err;
 		if (gi2c->err) {
 			dev_err(gi2c->dev, "i2c error :%d\n", gi2c->err);
-			ret = gi2c->err;
 			break;
 		}
 	}
@@ -352,6 +422,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
 	pm_runtime_enable(gi2c->dev);
 	i2c_add_adapter(&gi2c->adap);
 
+	dev_dbg(gi2c->dev, "I2C probed\n");
 	return 0;
 }
 
@@ -393,7 +464,6 @@ static int geni_i2c_runtime_resume(struct device *dev)
 
 		gi2c->tx_wm = gi2c_tx_depth - 1;
 		geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
-		geni_se_select_mode(gi2c->base, FIFO_MODE);
 		se_config_packing(gi2c->base, 8, 4, true);
 	}
 	enable_irq(gi2c->irq);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53..ab646a9 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -217,7 +217,15 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->poll.report_id,
+				     st->poll.index, sizeof(value), &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->poll_interval = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
 
@@ -259,7 +267,16 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->sensitivity.report_id,
+				     st->sensitivity.index, sizeof(value),
+				     &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->raw_hystersis = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
 
@@ -355,6 +372,9 @@ int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
 	/* Default unit of measure is milliseconds */
 	if (st->poll.units == 0)
 		st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
+
+	st->poll_interval = -1;
+
 	return 0;
 
 }
@@ -377,6 +397,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
 					HID_USAGE_SENSOR_PROY_POWER_STATE,
 					&st->power_state);
 
+	st->raw_hystersis = -1;
+
 	sensor_hub_input_get_attribute_info(hsdev,
 			HID_FEATURE_REPORT, usage_id,
 			HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index ecf592d..6082934 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,6 +51,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 			st->report_state.report_id,
 			st->report_state.index,
 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+
+		poll_value = hid_sensor_read_poll_value(st);
 	} else {
 		int val;
 
@@ -87,9 +89,7 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
-	if (state)
-		poll_value = hid_sensor_read_poll_value(st);
-	if (poll_value > 0)
+	if (state && poll_value)
 		msleep_interruptible(poll_value * 2);
 
 	return 0;
@@ -127,6 +127,20 @@ static void hid_sensor_set_power_work(struct work_struct *work)
 	struct hid_sensor_common *attrb = container_of(work,
 						       struct hid_sensor_common,
 						       work);
+
+	if (attrb->poll_interval >= 0)
+		sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id,
+				       attrb->poll.index,
+				       sizeof(attrb->poll_interval),
+				       &attrb->poll_interval);
+
+	if (attrb->raw_hystersis >= 0)
+		sensor_hub_set_feature(attrb->hsdev,
+				       attrb->sensitivity.report_id,
+				       attrb->sensitivity.index,
+				       sizeof(attrb->raw_hystersis),
+				       &attrb->raw_hystersis);
+
 	_hid_sensor_power_state(attrb, true);
 }
 
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e690dd1..4b0f942 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
 	.address = (chan),					\
 	.scan_type = {						\
 		.sign = 'u',					\
-		.realbits = '8',				\
-		.storagebits = '8',				\
-		.shift = '0',					\
+		.realbits = 8,					\
+		.storagebits = 8,				\
+		.shift = 0,					\
 	},							\
 	.ext_info = ad7303_ext_info,				\
 }
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index e5a533c..f762eb8 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -175,11 +175,12 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
 	}
 	H6 = sign_extend32(tmp, 7);
 
-	var = ((s32)data->t_fine) - 76800;
-	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
-		* (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
-		+ 2097152) * H2 + 8192) >> 14);
-	var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
+	var = ((s32)data->t_fine) - (s32)76800;
+	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
+		+ (s32)16384) >> 15) * (((((((var * H6) >> 10)
+		* (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
+		+ (s32)2097152) * H2 + 8192) >> 14);
+	var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
 
 	return var >> 12;
 };
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 5656deb..0204595 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -50,7 +50,6 @@
 #define AS3935_TUNE_CAP		0x08
 #define AS3935_CALIBRATE	0x3D
 
-#define AS3935_WRITE_DATA	BIT(15)
 #define AS3935_READ_DATA	BIT(14)
 #define AS3935_ADDRESS(x)	((x) << 8)
 
@@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st,
 {
 	u8 *buf = st->buf;
 
-	buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
+	buf[0] = AS3935_ADDRESS(reg) >> 8;
 	buf[1] = val;
 
 	return spi_write(st->spi, buf, 2);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0f58f46..8fd108d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -444,8 +444,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
-	dst = ip6_route_output(addr->net, NULL, &fl6);
-	if ((ret = dst->error))
+	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+	if (ret < 0)
 		goto put;
 
 	rt = (struct rt6_info *)dst;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c1fb545..42de5f2 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1301,7 +1301,7 @@ int ib_device_register_sysfs(struct ib_device *device,
 	free_port_list_attributes(device);
 
 err_unregister:
-	device_unregister(class_dev);
+	device_del(class_dev);
 
 err:
 	return ret;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8368764..0e64b52 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
 	if (!qp->device->attach_mcast)
 		return -ENOSYS;
-	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
 		return -EINVAL;
 
 	ret = qp->device->attach_mcast(qp, gid, lid);
@@ -1532,7 +1534,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 
 	if (!qp->device->detach_mcast)
 		return -ENOSYS;
-	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
+	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
 		return -EINVAL;
 
 	ret = qp->device->detach_mcast(qp, gid, lid);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bd786b7..bb72976 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -751,6 +751,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 	/* release the cpu */
 	hfi1_put_proc_affinity(fdata->rec_cpu_num);
 
+	/* clean up rcv side */
+	hfi1_user_exp_rcv_free(fdata);
+
 	/*
 	 * Clear any left over, unhandled events so the next process that
 	 * gets this context doesn't get confused.
@@ -790,7 +793,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 
 	dd->rcd[uctxt->ctxt] = NULL;
 
-	hfi1_user_exp_rcv_free(fdata);
+	hfi1_user_exp_rcv_grp_free(uctxt);
 	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
 
 	uctxt->rcvwait_to = 0;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3b5bc9..34cfd34 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1757,6 +1757,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
 					   rcd->ctxt);
+				ret = -ENOMEM;
 				goto bail_rcvegrbuf_phys;
 			}
 
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a1576ae..9f768b4 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -833,23 +833,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
 
+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+	hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
 	struct iowait *wait = container_of(work, struct iowait, iowork);
 	struct rvt_qp *qp = iowait_to_qp(wait);
 
-	hfi1_do_send(qp);
+	hfi1_do_send(qp, true);
 }
 
 /**
  * hfi1_do_send - perform a send on a QP
  * @work: contains a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
 	struct hfi1_pkt_state ps;
 	struct hfi1_qp_priv *priv = qp->priv;
@@ -917,8 +923,10 @@ void hfi1_do_send(struct rvt_qp *qp)
 			qp->s_hdrwords = 0;
 			/* allow other tasks to run */
 			if (unlikely(time_after(jiffies, timeout))) {
-				if (workqueue_congested(cpu,
-							ps.ppd->hfi1_wq)) {
+				if (!in_thread ||
+				    workqueue_congested(
+						cpu,
+						ps.ppd->hfi1_wq)) {
 					spin_lock_irqsave(
 						&qp->s_lock,
 						ps.flags);
@@ -931,11 +939,9 @@ void hfi1_do_send(struct rvt_qp *qp)
 						*ps.ppd->dd->send_schedule);
 					return;
 				}
-				if (!irqs_disabled()) {
-					cond_resched();
-					this_cpu_inc(
-					   *ps.ppd->dd->send_schedule);
-				}
+				cond_resched();
+				this_cpu_inc(
+					*ps.ppd->dd->send_schedule);
 				timeout = jiffies + (timeout_int) / 8;
 			}
 			spin_lock_irqsave(&qp->s_lock, ps.flags);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 64d2652..db0f140 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -250,36 +250,40 @@ int hfi1_user_exp_rcv_init(struct file *fp)
 	return ret;
 }
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
+{
+	struct tid_group *grp, *gptr;
+
+	list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+				 list) {
+		list_del_init(&grp->list);
+		kfree(grp);
+	}
+	hfi1_clear_tids(uctxt);
+}
+
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct tid_group *grp, *gptr;
 
-	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
-		return 0;
 	/*
 	 * The notifier would have been removed when the process'es mm
 	 * was freed.
 	 */
-	if (fd->handler)
+	if (fd->handler) {
 		hfi1_mmu_rb_unregister(fd->handler);
-
-	kfree(fd->invalid_tids);
-
-	if (!uctxt->cnt) {
+	} else {
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
-		list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
-					 list) {
-			list_del_init(&grp->list);
-			kfree(grp);
-		}
-		hfi1_clear_tids(uctxt);
 	}
 
+	kfree(fd->invalid_tids);
+	fd->invalid_tids = NULL;
+
 	kfree(fd->entry_to_rb);
+	fd->entry_to_rb = NULL;
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9f..d1d7d3d 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -70,6 +70,7 @@
 		(tid) |= EXP_TID_SET(field, (value));			\
 	} while (0)
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
 int hfi1_user_exp_rcv_init(struct file *);
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
 int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 4b7a16c..01a380e 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -1697,7 +1697,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1c3815d..bac84f8 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -372,7 +372,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 
 void _hfi1_do_send(struct work_struct *work);
 
-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			enum ib_wc_status status);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 46ad995..f2a885e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2926,6 +2926,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
 
 err_map:
+	mlx4_ib_free_eqs(dev, ibdev);
 	iounmap(ibdev->uar_map);
 
 err_uar:
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index a21d37f..e6ea81c 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1102,7 +1102,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
 		group = rb_entry(p, struct mcast_group, node);
 		if (atomic_read(&group->refcount))
-			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+			mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
+					atomic_read(&group->refcount), group);
 
 		force_clean_group(group);
 	}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index be2d02b..1fb31a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1828,7 +1828,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
 		klms[i].key = cpu_to_be32(lkey);
-		mr->ibmr.length += sg_dma_len(sg);
+		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
 
 		sg_offset = 0;
 	}
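A worked example of the length fix above, with made-up values: if a scatterlist entry has a DMA length of 4096 bytes and the registration starts 512 bytes into it, only 3584 of those bytes belong to the memory region, so sg_offset has to be subtracted before it is reset for the following entries. A toy model of that accumulation (not the mlx5 structures):

	#include <stdio.h>

	struct toy_sg { unsigned int dma_len; };

	static unsigned long mr_length(const struct toy_sg *sg, int n,
				       unsigned int sg_offset)
	{
		unsigned long len = 0;
		int i;

		for (i = 0; i < n; i++) {
			/* first entry contributes dma_len - offset, the rest in full */
			len += sg[i].dma_len - sg_offset;
			sg_offset = 0;
		}
		return len;
	}

	int main(void)
	{
		struct toy_sg sg[] = { { 4096 }, { 4096 }, { 2048 } };

		/* 3584 + 4096 + 2048 = 9728 */
		printf("mr length = %lu\n", mr_length(sg, 3, 512));
		return 0;
	}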
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740..09396bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
+	WARN_ONCE(!priv->path_dentry, "null path debug file\n");
 	debugfs_remove(priv->mcg_dentry);
 	debugfs_remove(priv->path_dentry);
+	priv->mcg_dentry = priv->path_dentry = NULL;
 }
 
 int ipoib_register_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3ef7b8f..08c4b02 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,6 +108,33 @@ static struct ib_client ipoib_client = {
 	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
 };
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct netdev_notifier_info *ni = ptr;
+	struct net_device *dev = ni->dev;
+
+	if (dev->netdev_ops->ndo_open != ipoib_open)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_CHANGENAME:
+		ipoib_delete_debug_files(dev);
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_UNREGISTER:
+		ipoib_delete_debug_files(dev);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
 int ipoib_open(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1655,8 +1682,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
 	ASSERT_RTNL();
 
-	ipoib_delete_debug_files(dev);
-
 	/* Delete any child interfaces first */
 	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
 		/* Stop GC on child */
@@ -2074,8 +2099,6 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto register_failed;
 	}
 
-	ipoib_create_debug_files(priv->dev);
-
 	if (ipoib_cm_add_mode_attr(priv->dev))
 		goto sysfs_failed;
 	if (ipoib_add_pkey_attr(priv->dev))
@@ -2090,7 +2113,6 @@ static struct net_device *ipoib_add_port(const char *format,
 	return priv->dev;
 
 sysfs_failed:
-	ipoib_delete_debug_files(priv->dev);
 	unregister_netdev(priv->dev);
 
 register_failed:
@@ -2175,6 +2197,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 	kfree(dev_list);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+	.notifier_call = ipoib_netdev_event,
+};
+#endif
+
 static int __init ipoib_init_module(void)
 {
 	int ret;
@@ -2227,6 +2255,9 @@ static int __init ipoib_init_module(void)
 	if (ret)
 		goto err_client;
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
 	return 0;
 
 err_client:
@@ -2244,6 +2275,9 @@ static int __init ipoib_init_module(void)
 
 static void __exit ipoib_cleanup_module(void)
 {
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
 	ipoib_netlink_fini();
 	ib_unregister_client(&ipoib_client);
 	ib_sa_unregister_client(&ipoib_sa_client);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index a2f9f29..57eadd2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -87,8 +87,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 		goto register_failed;
 	}
 
-	ipoib_create_debug_files(priv->dev);
-
 	/* RTNL childs don't need proprietary sysfs entries */
 	if (type == IPOIB_LEGACY_CHILD) {
 		if (ipoib_cm_add_mode_attr(priv->dev))
@@ -109,7 +107,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 
 sysfs_failed:
 	result = -ENOMEM;
-	ipoib_delete_debug_files(priv->dev);
 	unregister_netdevice(priv->dev);
 
 register_failed:
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index dd96670..b91a6b5 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -411,6 +411,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
 #define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
 #define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
+#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -529,6 +530,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
 	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
 	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
+	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
 	{ 0, NULL},
 };
 
@@ -1567,6 +1569,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
+			if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
+				ias = min(ias, 39UL);
 		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
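The 39-bit clamp above fits the usual AArch64 4KB-granule long-descriptor layout, where a 39-bit input address resolves with three table levels: 12 bits of page offset plus three 9-bit table indices (12 + 3 x 9 = 39). A quick check of that arithmetic, with the granule and level count as plain parameters rather than anything read from the SMMU:

	#include <stdio.h>

	/* Bits resolved by an AArch64 long-descriptor walk with a 4KB granule. */
	static unsigned int ias_for_levels(unsigned int levels)
	{
		const unsigned int page_shift = 12;	/* 4KB pages */
		const unsigned int bits_per_level = 9;	/* 512 entries per table */

		return page_shift + levels * bits_per_level;
	}

	int main(void)
	{
		printf("3-level walk covers %u bits\n", ias_for_levels(3));	/* 39 */
		printf("4-level walk covers %u bits\n", ias_for_levels(4));	/* 48 */
		return 0;
	}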
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9e50c1..87fcbf7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2049,11 +2049,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_copied(context)) {
 		u16 did_old = context_domain_id(context);
 
-		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
 			iommu->flush.flush_context(iommu, did_old,
 						   (((u16)bus) << 8) | devfn,
 						   DMA_CCMD_MASK_NOBIT,
 						   DMA_CCMD_DEVICE_INVL);
+			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
+		}
 	}
 
 	pgd = domain->pgd;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 41515bb..ee50a61 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -299,3 +299,5 @@
 config STM32_EXTI
 	bool
 	select IRQ_DOMAIN
+
+source "drivers/irqchip/qcom/Kconfig"
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 987bd89..450059c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -75,3 +75,4 @@
 obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o
 obj-$(CONFIG_STM32_EXTI) 		+= irq-stm32-exti.o
+obj-$(CONFIG_QTI_PDC)			+= qcom/
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
new file mode 100644
index 0000000..e4a7a88
--- /dev/null
+++ b/drivers/irqchip/qcom/Kconfig
@@ -0,0 +1,15 @@
+config QTI_PDC
+	bool "QTI PDC"
+	depends on ARCH_QCOM
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  QTI Power Domain Controller driver to manage and configure wakeup
+	  IRQs
+
+config QTI_PDC_SDM845
+	bool "QTI PDC SDM845"
+	select QTI_PDC
+	default y if ARCH_SDM845
+	help
+	  QTI Power Domain Controller for SDM845
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
new file mode 100644
index 0000000..1b7856d
--- /dev/null
+++ b/drivers/irqchip/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QTI_PDC)			+= pdc.o
+obj-$(CONFIG_QTI_PDC_SDM845)		+= pdc-sdm845.o
diff --git a/drivers/irqchip/qcom/pdc-sdm845.c b/drivers/irqchip/qcom/pdc-sdm845.c
new file mode 100644
index 0000000..178cf1f0
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdm845.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdm845_data[] = {
+	{0, 512}, /* rpmh_wake */
+	{1, 513}, /* ee0_apps_hlos_spmi_periph_irq */
+	{2, 514}, /* ee1_apps_trustzone_spmi_periph_irq */
+	{3, 515}, /* secure_wdog_expired */
+	{4, 516}, /* secure_wdog_bark_irq */
+	{5, 517}, /* aop_wdog_expired_irq */
+	{6, 518}, /* qmp_usb3_lfps_rxterm_irq */
+	{7, 519}, /* qmp_usb3_lfps_rxterm_irq */
+	{8, 520}, /* eud_p0_dmse_int_mx */
+	{9, 521}, /* eud_p0_dpse_int_mx */
+	{10, 522}, /* eud_p1_dmse_int_mx */
+	{11, 523}, /* eud_p1_dpse_int_mx */
+	{12, 524}, /* eud_int_mx[1] */
+	{13, 525}, /* ssc_xpu_irq_summary */
+	{14, 526}, /* wd_bite_apps */
+	{15, 527}, /* ssc_vmidmt_irq_summary */
+	{16, 528}, /* q6ss_irq_out_apps_ipc[4] */
+	{17, 529}, /* not-connected */
+	{18, 530}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+	{19, 531}, /* apps_pdc_irq_in_19 */
+	{20, 532}, /* apps_pdc_irq_in_20 */
+	{21, 533}, /* apps_pdc_irq_in_21 */
+	{22, 534}, /* pdc_apps_epcb_timeout_summary_irq */
+	{23, 535}, /* spmi_protocol_irq */
+	{24, 536}, /* tsense0_tsense_max_min_int */
+	{25, 537}, /* tsense1_tsense_max_min_int */
+	{26, 538}, /* tsense0_upper_lower_intr */
+	{27, 539}, /* tsense1_upper_lower_intr */
+	{28, 540}, /* tsense0_critical_intr */
+	{29, 541}, /* tsense1_critical_intr */
+	{30, 542}, /* core_bi_px_gpio_1 */
+	{31, 543}, /* core_bi_px_gpio_3 */
+	{32, 544}, /* core_bi_px_gpio_5 */
+	{33, 545}, /* core_bi_px_gpio_10 */
+	{34, 546}, /* core_bi_px_gpio_11 */
+	{35, 547}, /* core_bi_px_gpio_20 */
+	{36, 548}, /* core_bi_px_gpio_22 */
+	{37, 549}, /* core_bi_px_gpio_24 */
+	{38, 550}, /* core_bi_px_gpio_26 */
+	{39, 551}, /* core_bi_px_gpio_30 */
+	{41, 553}, /* core_bi_px_gpio_32 */
+	{42, 554}, /* core_bi_px_gpio_34 */
+	{43, 555}, /* core_bi_px_gpio_36 */
+	{44, 556}, /* core_bi_px_gpio_37 */
+	{45, 557}, /* core_bi_px_gpio_38 */
+	{46, 558}, /* core_bi_px_gpio_39 */
+	{47, 559}, /* core_bi_px_gpio_40 */
+	{49, 561}, /* core_bi_px_gpio_43 */
+	{50, 562}, /* core_bi_px_gpio_44 */
+	{51, 563}, /* core_bi_px_gpio_46 */
+	{52, 564}, /* core_bi_px_gpio_48 */
+	{54, 566}, /* core_bi_px_gpio_52 */
+	{55, 567}, /* core_bi_px_gpio_53 */
+	{56, 568}, /* core_bi_px_gpio_54 */
+	{57, 569}, /* core_bi_px_gpio_56 */
+	{58, 570}, /* core_bi_px_gpio_57 */
+	{59, 571}, /* core_bi_px_gpio_58 */
+	{60, 572}, /* core_bi_px_gpio_59 */
+	{61, 573}, /* core_bi_px_gpio_60 */
+	{62, 574}, /* core_bi_px_gpio_61 */
+	{63, 575}, /* core_bi_px_gpio_62 */
+	{64, 576}, /* core_bi_px_gpio_63 */
+	{65, 577}, /* core_bi_px_gpio_64 */
+	{66, 578}, /* core_bi_px_gpio_66 */
+	{67, 579}, /* core_bi_px_gpio_68 */
+	{68, 580}, /* core_bi_px_gpio_71 */
+	{69, 581}, /* core_bi_px_gpio_73 */
+	{70, 582}, /* core_bi_px_gpio_77 */
+	{71, 583}, /* core_bi_px_gpio_78 */
+	{72, 584}, /* core_bi_px_gpio_79 */
+	{73, 585}, /* core_bi_px_gpio_80 */
+	{74, 586}, /* core_bi_px_gpio_84 */
+	{75, 587}, /* core_bi_px_gpio_85 */
+	{76, 588}, /* core_bi_px_gpio_86 */
+	{77, 589}, /* core_bi_px_gpio_88 */
+	{79, 591}, /* core_bi_px_gpio_91 */
+	{80, 592}, /* core_bi_px_gpio_92 */
+	{81, 593}, /* core_bi_px_gpio_95 */
+	{82, 594}, /* core_bi_px_gpio_96 */
+	{83, 595}, /* core_bi_px_gpio_97 */
+	{84, 596}, /* core_bi_px_gpio_101 */
+	{85, 597}, /* core_bi_px_gpio_103 */
+	{86, 598}, /* core_bi_px_gpio_104 */
+	{87, 599}, /* core_bi_px_to_mpm[6] */
+	{88, 600}, /* core_bi_px_to_mpm[0] */
+	{89, 601}, /* core_bi_px_to_mpm[1] */
+	{90, 602}, /* core_bi_px_gpio_115 */
+	{91, 603}, /* core_bi_px_gpio_116 */
+	{92, 604}, /* core_bi_px_gpio_117 */
+	{93, 605}, /* core_bi_px_gpio_118 */
+	{94, 641}, /* core_bi_px_gpio_119 */
+	{95, 642}, /* core_bi_px_gpio_120 */
+	{96, 643}, /* core_bi_px_gpio_121 */
+	{97, 644}, /* core_bi_px_gpio_122 */
+	{98, 645}, /* core_bi_px_gpio_123 */
+	{99, 646}, /* core_bi_px_gpio_124 */
+	{100, 647}, /* core_bi_px_gpio_125 */
+	{101, 648}, /* core_bi_px_to_mpm[5] */
+	{102, 649}, /* core_bi_px_gpio_127 */
+	{103, 650}, /* core_bi_px_gpio_128 */
+	{104, 651}, /* core_bi_px_gpio_129 */
+	{105, 652}, /* core_bi_px_gpio_130 */
+	{106, 653}, /* core_bi_px_gpio_132 */
+	{107, 654}, /* core_bi_px_gpio_133 */
+	{108, 655}, /* core_bi_px_gpio_145 */
+	{119, 666}, /* core_bi_px_to_mpm[2] */
+	{120, 667}, /* core_bi_px_to_mpm[3] */
+	{121, 668}, /* core_bi_px_to_mpm[4] */
+	{122, 669}, /* core_bi_px_gpio_41 */
+	{123, 670}, /* core_bi_px_gpio_89 */
+	{124, 671}, /* core_bi_px_gpio_31 */
+	{125, 672}, /* core_bi_px_gpio_49 */
+	{-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	return qcom_pdc_init(node, parent, sdm845_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdm845, "qcom,pdc-sdm845", qcom_pdc_gic_init);
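A minimal sketch of how a table like sdm845_data is consumed; the lookup below mirrors get_pdc_pin() in pdc.c further down, with a few sample entries copied from the table above. The parent GIC hwirq is searched linearly and the matching PDC pin index is returned, with the {-1} entry terminating the scan.

	#include <stdio.h>

	struct pdc_pin_entry {
		int pin;
		unsigned long hwirq;
	};

	/* A few entries from the SDM845 table above; {-1} terminates the list. */
	static const struct pdc_pin_entry sample[] = {
		{0, 512},	/* rpmh_wake */
		{33, 545},	/* core_bi_px_gpio_10 */
		{125, 672},	/* core_bi_px_gpio_49 */
		{-1},
	};

	static int pin_for_hwirq(const struct pdc_pin_entry *tbl, unsigned long hwirq)
	{
		int i;

		for (i = 0; tbl[i].pin >= 0; i++)
			if (tbl[i].hwirq == hwirq)
				return tbl[i].pin;
		return -1;	/* not routed through the PDC */
	}

	int main(void)
	{
		printf("hwirq 545 -> pin %d\n", pin_for_hwirq(sample, 545));	/* 33 */
		printf("hwirq 999 -> pin %d\n", pin_for_hwirq(sample, 999));	/* -1 */
		return 0;
	}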
diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c
new file mode 100644
index 0000000..923552f
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc.c
@@ -0,0 +1,299 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "pdc.h"
+#define CREATE_TRACE_POINTS
+#include "trace/events/pdc.h"
+
+#define MAX_IRQS 126
+#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
+#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
+
+enum pdc_register_offsets {
+	IRQ_ENABLE_BANK = 0x10,
+	IRQ_i_CFG = 0x110,
+};
+
+static DEFINE_SPINLOCK(pdc_lock);
+static void __iomem *pdc_base;
+
+static int get_pdc_pin(irq_hw_number_t hwirq, void *data)
+{
+	int i;
+	struct pdc_pin *pdc_data = (struct pdc_pin *) data;
+
+	for (i = 0; pdc_data[i].pin >= 0; i++) {
+		if (pdc_data[i].hwirq == hwirq)
+			return pdc_data[i].pin;
+	}
+
+	return -EINVAL;
+}
+
+static inline int pdc_enable_intr(struct irq_data *d, bool on)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	unsigned int index, mask;
+	u32 enable, r_enable;
+	unsigned long flags;
+
+	if (pin_out < 0)
+		return 0;
+
+	index = pin_out / 32;
+	mask = pin_out % 32;
+	spin_lock_irqsave(&pdc_lock, flags);
+
+	enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK + (index *
+					sizeof(uint32_t)));
+	if (on)
+		enable = ENABLE_INTR(enable, mask);
+	else
+		enable = CLEAR_INTR(enable, mask);
+
+	writel_relaxed(enable, pdc_base + IRQ_ENABLE_BANK + (index *
+						sizeof(uint32_t)));
+
+	do {
+		r_enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK +
+					(index * sizeof(uint32_t)));
+		if (r_enable == enable)
+			break;
+		udelay(5);
+	} while (1);
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	trace_irq_pin_config("enable", (u32)pin_out, (u32)d->hwirq,
+			0, on);
+
+	return 0;
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_mask_parent(d);
+}
+
+static void qcom_pdc_gic_unmask(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_unmask_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_disable(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_disable_parent(d);
+}
+
+/*
+ * GIC does not handle falling edge or active low. To allow falling edge and
+ * active low interrupts to be handled at GIC, PDC has an inverter that inverts
+ * falling edge into a rising edge and active low into an active high.
+ * For the inverter to work, the polarity bit in the IRQ_CONFIG register has to
+ * be set as per the table below.
+ * (polarity, falling edge, rising edge )  ORIG          POL CONV     POLARITY
+ * 3'b0 00  Level sensitive active low    (~~~|_____)   (___|~~~~~)   LOW
+ * 3'b0 01  Rising edge sensitive         (___|~~|__)   (~~~|__|~~)   NOT USED
+ * 3'b0 10  Falling edge sensitive        (~~~|__|~~)   (___|~~|__)   LOW
+ * 3'b0 11  Dual Edge sensitive                                       NOT USED
+ * 3'b1 00  Level sensitive active high   (___|~~~~~)   (___|~~~~~)   HIGH
+ * 3'b1 01  Falling edge sensitive        (~~~|__|~~)   (~~~|__|~~)   NOT USED
+ * 3'b1 10  Rising edge sensitive         (___|~~|__)   (___|~~|__)   HIGH
+ * 3'b1 11  Dual Edge sensitive                                       HIGH
+ */
+enum pdc_irq_config_bits {
+	POLARITY_LOW = 0, //0 00
+	FALLING_EDGE = 2, //0 10
+	POLARITY_HIGH = 4,//1 00
+	RISING_EDGE = 6,  //1 10
+	DUAL_EDGE = 7,    //1 11
+};
+
+static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	u32 pdc_type = 0, config;
+
+	if (pin_out < 0)
+		goto fwd_to_parent;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		pdc_type = RISING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		pdc_type = FALLING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		pdc_type = DUAL_EDGE;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		pdc_type = POLARITY_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		pdc_type = POLARITY_LOW;
+		break;
+	default:
+		pdc_type = POLARITY_HIGH;
+		break;
+	}
+	writel_relaxed(pdc_type, pdc_base + IRQ_i_CFG +
+			(pin_out * sizeof(uint32_t)));
+
+	do {
+		config = readl_relaxed(pdc_base + IRQ_i_CFG +
+				(pin_out * sizeof(uint32_t)));
+		if (config == pdc_type)
+			break;
+		udelay(5);
+	} while (1);
+
+	trace_irq_pin_config("type_config", (u32)pin_out, (u32)d->hwirq,
+			pdc_type, 0);
+
+	/*
+	 * If type is edge triggered, forward that as Rising edge as PDC
+	 * takes care of converting falling edge to rising edge signal
+	 */
+	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		type = IRQ_TYPE_EDGE_RISING;
+
+	/*
+	 * If type is level, then forward that as level high as PDC
+	 * takes care of converting an active low into an active high signal
+	 */
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+		type = IRQ_TYPE_LEVEL_HIGH;
+
+fwd_to_parent:
+
+	return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_pdc_gic_chip = {
+	.name			= "PDC-GIC",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= qcom_pdc_gic_mask,
+	.irq_enable		= qcom_pdc_gic_enable,
+	.irq_unmask		= qcom_pdc_gic_unmask,
+	.irq_disable		= qcom_pdc_gic_disable,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= qcom_pdc_gic_set_type,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+					IRQCHIP_SET_TYPE_MASKED |
+					IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int qcom_pdc_translate(struct irq_domain *d,
+	struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type)
+{
+	return d->parent->ops->translate(d->parent, fwspec, hwirq, type);
+}
+
+static int qcom_pdc_alloc(struct irq_domain *domain,
+	unsigned int virq, unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	int i;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+			&qcom_pdc_gic_chip, domain->host_data);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_pdc_ops = {
+	.translate	= qcom_pdc_translate,
+	.alloc		= qcom_pdc_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data)
+{
+	struct irq_domain *parent_domain;
+	int ret;
+	struct irq_domain *pdc_domain;
+
+	pdc_base = of_iomap(node, 0);
+	if (!pdc_base) {
+		pr_err("%s(): unable to map PDC registers\n", node->full_name);
+		return -ENXIO;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("unable to obtain PDC parent domain\n");
+		ret = -ENXIO;
+		goto failure;
+	}
+
+	pdc_domain = irq_domain_add_hierarchy(parent_domain, 0, MAX_IRQS,
+			node, &qcom_pdc_ops, data);
+	if (!pdc_domain) {
+		pr_err("GIC domain add failed\n");
+		ret = -ENOMEM;
+		goto failure;
+	}
+
+	pdc_domain->name = "qcom,pdc";
+
+	return 0;
+
+failure:
+	iounmap(pdc_base);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_pdc_init);
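The enable path above splits a PDC pin into a 32-bit bank index and a bit position (index = pin / 32, mask = pin % 32) and then read-modify-writes that bank. A standalone sketch of just that bookkeeping, using an in-memory array instead of the memory-mapped IRQ_ENABLE_BANK registers:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_BANKS 4	/* enough for the 126 pins used above */

	static uint32_t enable_bank[NUM_BANKS];	/* stand-in for the MMIO banks */

	static void pdc_pin_enable(unsigned int pin, int on)
	{
		unsigned int index = pin / 32;	/* which 32-bit bank */
		unsigned int bit = pin % 32;	/* bit within that bank */

		if (on)
			enable_bank[index] |= 1u << bit;
		else
			enable_bank[index] &= ~(1u << bit);
	}

	int main(void)
	{
		pdc_pin_enable(45, 1);	/* bank 1, bit 13 */
		pdc_pin_enable(3, 1);	/* bank 0, bit 3 */
		printf("bank0=%#x bank1=%#x\n",
		       (unsigned int)enable_bank[0], (unsigned int)enable_bank[1]);
		return 0;
	}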
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/drivers/irqchip/qcom/pdc.h
similarity index 69%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to drivers/irqchip/qcom/pdc.h
index 4b3fa93..7c4d89c 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/drivers/irqchip/qcom/pdc.h
@@ -8,16 +8,16 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+#include <linux/irq.h>
+#include <linux/device.h>
+
+struct pdc_pin {
+	int pin;
+	irq_hw_number_t hwirq;
 };
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index fdc4b30..2678a00 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -63,11 +63,13 @@
 #define	FLASH_LED_REG_MITIGATION_SEL(base)	(base + 0x6E)
 #define	FLASH_LED_REG_MITIGATION_SW(base)	(base + 0x6F)
 #define	FLASH_LED_REG_LMH_LEVEL(base)		(base + 0x70)
+#define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
+#define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
 #define	FLASH_LED_REG_CURRENT_DERATE_EN(base)	(base + 0x76)
 
 #define	FLASH_LED_HDRM_VOL_MASK			GENMASK(7, 4)
 #define	FLASH_LED_CURRENT_MASK			GENMASK(6, 0)
-#define	FLASH_LED_ENABLE_MASK			GENMASK(2, 0)
+#define	FLASH_LED_STROBE_MASK			GENMASK(1, 0)
 #define	FLASH_HW_STROBE_MASK			GENMASK(2, 0)
 #define	FLASH_LED_ISC_WARMUP_DELAY_MASK		GENMASK(1, 0)
 #define	FLASH_LED_CURRENT_DERATE_EN_MASK	GENMASK(2, 0)
@@ -91,6 +93,9 @@
 #define	THERMAL_DERATE_SLOW_SHIFT		4
 #define	THERMAL_DERATE_SLOW_MASK		GENMASK(6, 4)
 #define	THERMAL_DERATE_FAST_MASK		GENMASK(2, 0)
+#define	LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
+#define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define	LPG_INPUT_SEL_BIT			BIT(0)
 
 #define	VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us)	(val_us / 8)
 #define	VPH_DROOP_HYST_MV_TO_VAL(val_mv)	(val_mv / 25)
@@ -127,11 +132,11 @@
 #define	FLASH_LED_LMH_MITIGATION_DISABLE	0
 #define	FLASH_LED_CHGR_MITIGATION_ENABLE	BIT(4)
 #define	FLASH_LED_CHGR_MITIGATION_DISABLE	0
-#define	FLASH_LED_MITIGATION_SEL_DEFAULT	2
+#define	FLASH_LED_LMH_MITIGATION_SEL_DEFAULT	2
 #define	FLASH_LED_MITIGATION_SEL_MAX		2
 #define	FLASH_LED_CHGR_MITIGATION_SEL_SHIFT	4
-#define	FLASH_LED_MITIGATION_THRSH_DEFAULT	0xA
-#define	FLASH_LED_MITIGATION_THRSH_MAX		0x1F
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT	0xA
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_MAX	0x1F
 #define	FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV	3700000
 #define	FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM	400000
 #define	FLASH_LED_IRES_BASE			3
@@ -152,12 +157,17 @@
 #define	FLASH_LED_MOD_ENABLE			BIT(7)
 #define	FLASH_LED_DISABLE			0x00
 #define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
-#define	FLASH_LED_MIN_CURRENT_MA		25
 #define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
 
 /* notifier call chain for flash-led irqs */
 static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
 
+enum flash_charger_mitigation {
+	FLASH_DISABLE_CHARGER_MITIGATION,
+	FLASH_HW_CHARGER_MITIGATION_BY_ILED_THRSHLD,
+	FLASH_SW_CHARGER_MITIGATION,
+};
+
 enum flash_led_type {
 	FLASH_LED_TYPE_FLASH,
 	FLASH_LED_TYPE_TORCH,
@@ -169,6 +179,12 @@ enum {
 	LED3,
 };
 
+enum strobe_type {
+	SW_STROBE = 0,
+	HW_STROBE,
+	LPG_STROBE,
+};
+
 /*
  * Configurations for each individual LED
  */
@@ -182,13 +198,15 @@ struct flash_node_data {
 	int				ires_ua;
 	int				max_current;
 	int				current_ma;
+	int				prev_current_ma;
 	u8				duration;
 	u8				id;
 	u8				type;
 	u8				ires;
 	u8				hdrm_val;
 	u8				current_reg_val;
-	u8				trigger;
+	u8				strobe_ctrl;
+	u8				strobe_sel;
 	bool				led_on;
 };
 
@@ -226,6 +244,7 @@ struct flash_led_platform_data {
 	int			thermal_thrsh1;
 	int			thermal_thrsh2;
 	int			thermal_thrsh3;
+	int			hw_strobe_option;
 	u32			led1n2_iclamp_low_ma;
 	u32			led1n2_iclamp_mid_ma;
 	u32			led3_iclamp_low_ma;
@@ -240,7 +259,6 @@ struct flash_led_platform_data {
 	u8			chgr_mitigation_sel;
 	u8			lmh_level;
 	u8			iled_thrsh_val;
-	u8			hw_strobe_option;
 	bool			hdrm_auto_mode_en;
 	bool			thermal_derate_en;
 	bool			otst_ramp_bkup_en;
@@ -261,6 +279,7 @@ struct qpnp_flash_led {
 	int				num_fnodes;
 	int				num_snodes;
 	int				enable;
+	int				total_current_ma;
 	u16				base;
 	bool				trigger_lmh;
 	bool				trigger_chgr;
@@ -487,10 +506,12 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
 	if (rc < 0)
 		return rc;
 
+	val = led->pdata->chgr_mitigation_sel
+				<< FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
 	rc = qpnp_flash_led_masked_write(led,
 			FLASH_LED_REG_MITIGATION_SEL(led->base),
 			FLASH_LED_CHGR_MITIGATION_SEL_MASK,
-			led->pdata->chgr_mitigation_sel);
+			val);
 	if (rc < 0)
 		return rc;
 
@@ -548,6 +569,28 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
 			return rc;
 	}
 
+	if (led->pdata->hw_strobe_option > 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CFG(led->base),
+				FLASH_LED_STROBE_MASK,
+				led->pdata->hw_strobe_option);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+			LED3_FLASH_ONCE_ONLY_BIT, 0);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+		if (rc < 0)
+			return rc;
+	}
 	return 0;
 }
 
@@ -877,14 +920,29 @@ static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led)
 	return max_avail_current;
 }
 
+static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+
+	if (fnode->current_ma)
+		led->total_current_ma += fnode->current_ma
+						- fnode->prev_current_ma;
+	else
+		led->total_current_ma -= fnode->prev_current_ma;
+
+	fnode->prev_current_ma = fnode->current_ma;
+}
+
 static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 {
 	int prgm_current_ma = value;
+	int min_ma = fnode->ires_ua / 1000;
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
 
 	if (value <= 0)
 		prgm_current_ma = 0;
-	else if (value < FLASH_LED_MIN_CURRENT_MA)
-		prgm_current_ma = FLASH_LED_MIN_CURRENT_MA;
+	else if (value < min_ma)
+		prgm_current_ma = min_ma;
 
 	prgm_current_ma = min(prgm_current_ma, fnode->max_current);
 	fnode->current_ma = prgm_current_ma;
@@ -892,6 +950,13 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 	fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
 					fnode->ires_ua);
 	fnode->led_on = prgm_current_ma != 0;
+
+	if (led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) {
+		qpnp_flash_led_aggregate_max_current(fnode);
+		led->trigger_chgr = false;
+		if (led->total_current_ma >= 1000)
+			led->trigger_chgr = true;
+	}
 }
 
 static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
@@ -950,7 +1015,7 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
 
 		led->fnode[i].led_on = false;
 
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, false);
 			if (rc < 0) {
@@ -1004,13 +1069,6 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 	if (rc < 0)
 		return rc;
 
-	rc = qpnp_flash_led_masked_write(led,
-					FLASH_LED_REG_STROBE_CFG(led->base),
-					FLASH_LED_ENABLE_MASK,
-					led->pdata->hw_strobe_option);
-	if (rc < 0)
-		return rc;
-
 	val = 0;
 	for (i = 0; i < led->num_fnodes; i++) {
 		if (!led->fnode[i].led_on ||
@@ -1018,13 +1076,13 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 			continue;
 
 		addr_offset = led->fnode[i].id;
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT)
-			mask = FLASH_HW_STROBE_MASK;
-		else
+		if (led->fnode[i].strobe_sel == SW_STROBE)
 			mask = FLASH_LED_HW_SW_STROBE_SEL_BIT;
+		else
+			mask = FLASH_HW_STROBE_MASK;
 		rc = qpnp_flash_led_masked_write(led,
 			FLASH_LED_REG_STROBE_CTRL(led->base + addr_offset),
-			mask, led->fnode[i].trigger);
+			mask, led->fnode[i].strobe_ctrl);
 		if (rc < 0)
 			return rc;
 
@@ -1042,7 +1100,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
 
 		val |= FLASH_LED_ENABLE << led->fnode[i].id;
 
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, true);
 			if (rc < 0) {
@@ -1159,10 +1217,6 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
 		*max_current = rc;
 	}
 
-	led->trigger_chgr = false;
-	if (options & PRE_FLASH)
-		led->trigger_chgr = true;
-
 	return 0;
 }
 
@@ -1336,9 +1390,9 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 			struct flash_node_data *fnode, struct device_node *node)
 {
 	const char *temp_string;
-	int rc;
+	int rc, min_ma;
 	u32 val;
-	bool strobe_sel = 0, edge_trigger = 0, active_high = 0;
+	bool hw_strobe = 0, edge_trigger = 0, active_high = 0;
 
 	fnode->pdev = led->pdev;
 	fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
@@ -1392,10 +1446,11 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
+	min_ma = fnode->ires_ua / 1000;
 	rc = of_property_read_u32(node, "qcom,max-current", &val);
 	if (!rc) {
-		if (val < FLASH_LED_MIN_CURRENT_MA)
-			val = FLASH_LED_MIN_CURRENT_MA;
+		if (val < min_ma)
+			val = min_ma;
 		fnode->max_current = val;
 		fnode->cdev.max_brightness = val;
 	} else {
@@ -1405,11 +1460,10 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 
 	rc = of_property_read_u32(node, "qcom,current-ma", &val);
 	if (!rc) {
-		if (val < FLASH_LED_MIN_CURRENT_MA ||
-				val > fnode->max_current)
+		if (val < min_ma || val > fnode->max_current)
 			pr_warn("Invalid operational current specified, capping it\n");
-		if (val < FLASH_LED_MIN_CURRENT_MA)
-			val = FLASH_LED_MIN_CURRENT_MA;
+		if (val < min_ma)
+			val = min_ma;
 		if (val > fnode->max_current)
 			val = fnode->max_current;
 		fnode->current_ma = val;
@@ -1457,14 +1511,52 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
-	strobe_sel = of_property_read_bool(node, "qcom,hw-strobe-sel");
-	if (strobe_sel) {
+	fnode->strobe_sel = SW_STROBE;
+	rc = of_property_read_u32(node, "qcom,strobe-sel", &val);
+	if (rc < 0) {
+		if (rc != -EINVAL) {
+			pr_err("Unable to read qcom,strobe-sel property\n");
+			return rc;
+		}
+	} else {
+		if (val < SW_STROBE || val > LPG_STROBE) {
+			pr_err("Incorrect strobe selection specified %d\n",
+				val);
+			return -EINVAL;
+		}
+		fnode->strobe_sel = (u8)val;
+	}
+
+	/*
+	 * LPG strobe is allowed only for LED3 and HW strobe option should be
+	 * option 2 or 3.
+	 */
+	if (fnode->strobe_sel == LPG_STROBE) {
+		if (led->pdata->hw_strobe_option ==
+				FLASH_LED_HW_STROBE_OPTION_1) {
+			pr_err("Incorrect strobe option for LPG strobe\n");
+			return -EINVAL;
+		}
+		if (fnode->id != LED3) {
+			pr_err("Incorrect LED chosen for LPG strobe\n");
+			return -EINVAL;
+		}
+	}
+
+	if (fnode->strobe_sel == HW_STROBE) {
 		edge_trigger = of_property_read_bool(node,
 						"qcom,hw-strobe-edge-trigger");
 		active_high = !of_property_read_bool(node,
 						"qcom,hw-strobe-active-low");
+		hw_strobe = 1;
+	} else if (fnode->strobe_sel == LPG_STROBE) {
+		/* LPG strobe requires level trigger and active high */
+		edge_trigger = 0;
+		active_high =  1;
+		hw_strobe = 1;
 	}
-	fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high;
+	fnode->strobe_ctrl = (hw_strobe << 2) | (edge_trigger << 1) |
+				active_high;
 
 	rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
 	if (rc < 0) {
@@ -1480,7 +1572,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
 		fnode->strobe_pinctrl = NULL;
 	}
 
-	if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+	if (fnode->strobe_sel == HW_STROBE) {
 		if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
 			fnode->hw_strobe_gpio = of_get_named_gpio(node,
 						"qcom,hw-strobe-gpio", 0);
@@ -1860,9 +1952,10 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 
 	led->pdata->vph_droop_hysteresis <<= FLASH_LED_VPH_DROOP_HYST_SHIFT;
 
+	led->pdata->hw_strobe_option = -EINVAL;
 	rc = of_property_read_u32(node, "qcom,hw-strobe-option", &val);
 	if (!rc) {
-		led->pdata->hw_strobe_option = (u8)val;
+		led->pdata->hw_strobe_option = val;
 	} else if (rc != -EINVAL) {
 		pr_err("Unable to parse hw strobe option, rc=%d\n", rc);
 		return rc;
@@ -1957,7 +2050,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
-	led->pdata->lmh_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+	led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT;
 	rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val);
 	if (!rc) {
 		led->pdata->lmh_mitigation_sel = val;
@@ -1971,7 +2064,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 		return -EINVAL;
 	}
 
-	led->pdata->chgr_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+	led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION;
 	rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
 	if (!rc) {
 		led->pdata->chgr_mitigation_sel = val;
@@ -1985,9 +2078,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 		return -EINVAL;
 	}
 
-	led->pdata->chgr_mitigation_sel <<= FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
-
-	led->pdata->iled_thrsh_val = FLASH_LED_MITIGATION_THRSH_DEFAULT;
+	led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT;
 	rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
 	if (!rc) {
 		led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
@@ -1996,7 +2087,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
 		return rc;
 	}
 
-	if (led->pdata->iled_thrsh_val > FLASH_LED_MITIGATION_THRSH_MAX) {
+	if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) {
 		pr_err("Invalid iled_thrsh_val specified\n");
 		return -EINVAL;
 	}
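A worked example of the software charger-mitigation bookkeeping added above (the 1000 mA threshold comes from the hunk; the LED currents are made up): each node remembers its previously programmed current so that repeated brightness writes adjust the running total instead of double-counting, and charger mitigation triggers whenever the total reaches 1 A.

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_fnode {
		int current_ma;
		int prev_current_ma;
	};

	static int total_ma;
	static bool trigger_chgr;

	/* Mirrors the aggregation logic: apply the delta, remember the new value. */
	static void set_current(struct toy_fnode *f, int ma)
	{
		f->current_ma = ma;
		if (f->current_ma)
			total_ma += f->current_ma - f->prev_current_ma;
		else
			total_ma -= f->prev_current_ma;
		f->prev_current_ma = f->current_ma;

		trigger_chgr = total_ma >= 1000;
	}

	int main(void)
	{
		struct toy_fnode led1 = {0}, led2 = {0};

		set_current(&led1, 750);	/* total 750, mitigation off */
		set_current(&led2, 300);	/* total 1050, mitigation on */
		set_current(&led1, 0);		/* total 300, mitigation off */
		printf("total=%d mA, chgr mitigation=%s\n",
		       total_ma, trigger_chgr ? "on" : "off");
		return 0;
	}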
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3b53f34..e7b8f49 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -357,6 +357,7 @@
 config DM_RAID
        tristate "RAID 1/4/5/6/10 target"
        depends on BLK_DEV_DM
+       select MD_RAID0
        select MD_RAID1
        select MD_RAID10
        select MD_RAID456
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc..8bf9667 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -215,7 +215,7 @@ static DEFINE_SPINLOCK(param_spinlock);
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -923,10 +923,11 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 {
 	unsigned long buffers;
 
-	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
-		mutex_lock(&dm_bufio_clients_lock);
-		__cache_size_refresh();
-		mutex_unlock(&dm_bufio_clients_lock);
+	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+		if (mutex_trylock(&dm_bufio_clients_lock)) {
+			__cache_size_refresh();
+			mutex_unlock(&dm_bufio_clients_lock);
+		}
 	}
 
 	buffers = dm_bufio_cache_size_per_client >>
@@ -1540,10 +1541,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 	return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-        return retain_bytes / c->block_size;
+        unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+        return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1553,7 +1554,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1779,11 +1780,19 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
+	LIST_HEAD(write_list);
 
 	dm_bufio_lock(c);
 
+	__check_watermark(c, &write_list);
+	if (unlikely(!list_empty(&write_list))) {
+		dm_bufio_unlock(c);
+		__flush_write_list(&write_list);
+		dm_bufio_lock(c);
+	}
+
 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
 		if (count <= retain_target)
@@ -1808,6 +1817,8 @@ static void cleanup_old_buffers(void)
 
 	mutex_lock(&dm_bufio_clients_lock);
 
+	__cache_size_refresh();
+
 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
 		__evict_old_buffers(c, max_age_hz);
 
@@ -1930,7 +1941,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
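The new retain-buffer calculation above divides the byte budget by the block size with a shift, since a block spans 1 << (sectors_per_block_bits + SECTOR_SHIFT) bytes. With 512-byte sectors (SECTOR_SHIFT = 9), a 4 KiB block has sectors_per_block_bits = 3, so the shift is 12 and, for example, a 256 KiB retain_bytes keeps 64 buffers. A quick check of that arithmetic (constants only; nothing here touches dm-bufio):

	#include <stdio.h>

	#define SECTOR_SHIFT 9	/* 512-byte sectors */

	static unsigned long retain_buffers(unsigned long retain_bytes,
					    unsigned int sectors_per_block_bits)
	{
		return retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);
	}

	int main(void)
	{
		/* 4 KiB blocks: 8 sectors per block -> sectors_per_block_bits = 3 */
		printf("retain 256 KiB, 4 KiB blocks  -> %lu buffers\n",
		       retain_buffers(256 * 1024, 3));	/* 64 */
		/* 64 KiB blocks: 128 sectors per block -> shift of 16 */
		printf("retain 256 KiB, 64 KiB blocks -> %lu buffers\n",
		       retain_buffers(256 * 1024, 7));	/* 4 */
		return 0;
	}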
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6955778..6937ca4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1383,17 +1383,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-	int r;
+	int r = -EINVAL;
 	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
 				 clear_clean_shutdown);
 
 	WRITE_LOCK(cmd);
+	if (cmd->fail_io)
+		goto out;
+
 	r = __commit_transaction(cmd, mutator);
 	if (r)
 		goto out;
 
 	r = __begin_transaction(cmd);
-
 out:
 	WRITE_UNLOCK(cmd);
 	return r;
@@ -1405,7 +1407,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
@@ -1417,7 +1420,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b267..80e3df1 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -961,18 +961,18 @@ static int metadata_commit(struct era_metadata *md)
 		}
 	}
 
-	r = save_sm_root(md);
-	if (r) {
-		DMERR("%s: save_sm_root failed", __func__);
-		return r;
-	}
-
 	r = dm_tm_pre_commit(md->tm);
 	if (r) {
 		DMERR("%s: pre commit failed", __func__);
 		return r;
 	}
 
+	r = save_sm_root(md);
+	if (r) {
+		DMERR("%s: save_sm_root failed", __func__);
+		return r;
+	}
+
 	r = superblock_lock(md, &sblock);
 	if (r) {
 		DMERR("%s: superblock lock failed", __func__);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8..ac8235b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -119,7 +119,8 @@ static struct kmem_cache *_mpio_cache;
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
-static void activate_path(struct work_struct *work);
+static void activate_or_offline_path(struct pgpath *pgpath);
+static void activate_path_work(struct work_struct *work);
 static void process_queued_bios(struct work_struct *work);
 
 /*-----------------------------------------------
@@ -144,7 +145,7 @@ static struct pgpath *alloc_pgpath(void)
 
 	if (pgpath) {
 		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
 	}
 
 	return pgpath;
@@ -1515,10 +1516,8 @@ static void pg_init_done(void *data, int errors)
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
-static void activate_path(struct work_struct *work)
+static void activate_or_offline_path(struct pgpath *pgpath)
 {
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path.work);
 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
 
 	if (pgpath->is_active && !blk_queue_dying(q))
@@ -1527,6 +1526,14 @@ static void activate_path(struct work_struct *work)
 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
 }
 
+static void activate_path_work(struct work_struct *work)
+{
+	struct pgpath *pgpath =
+		container_of(work, struct pgpath, activate_path.work);
+
+	activate_or_offline_path(pgpath);
+}
+
 static int noretry_error(int error)
 {
 	switch (error) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 2c96542..ba7c4c6 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -997,10 +997,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	dm_init_md_queue(md);
 
 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
-	blk_mq_register_dev(disk_to_dev(md->disk), q);
+	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
+	if (err)
+		goto out_cleanup_queue;
 
 	return 0;
 
+out_cleanup_queue:
+	blk_cleanup_queue(q);
 out_tag_set:
 	blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set:
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index a15091a..4477bf9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -485,11 +485,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
 	if (r < 0)
 		return r;
 
-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;
 
-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c1..be869a9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1070,6 +1070,7 @@ static void passdown_endio(struct bio *bio)
 	 * to unmap (we ignore err).
 	 */
 	queue_passdown_pt2(bio->bi_private);
+	bio_put(bio);
 }
 
 static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 24925f2..eddd360 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6752,6 +6752,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 	void __user *argp = (void __user *)arg;
 	struct mddev *mddev = NULL;
 	int ro;
+	bool did_set_md_closing = false;
 
 	if (!md_ioctl_valid(cmd))
 		return -ENOTTY;
@@ -6841,7 +6842,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 			err = -EBUSY;
 			goto out;
 		}
+		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
 		set_bit(MD_CLOSING, &mddev->flags);
+		did_set_md_closing = true;
 		mutex_unlock(&mddev->open_mutex);
 		sync_blockdev(bdev);
 	}
@@ -7041,6 +7044,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		mddev->hold_active = 0;
 	mddev_unlock(mddev);
 out:
+	if (did_set_md_closing)
+		clear_bit(MD_CLOSING, &mddev->flags);
 	return err;
 }
 #ifdef CONFIG_COMPAT
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 20a4032..7a75b50 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -897,8 +897,12 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
 		else
 			*result_key = le64_to_cpu(ro_node(s)->keys[0]);
 
-		if (next_block || flags & INTERNAL_NODE)
-			block = value64(ro_node(s), i);
+		if (next_block || flags & INTERNAL_NODE) {
+			if (find_highest)
+				block = value64(ro_node(s), i);
+			else
+				block = value64(ro_node(s), 0);
+		}
 
 	} while (flags & INTERNAL_NODE);
 
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a..32adf6b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+
+	return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cce6057..f34ad2b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2253,6 +2253,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		err = -ENOMEM;
 
 	mutex_unlock(&conf->cache_size_mutex);
+
+	conf->slab_cache = sc;
+	conf->active_name = 1-conf->active_name;
+
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2270,8 +2274,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	}
 	/* critical section pass, GFP_NOIO no longer needed */
 
-	conf->slab_cache = sc;
-	conf->active_name = 1-conf->active_name;
 	if (!err)
 		conf->pool_size = newsize;
 	return err;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c5..fd0f25e 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3852,7 +3852,9 @@ static struct  dvb_frontend_ops cxd2841er_t_c_ops = {
 			FE_CAN_MUTE_TS |
 			FE_CAN_2G_MODULATION,
 		.frequency_min = 42000000,
-		.frequency_max = 1002000000
+		.frequency_max = 1002000000,
+		.symbol_rate_min = 870000,
+		.symbol_rate_max = 11700000
 	},
 	.init = cxd2841er_init_tc,
 	.sleep = cxd2841er_sleep_tc,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 76dd1f3..5a4e6e9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -59,15 +59,22 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
 		}
 
 		if (!bubble_state) {
-			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
+
 		} else if (!req_isp->bubble_report) {
-			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
@@ -277,7 +284,7 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
 
 	ctx_isp->frame_id++;
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
-	pr_err("%s: next substate %d\n", __func__,
+	CDBG("%s: next substate %d\n", __func__,
 		ctx_isp->substate_activated);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 259e773..4b2db07 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1493,19 +1493,6 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 	if (i == ctx->num_base)
 		master_base_idx = ctx->base[0].idx;
 
-	/* Stop the master CIDs first */
-	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-
-	/* stop rest of the CIDs  */
-	for (i = 0; i < ctx->num_base; i++) {
-		if (i == master_base_idx)
-			continue;
-		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-	}
-
-
 	/* Stop the master CSID path first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
 			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
@@ -1519,6 +1506,18 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
 			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
 	}
 
+	/* Stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CIDs  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		pr_err("%s%d: CDM stream off failed %d\n",
 			__func__, __LINE__, ctx->cdm_handle);
@@ -2884,7 +2883,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
 	int i, j;
 	struct cam_iommu_handle cdm_handles;
 
-	pr_info("%s: Enter\n", __func__);
+	CDBG("%s: Enter\n", __func__);
 
 	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
 
@@ -3018,7 +3017,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
 
 	/* Create Worker for ife_hw_mgr with 10 tasks */
 	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
-			&g_ife_hw_mgr.workq);
+			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
 
 	if (rc < 0) {
 		pr_err("%s: Unable to create worker\n", __func__);
@@ -3037,7 +3036,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 
-	pr_info("%s: Exit\n", __func__);
+	CDBG("%s: Exit\n", __func__);
 	return 0;
 end:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 6306df3..3ec9aa6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1133,7 +1133,7 @@ static int cam_ife_csid_init_config_ipp_path(
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * configure the IPP and enable the time stamp capture.
 	 * enable the HW measurement blocks
 	 */
@@ -1417,7 +1417,7 @@ static int cam_ife_csid_init_config_rdi_path(
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * RDI path config and enable the time stamp capture
 	 * Enable the measurement blocks
 	 */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index e62c101..ed251eb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1092,10 +1092,9 @@ int cam_req_mgr_process_sched_req(void *priv, void *data)
 	slot = &in_q->slot[in_q->wr_idx];
 
 	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
-		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
-		CRM_ERR("in_q overwrite %d", slot->status);
-		/* @TODO: error handling */
-	}
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
+		CRM_WARN("in_q overwrite %d", slot->status);
+
 	CRM_DBG("sched_req %lld at slot %d",
 		sched_req->req_id, in_q->wr_idx);
 
@@ -1106,7 +1105,6 @@ int cam_req_mgr_process_sched_req(void *priv, void *data)
 	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
 	mutex_unlock(&link->req.lock);
 
-	complete(&link->workq_comp);
 end:
 	return rc;
 }
@@ -1371,6 +1369,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
 		goto end;
 	}
 
+	CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(add_req->link_hdl);
 
@@ -1404,6 +1403,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
 	dev_req->dev_hdl = add_req->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_add_req;
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 
 end:
 	return rc;
@@ -1813,7 +1813,8 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
 	/* Create worker for current link */
 	snprintf(buf, sizeof(buf), "%x-%x",
 		link_info->session_hdl, link->link_hdl);
-	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
 		__cam_req_mgr_destroy_link_info(link);
@@ -1919,11 +1920,10 @@ int cam_req_mgr_schedule_request(
 			struct cam_req_mgr_sched_request *sched_req)
 {
 	int                               rc = 0;
-	struct crm_workq_task            *task = NULL;
 	struct cam_req_mgr_core_link     *link = NULL;
 	struct cam_req_mgr_core_session  *session = NULL;
 	struct cam_req_mgr_sched_request *sched;
-	struct crm_task_payload          *task_data;
+	struct crm_task_payload           task_data;
 
 	if (!sched_req) {
 		CRM_ERR("csl_req is NULL");
@@ -1942,14 +1942,10 @@ int cam_req_mgr_schedule_request(
 		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
+	CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
 
-	task = cam_req_mgr_workq_get_task(link->workq);
-	if (!task)
-		return -ENOMEM;
-
-	task_data = (struct crm_task_payload *)task->payload;
-	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
-	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
 	sched->req_id = sched_req->req_id;
 	sched->link_hdl = sched_req->link_hdl;
 	if (session->force_err_recovery == AUTO_RECOVERY) {
@@ -1958,14 +1954,10 @@ int cam_req_mgr_schedule_request(
 		sched->bubble_enable =
 		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
 	}
-	task->process_cb = &cam_req_mgr_process_sched_req;
-	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
-	/* Blocking call */
-	init_completion(&link->workq_comp);
-	rc = wait_for_completion_timeout(
-		&link->workq_comp,
-		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+	rc = cam_req_mgr_process_sched_req(link, &task_data);
+
+	CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 889ee9c..3ee0e2f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -27,7 +27,7 @@
 #define FORCE_ENABLE_RECOVERY   1
 #define AUTO_RECOVERY           0
 
-#define CRM_WORKQ_NUM_TASKS 30
+#define CRM_WORKQ_NUM_TASKS 60
 
 /**
  * enum crm_workq_task_type
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 13affe9..1a8356a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -461,6 +461,27 @@ static int cam_video_device_setup(void)
 	return rc;
 }
 
+int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type)
+{
+	struct v4l2_event event;
+	struct cam_req_mgr_message *ev_header;
+
+	if (!msg)
+		return -EINVAL;
+
+	event.id = id;
+	event.type = type;
+	ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR(event,
+		struct cam_req_mgr_message);
+	memcpy(ev_header, msg, sizeof(struct cam_req_mgr_message));
+	v4l2_event_queue(g_dev.video, &event);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_req_mgr_notify_frame_message);
+
 void cam_video_device_cleanup(void)
 {
 	video_unregister_device(g_dev.video);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
index 430e46e..77faed9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
@@ -40,4 +40,11 @@ struct cam_req_mgr_device {
 	spinlock_t cam_eventq_lock;
 };
 
+#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type)        \
+	(type *)((char *)ev.u.data)
+
+int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type);
+
 #endif /* _CAM_REQ_MGR_DEV_H_ */
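The helper added above simply wraps the message in a v4l2_event and queues it on the request manager video node. A minimal usage sketch, not part of the patch; the event id/type macros shown are placeholder assumptions, not values defined by this diff:

	struct cam_req_mgr_message msg;

	memset(&msg, 0, sizeof(msg));
	/* fill in the session handle and frame/error details to report */

	/* copies msg into the event payload and calls v4l2_event_queue() */
	cam_req_mgr_notify_frame_message(&msg,
		V4L_EVENT_CAM_REQ_MGR_SOF,      /* hypothetical event id */
		V4L_EVENT_CAM_REQ_MGR_EVENT);   /* hypothetical event type */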
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index f53e41c..38dcb42 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,16 +12,30 @@
 
 #include "cam_req_mgr_workq.h"
 
+#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
+	else \
+		spin_lock_bh(&(workq)->lock_bh); \
+}
+
+#define WORKQ_RELEASE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
+	else	\
+		spin_unlock_bh(&(workq)->lock_bh); \
+}
 
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
 	struct crm_workq_task *task = NULL;
+	unsigned long flags = 0;
 
 	if (!workq)
 		return NULL;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -33,7 +47,8 @@ struct crm_workq_task *cam_req_mgr_workq_get_task(
 	}
 
 end:
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
+
 	return task;
 }
 
@@ -41,8 +56,9 @@ static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
 {
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
+	unsigned long flags = 0;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
@@ -50,7 +66,7 @@ static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
 }
 
 /**
@@ -131,6 +147,7 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
+	unsigned long flags = 0;
 
 	if (!task) {
 		CRM_WARN("NULL task pointer can not schedule");
@@ -148,24 +165,25 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 		goto end;
 	}
 
-	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
 	task->priv = priv;
 	task->priority =
 		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
 		? prio : CRM_TASK_PRIORITY_0;
+
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
+	WORKQ_RELEASE_LOCK(workq, flags);
+
 	atomic_add(1, &workq->task.pending_cnt);
 	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
@@ -174,7 +192,7 @@ int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
 }
 
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq)
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -209,6 +227,7 @@ int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
 		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
 			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		crm_workq->in_irq = in_irq;
 		crm_workq->task.num_task = num_tasks;
 		crm_workq->task.pool = (struct crm_workq_task *)
 			kzalloc(sizeof(struct crm_workq_task) *
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 7d8ca59..eb3b804 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -25,9 +25,16 @@
 
 /* Task priorities, lower the number higher the priority*/
 enum crm_task_priority {
-	CRM_TASK_PRIORITY_0 = 0,
-	CRM_TASK_PRIORITY_1 = 1,
-	CRM_TASK_PRIORITY_MAX = 2,
+	CRM_TASK_PRIORITY_0,
+	CRM_TASK_PRIORITY_1,
+	CRM_TASK_PRIORITY_MAX,
+};
+
+/* whether the workqueue will be used from IRQ context */
+enum crm_workq_context {
+	CRM_WORKQ_USAGE_NON_IRQ,
+	CRM_WORKQ_USAGE_IRQ,
+	CRM_WORKQ_USAGE_INVALID,
 };
 
 /** struct crm_workq_task
@@ -58,8 +65,9 @@ struct crm_workq_task {
  * @work       : work token used by workqueue
  * @job        : workqueue internal job struct
  * task -
- * @lock       : lock for task structs
- * @free_cnt   :  num of free/available tasks
+ * @lock_bh    : lock for task structs
+ * @in_irq     : set true if the workqueue can be used in IRQ context
+ * @free_cnt   : num of free/available tasks
 * @empty_head : list head of available tasks which can be used
  *               or acquired in order to enqueue a task to workq
  * @pool       : pool of tasks used for handling events in workq context
@@ -70,6 +78,7 @@ struct cam_req_mgr_core_workq {
 	struct work_struct         work;
 	struct workqueue_struct   *job;
 	spinlock_t                 lock_bh;
+	uint32_t                   in_irq;
 
 	/* tasks */
 	struct {
@@ -91,11 +100,12 @@ struct cam_req_mgr_core_workq {
  *             of session handle and link handle
  * @num_task : Num_tasks to be allocated for workq
  * @workq    : Double pointer worker
+ * @in_irq   : Set to CRM_WORKQ_USAGE_IRQ if the workq may be used in IRQ context
  * This function will allocate and create workqueue and pass
  * the workq pointer to caller.
  */
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq);
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq);
 
 /**
  * cam_req_mgr_workq_destroy()
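Since the create call now takes a usage hint, a caller that may enqueue tasks from interrupt context (as the ICP message queue does further below) passes CRM_WORKQ_USAGE_IRQ so the task-pool locking switches to the irqsave variants. A minimal caller sketch, not part of the patch; my_process_cb and my_priv are placeholders:

	static int my_process_cb(void *priv, void *data)
	{
		/* runs later in workqueue (process) context */
		return 0;
	}

	static int example_workq_setup(void *my_priv)
	{
		struct cam_req_mgr_core_workq *workq;
		struct crm_workq_task *task;
		int rc;

		rc = cam_req_mgr_workq_create("example_worker", 20,
			&workq, CRM_WORKQ_USAGE_IRQ);
		if (rc < 0)
			return rc;

		task = cam_req_mgr_workq_get_task(workq);
		if (!task)
			return -ENOMEM;

		task->process_cb = &my_process_cb;
		return cam_req_mgr_workq_enqueue_task(task, my_priv,
			CRM_TASK_PRIORITY_0);
	}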
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2fa39c8..140542b 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1904,14 +1904,14 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
 	}
 
 	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.cmd_work);
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto cmd_work_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.msg_work);
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto msg_work_failed;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 34243e6..15b8a2d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -227,9 +227,10 @@ void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 ot_lim;
-	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
-		params->reg_off_vbif_lim_conf;
-	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+	u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
+					* mdata->npriority_lvl)
+					+ params->reg_off_vbif_lim_conf;
+	u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
 	u32 reg_val;
 	u32 sts;
 	bool forced_on;
@@ -420,6 +421,136 @@ static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
 	}
 }
 
+static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[SDE_ROT_OP_MAX] = {0};
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-rot-cdp-setting");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-cdp-setting", data, len);
+		if (rc) {
+			SDEROT_ERR("invalid CDP setting\n");
+			goto end;
+		}
+
+		set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+		mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
+		mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
+		return;
+	}
+end:
+	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+}
+
+static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
+			set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("safe lut setting not found\n");
+		}
+	}
+}
+
+static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-inline-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline safe lut setting not found\n");
+		}
+	}
+}
+
 static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
 		struct sde_rot_data_type *mdata)
 {
@@ -444,8 +575,14 @@ static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
 		SDEROT_DBG(
 			"Could not read optional property: highest bank bit\n");
 
+	sde_mdp_parse_cdp_setting(pdev, mdata);
+
 	sde_mdp_parse_vbif_qos(pdev, mdata);
 
+	sde_mdp_parse_rot_lut_setting(pdev, mdata);
+
+	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
+
 	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
 
 	return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 9194b44..313c709 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -40,6 +40,9 @@
 #define SDE_MDP_HW_REV_301	SDE_MDP_REV(3, 0, 1)	/* 8998 v1.1 */
 #define SDE_MDP_HW_REV_400	SDE_MDP_REV(4, 0, 0)	/* sdm845 v1.0 */
 
+#define SDE_MDP_VBIF_4_LEVEL_REMAPPER	4
+#define SDE_MDP_VBIF_8_LEVEL_REMAPPER	8
+
 struct sde_mult_factor {
 	uint32_t numer;
 	uint32_t denom;
@@ -77,9 +80,19 @@ enum sde_qos_settings {
 	SDE_QOS_PER_PIPE_LUT,
 	SDE_QOS_SIMPLIFIED_PREFILL,
 	SDE_QOS_VBLANK_PANIC_CTRL,
+	SDE_QOS_LUT,
+	SDE_QOS_DANGER_LUT,
+	SDE_QOS_SAFE_LUT,
 	SDE_QOS_MAX,
 };
 
+enum sde_inline_qos_settings {
+	SDE_INLINE_QOS_LUT,
+	SDE_INLINE_QOS_DANGER_LUT,
+	SDE_INLINE_QOS_SAFE_LUT,
+	SDE_INLINE_QOS_MAX,
+};
+
 /**
  * enum sde_rot_type: SDE rotator HW version
  * @SDE_ROT_TYPE_V1_0: V1.0 HW version
@@ -98,6 +111,7 @@ enum sde_rot_type {
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
+ * @SDE_CAPS_PARTIALWR: partial write override
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
@@ -106,6 +120,7 @@ enum sde_caps_settings {
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 	SDE_CAPS_SBUF_1,
 	SDE_CAPS_UBWC_2,
+	SDE_CAPS_PARTIALWR,
 	SDE_CAPS_MAX,
 };
 
@@ -115,6 +130,12 @@ enum sde_bus_clients {
 	SDE_MAX_BUS_CLIENTS
 };
 
+enum sde_rot_op {
+	SDE_ROT_RD,
+	SDE_ROT_WR,
+	SDE_ROT_OP_MAX
+};
+
 enum sde_rot_regdump_access {
 	SDE_ROT_REGDUMP_READ,
 	SDE_ROT_REGDUMP_WRITE,
@@ -165,6 +186,13 @@ struct sde_rot_regdump {
 	enum sde_rot_regdump_access access;
 };
 
+struct sde_rot_lut_cfg {
+	u32 creq_lut_0;
+	u32 creq_lut_1;
+	u32 danger_lut;
+	u32 safe_lut;
+};
+
 struct sde_rot_data_type {
 	u32 mdss_version;
 
@@ -177,6 +205,7 @@ struct sde_rot_data_type {
 
 	/* bitmap to track qos applicable settings */
 	DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
+	DECLARE_BITMAP(sde_inline_qos_map, SDE_QOS_MAX);
 
 	/* bitmap to track capability settings */
 	DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
@@ -210,6 +239,11 @@ struct sde_rot_data_type {
 	void *sde_rot_hw;
 	int sec_cam_en;
 
+	u32 enable_cdp[SDE_ROT_OP_MAX];
+
+	struct sde_rot_lut_cfg lut_cfg[SDE_ROT_OP_MAX];
+	struct sde_rot_lut_cfg inline_lut_cfg[SDE_ROT_OP_MAX];
+
 	struct ion_client *iclient;
 
 	bool clk_always_on;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 30fda07..44a29aa 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -60,6 +60,9 @@
 /* waiting for hw time out, 3 vsync for 30fps*/
 #define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
 
+/* waiting for inline hw start */
+#define ROT_INLINE_START_TIMEOUT_IN_MS 2000
+
 /* default pixel per clock ratio */
 #define ROT_PIXEL_PER_CLK_NUMERATOR	36
 #define ROT_PIXEL_PER_CLK_DENOMINATOR	10
@@ -299,13 +302,13 @@ static int sde_rotator_update_clk(struct sde_rot_mgr *mgr)
 	return 0;
 }
 
-static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
+static int sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
 	int ret;
 
-	if (WARN_ON(mgr->regulator_enable == on)) {
+	if (mgr->regulator_enable == on) {
 		SDEROT_ERR("Regulators already in selected mode on=%d\n", on);
-		return;
+		return 0;
 	}
 
 	SDEROT_EVTLOG(on);
@@ -327,9 +330,9 @@ static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 		ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
 			mgr->module_power.num_vreg, on);
 	if (ret) {
-		SDEROT_WARN("Rotator regulator failed to %s\n",
-			on ? "enable" : "disable");
-		return;
+		pr_err("rotator regulator failed to %s ret:%d client:%d\n",
+		      on ? "enable" : "disable", ret, mgr->rsc_client != NULL);
+		return ret;
 	}
 
 	if (mgr->ops_hw_post_pmevent)
@@ -341,6 +344,7 @@ static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 	}
 
 	mgr->regulator_enable = on;
+	return 0;
 }
 
 static int sde_rotator_enable_clk(struct sde_rot_mgr *mgr, int clk_idx)
@@ -1508,6 +1512,8 @@ static void sde_rotator_commit_handler(struct work_struct *work)
 	if (entry->item.ts)
 		entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();
 
+	SDEROT_EVTLOG(entry->item.session_id, 1);
+
 	queue_work(entry->doneq->rot_work_queue, &entry->done_work);
 	sde_rot_mgr_unlock(mgr);
 	return;
@@ -1564,6 +1570,13 @@ static void sde_rotator_done_handler(struct work_struct *work)
 		entry->item.flags,
 		entry->dnsc_factor_w, entry->dnsc_factor_h);
 
+	wait_for_completion_timeout(
+			&entry->item.inline_start,
+			msecs_to_jiffies(ROT_INLINE_START_TIMEOUT_IN_MS));
+
+	if (entry->item.ts)
+		entry->item.ts[SDE_ROTATOR_TS_START] = ktime_get();
+
 	SDEROT_EVTLOG(entry->item.session_id, 0);
 	ret = mgr->ops_wait_for_entry(hw, entry);
 	if (ret) {
@@ -2332,11 +2345,36 @@ struct sde_rot_entry_container *sde_rotator_req_init(
 	for (i = 0; i < count; i++) {
 		req->entries[i].item = items[i];
 		req->entries[i].private = private;
+
+		init_completion(&req->entries[i].item.inline_start);
+		complete_all(&req->entries[i].item.inline_start);
 	}
 
 	return req;
 }
 
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		reinit_completion(&req->entries[i].item.inline_start);
+}
+
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		complete_all(&req->entries[i].item.inline_start);
+}
+
 void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rot_entry_container *req)
@@ -2885,12 +2923,11 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 	}
 
 	*pmgr = mgr;
-
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	if (!pm_runtime_enabled(&pdev->dev)) {
-		SDEROT_ERR("fail to enable power, force on\n");
-		sde_rotator_footswitch_ctrl(mgr, true);
+	ret = sde_rotator_footswitch_ctrl(mgr, true);
+	if (ret) {
+		SDEROT_ERR("res_init failed %d\n", ret);
+		ret = -EPROBE_DEFER;
+		goto error_fs_en_fail;
 	}
 
 	/* enable power and clock before h/w initialization/query */
@@ -2931,6 +2968,9 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 	/* disable power and clock after h/w initialization/query */
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
+	sde_rotator_footswitch_ctrl(mgr, false);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
 	return 0;
 
@@ -2940,7 +2980,8 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 error_map_hw_ops:
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
-	pm_runtime_disable(mgr->device);
+	sde_rotator_footswitch_ctrl(mgr, false);
+error_fs_en_fail:
 	sde_rotator_res_destroy(mgr);
 error_res_init:
 error_parse_dt:
@@ -3024,8 +3065,7 @@ int sde_rotator_runtime_resume(struct device *dev)
 
 	SDEROT_DBG("begin runtime_active\n");
 	ATRACE_BEGIN("runtime_active");
-	sde_rotator_footswitch_ctrl(mgr, true);
-	return 0;
+	return sde_rotator_footswitch_ctrl(mgr, true);
 }
 
 /*
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 0051e96..7b8a066 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/cdev.h>
 #include <linux/pm_runtime.h>
+#include <linux/completion.h>
 
 #include "sde_rotator_base.h"
 #include "sde_rotator_util.h"
@@ -115,6 +116,7 @@ enum sde_rotator_ts {
 	SDE_ROTATOR_TS_QUEUE,		/* wait for h/w resource */
 	SDE_ROTATOR_TS_COMMIT,		/* prepare h/w command */
 	SDE_ROTATOR_TS_FLUSH,		/* initiate h/w processing */
+	SDE_ROTATOR_TS_START,		/* h/w triggered (if inline) */
 	SDE_ROTATOR_TS_DONE,		/* receive h/w completion */
 	SDE_ROTATOR_TS_RETIRE,		/* signal destination buffer fence */
 	SDE_ROTATOR_TS_SRCDQB,		/* dequeue source buffer */
@@ -199,6 +201,9 @@ struct sde_rotation_item {
 
 	/* Time stamp for profiling purposes */
 	ktime_t		*ts;
+
+	/* Completion structure for inline rotation */
+	struct completion inline_start;
 };
 
 /*
@@ -604,6 +609,23 @@ struct sde_rot_entry_container *sde_rotator_req_init(
 	u32 count, u32 flags);
 
 /*
+ * sde_rotator_req_reset_start - reset inline h/w 'start' indicator
+ *	For inline rotations, the time of rotation start is not controlled
+ *	by the rotator driver. This function resets an internal 'start'
+ *	indicator so that the rotator can delay its timeout until the
+ *	inline rotation has actually started.
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req);
+
+/*
+ * sde_rotator_req_set_start - set inline h/w 'start' indicator
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req);
+
+/*
  * sde_rotator_req_finish - notify manager that client is finished with the
  *	given request and manager can release the request as required
  * @rot_dev: Pointer to rotator device
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index e9ff67c..3e686e9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -789,7 +789,7 @@ static int sde_rotator_stat_show(struct seq_file *s, void *data)
 					start_time));
 
 		seq_printf(s,
-			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
+			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld st:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
 			i,
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
 					ts[SDE_ROTATOR_TS_SRCQB])),
@@ -801,8 +801,10 @@ static int sde_rotator_stat_show(struct seq_file *s, void *data)
 					ts[SDE_ROTATOR_TS_QUEUE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
 					ts[SDE_ROTATOR_TS_COMMIT])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
 					ts[SDE_ROTATOR_TS_FLUSH])),
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+					ts[SDE_ROTATOR_TS_START])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
 					ts[SDE_ROTATOR_TS_DONE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 90b7194..2e91d54 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1467,7 +1467,9 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
 		int scid = llcc_get_slice_id(ctx->slice);
 
 		/* allocate slot for timestamp */
-		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+		ts = stats->ts[stats->count % SDE_ROTATOR_NUM_EVENTS];
+		if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT)
+			stats->count++;
 
 		if (cmd->rot90)
 			flags |= SDE_ROTATION_90;
@@ -1637,6 +1639,8 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
 			goto error_handle_request;
 		}
 
+		sde_rotator_req_reset_start(req);
+
 		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
 
 		request->committed = true;
@@ -1644,6 +1648,15 @@ int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
 		/* save request in private handle */
 		cmd->priv_handle = request;
 
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_START) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		sde_rotator_req_set_start(request->req);
 	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
 		if (!cmd->priv_handle) {
 			ret = -EINVAL;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
index 051db78..de448a4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,8 @@
 #define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2		0x00C8
 #define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0		0x00D0
 #define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000		0x0550
+#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000		0x0590
 
 #define SDE_MDP_REG_TRAFFIC_SHAPER_EN			BIT(31)
 #define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + (num * 4))
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index 27fd0c3..705eb27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -25,11 +25,13 @@
  * enum sde_rotator_inline_cmd_type - inline rotator command stages
  * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
  * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
  * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
  */
 enum sde_rotator_inline_cmd_type {
 	SDE_ROTATOR_INLINE_CMD_VALIDATE,
 	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_START,
 	SDE_ROTATOR_INLINE_CMD_CLEANUP,
 };
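With the new START stage, hardware kick-off is decoupled from COMMIT: the done handler now waits on item.inline_start, and START is what completes it. A possible call sequence from the display driver side, sketched on the assumption that sde_rotator_inline_commit(handle, cmd, cmd_type) keeps the signature visible in the hunks above; error handling is elided:

	/* validate the request, then program the rotator */
	sde_rotator_inline_commit(hndl, cmd, SDE_ROTATOR_INLINE_CMD_VALIDATE);
	sde_rotator_inline_commit(hndl, cmd, SDE_ROTATOR_INLINE_CMD_COMMIT);

	/* ... display pipeline is flushed and begins fetching ... */

	/* unblocks the rotator done handler waiting on inline_start */
	sde_rotator_inline_commit(hndl, cmd, SDE_ROTATOR_INLINE_CMD_START);

	/* after the frame retires, release the request */
	sde_rotator_inline_commit(hndl, cmd, SDE_ROTATOR_INLINE_CMD_CLEANUP);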
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 980df9f..17fa2cc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -351,12 +351,12 @@ static u32 sde_hw_rotator_v4_outpixfmts[] = {
 	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
 	SDE_PIX_FMT_RGBA_1010102,
 	SDE_PIX_FMT_RGBX_1010102,
-	/* SDE_PIX_FMT_ARGB_2101010 */
-	/* SDE_PIX_FMT_XRGB_2101010 */
+	SDE_PIX_FMT_ARGB_2101010,
+	SDE_PIX_FMT_XRGB_2101010,
 	SDE_PIX_FMT_BGRA_1010102,
 	SDE_PIX_FMT_BGRX_1010102,
-	/* SDE_PIX_FMT_ABGR_2101010 */
-	/* SDE_PIX_FMT_XBGR_2101010 */
+	SDE_PIX_FMT_ABGR_2101010,
+	SDE_PIX_FMT_XBGR_2101010,
 	SDE_PIX_FMT_RGBA_1010102_UBWC,
 	SDE_PIX_FMT_RGBX_1010102_UBWC,
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
@@ -493,6 +493,12 @@ static struct sde_rot_regdump sde_rot_r3_regdump[] = {
 		SDE_ROT_REGDUMP_VBIF },
 };
 
+struct sde_rot_cdp_params {
+	bool enable;
+	struct sde_mdp_format_params *fmt;
+	u32 offset;
+};
+
 /* Invalid software timestamp value for initialization */
 #define SDE_REGDMA_SWTS_INVALID	(~0)
 
@@ -741,6 +747,76 @@ static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
 }
 
 /*
+ * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
+ * levels, enable write gather for the writeback client and, for debug
+ * purposes, optionally disable clock gating.
+ *
+ * @rot: Pointer to rotator hw
+ */
+static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
+{
+	u32 i, mask, vbif_qos, reg_val = 0;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	/* VBIF_ROT QoS remapper setting */
+	switch (mdata->npriority_lvl) {
+
+	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_val = SDE_VBIF_READ(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
+			mask = 0x3 << (XIN_SSPP * 2);
+			reg_val &= ~(mask);
+			vbif_qos = mdata->vbif_nrt_qos[i];
+			reg_val |= vbif_qos << (XIN_SSPP * 2);
+			/* ensure write is issued after the read operation */
+			mb();
+			SDE_VBIF_WRITE(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
+					reg_val);
+		}
+		break;
+
+	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
+		mask = mdata->npriority_lvl - 1;
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			/* RD and WR client */
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_SSPP * 4);
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_WRITEBACK * 4);
+
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
+				reg_val);
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
+				reg_val);
+		}
+		break;
+
+	default:
+		SDEROT_DBG("invalid vbif remapper levels\n");
+	}
+
+	/* Enable write gather for writeback to remove write gaps, which
+	 * may hang AXI/BIMC/SDE.
+	 */
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
+			BIT(XIN_WRITEBACK));
+
+	/*
+	 * For debug purposes, disable clock gating, i.e. keep clocks always on
+	 */
+	if (mdata->clk_always_on) {
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
+				0xFFFF);
+		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
+	}
+}
+
+/*
  * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
  * @ctx: Pointer to rotator context
  * @mask: Bit mask location of the timestamp
@@ -796,6 +872,156 @@ static void sde_hw_rotator_setup_timestamp_packet(
 }
 
 /*
+ * sde_hw_rotator_cdp_configs - configures the CDP registers
+ * @ctx: Pointer to rotator context
+ * @params: Pointer to parameters needed for CDP configs
+ */
+static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
+		struct sde_rot_cdp_params *params)
+{
+	int reg_val;
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	if (!params->enable) {
+		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
+		goto end;
+	}
+
+	reg_val = BIT(0); /* enable cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt))
+		reg_val |= BIT(1); /* enable UBWC meta cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt)
+			|| sde_mdp_is_tilea4x_format(params->fmt)
+			|| sde_mdp_is_tilea5x_format(params->fmt))
+		reg_val |= BIT(2); /* enable tile amortize */
+
+	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
+
+	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
+
+end:
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the WRITEBACK rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].safe_lut);
+
+	/* Inline rotation setting */
+	} else {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the SSPP rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].safe_lut);
+
+	/* inline rotation setting */
+	} else {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
  * sde_hw_rotator_setup_fetchengine - setup fetch engine
  * @ctx: Pointer to rotator context
  * @queue_id: Priority queue identifier
@@ -814,6 +1040,7 @@ static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
 	struct sde_hw_rotator *rot = ctx->rot;
 	struct sde_mdp_format_params *fmt;
 	struct sde_mdp_data *data;
+	struct sde_rot_cdp_params cdp_params = {0};
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 *wrptr;
 	u32 opmode = 0;
@@ -985,13 +1212,29 @@ static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
 		ctx->is_secure = false;
 	}
 
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register RD setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					 mdata->enable_cdp[SDE_ROT_RD] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_SSPP_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QoS LUT / Danger LUT / Safe LUT RD setting */
+	sde_hw_rotator_setup_qos_lut_rd(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/*
 	 * Determine if traffic shaping is required. Only enable traffic
 	 * shaping when content is 4k@30fps. The actual traffic shaping
 	 * bandwidth calculation is done in output setup.
 	 */
-	if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
-			(cfg->fps <= 30)) {
+	if (((!ctx->sbuf_mode)
+			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
+			&& (cfg->fps <= 30)) {
 		SDEROT_DBG("Enable Traffic Shaper\n");
 		ctx->is_traffic_shaping = true;
 	} else {
@@ -1017,9 +1260,11 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
+	struct sde_rot_cdp_params cdp_params = {0};
 	u32 *wrptr;
 	u32 pack = 0;
 	u32 dst_format = 0;
+	u32 partial_write = 0;
 	int i;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1103,8 +1348,13 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
+	/* partial write check */
+	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
+			!sde_mdp_is_ubwc_format(fmt))
+		partial_write = BIT(10);
+
 	/* write config setup for bank configuration */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
 			(ctx->rot->highest_bank & 0x3) << 8);
 
 	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
@@ -1120,8 +1370,23 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
 			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
 
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register WR setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					mdata->enable_cdp[SDE_ROT_WR] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_WB_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
+	sde_hw_rotator_setup_qos_lut_wr(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
-	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
+	if (!ctx->sbuf_mode &&
+			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
 		u32 bw;
 
 		/*
@@ -2136,7 +2401,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
 			item->input.format, item->output.format,
 			entry->perf->config.frame_rate);
 
-	if (mdata->default_ot_rd_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2158,7 +2423,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
 		sde_mdp_set_ot_limit(&ot_params);
 	}
 
-	if (mdata->default_ot_wr_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2189,46 +2454,9 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
 		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
 	}
 
-	/* Set CDP control registers to 0 if CDP is disabled */
-	if (!test_bit(SDE_QOS_CDP, mdata->sde_qos_map)) {
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CDP_CNTL, 0x0);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_WB_CDP_CNTL, 0x0);
-	}
-
-	if (mdata->npriority_lvl > 0) {
-		u32 mask, reg_val, i, vbif_qos;
-
-		for (i = 0; i < mdata->npriority_lvl; i++) {
-			reg_val = SDE_VBIF_READ(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
-			mask = 0x3 << (XIN_SSPP * 2);
-			reg_val &= ~(mask);
-			vbif_qos = mdata->vbif_nrt_qos[i];
-			reg_val |= vbif_qos << (XIN_SSPP * 2);
-			/* ensure write is issued after the read operation */
-			mb();
-			SDE_VBIF_WRITE(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
-					reg_val);
-		}
-	}
-
-	/* Enable write gather for writeback to remove write gaps, which
-	 * may hang AXI/BIMC/SDE.
-	 */
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
-			BIT(XIN_WRITEBACK));
-
-	/*
-	 * For debug purpose, disable clock gating, i.e. Clocks always on
-	 */
-	if (mdata->clk_always_on) {
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
-				0xFFFF);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
-	}
+	/* VBIF QoS and other settings */
+	if (!ctx->sbuf_mode)
+		sde_hw_rotator_vbif_setting(rot);
 
 	return 0;
 
@@ -2337,7 +2565,6 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
 
 	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
 	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
 	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
@@ -2368,6 +2595,7 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
 		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index aa762dd..d2b81d5 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -81,6 +81,8 @@
 #define ROT_SSPP_CREQ_LUT                       (SDE_ROT_SSPP_OFFSET+0x68)
 #define ROT_SSPP_QOS_CTRL                       (SDE_ROT_SSPP_OFFSET+0x6C)
 #define ROT_SSPP_SRC_ADDR_SW_STATUS             (SDE_ROT_SSPP_OFFSET+0x70)
+#define ROT_SSPP_CREQ_LUT_0                     (SDE_ROT_SSPP_OFFSET+0x74)
+#define ROT_SSPP_CREQ_LUT_1                     (SDE_ROT_SSPP_OFFSET+0x78)
 #define ROT_SSPP_CURRENT_SRC0_ADDR              (SDE_ROT_SSPP_OFFSET+0xA4)
 #define ROT_SSPP_CURRENT_SRC1_ADDR              (SDE_ROT_SSPP_OFFSET+0xA8)
 #define ROT_SSPP_CURRENT_SRC2_ADDR              (SDE_ROT_SSPP_OFFSET+0xAC)
@@ -167,6 +169,8 @@
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
 #define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_CREQ_LUT_0                       (SDE_ROT_WB_OFFSET+0x098)
+#define ROT_WB_CREQ_LUT_1                       (SDE_ROT_WB_OFFSET+0x09C)
 #define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
 #define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
 #define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index a477340..88250e1 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -515,13 +515,13 @@ static u32 get_hfi_buffer(int hal_buffer)
 		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_1:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_2:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2;
 		break;
 	case HAL_BUFFER_INTERNAL_PERSIST:
 		buffer = HFI_BUFFER_INTERNAL_PERSIST;
@@ -1523,14 +1523,6 @@ int create_pkt_cmd_session_set_property(
 			sizeof(struct hfi_vui_timing_info);
 		break;
 	}
-	case HAL_CONFIG_VPE_DEINTERLACE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_PARAM_VENC_GENERATE_AUDNAL:
 	{
 		create_pkt_enable(pkt->rg_property_data,
@@ -1863,14 +1855,6 @@ int create_pkt_cmd_session_set_property(
 		pkt->size += sizeof(u32) + sizeof(*work_mode);
 		break;
 	}
-	case HAL_PARAM_USE_SYS_CACHE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_USE_SYS_CACHE,
-			(((struct hal_enable *) pdata)->enable));
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
 	case HAL_CONFIG_BUFFER_REQUIREMENTS:
 	case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index b424fbb..89e8356 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1082,19 +1082,19 @@ static void hfi_process_sess_get_prop_buf_req(
 			buffreq->buffer[5].buffer_type =
 				HAL_BUFFER_EXTRADATA_OUTPUT2;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH:
 			memcpy(&buffreq->buffer[6], hfi_buf_req,
 			sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[6].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_1:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1:
 			memcpy(&buffreq->buffer[7], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[7].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH_1;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_2:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2:
 			memcpy(&buffreq->buffer[8], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[8].buffer_type =
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 3d3d567..074ea4fa 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -94,10 +94,17 @@ static int get_device_address(struct smem_client *smem_client,
 		trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0,
 			align, *iova, *buffer_size);
 
-		/* Map a scatterlist into an SMMU with system cacheability */
-		rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
-			table->nents, DMA_BIDIRECTIONAL,
-			buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		/* Map a scatterlist into SMMU */
+		if (smem_client->res->sys_cache_present) {
+			/* with sys cache attribute & delayed unmap */
+			rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL,
+				buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		} else {
+			/* with delayed unmap */
+			rc = msm_dma_map_sg_lazy(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL, buf);
+		}
 
 		if (rc != table->nents) {
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index c5c4269..7802d31 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -785,6 +785,8 @@ static int __init msm_vidc_init(void)
 	if (rc) {
 		dprintk(VIDC_ERR,
 			"Failed to register platform driver\n");
+		msm_vidc_debugfs_deinit_drv();
+		debugfs_remove_recursive(vidc_driver->debugfs_root);
 		kfree(vidc_driver);
 		vidc_driver = NULL;
 	}
@@ -795,6 +797,7 @@ static int __init msm_vidc_init(void)
 static void __exit msm_vidc_exit(void)
 {
 	platform_driver_unregister(&msm_vidc_driver);
+	msm_vidc_debugfs_deinit_drv();
 	debugfs_remove_recursive(vidc_driver->debugfs_root);
 	mutex_destroy(&vidc_driver->lock);
 	kfree(vidc_driver);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 8906027..aa5f18d 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -86,7 +86,6 @@ static const char *const h264_video_entropy_cabac_model[] = {
 };
 
 static const char *const hevc_tier_level[] = {
-	"Level unknown"
 	"Main Tier Level 1",
 	"Main Tier Level 2",
 	"Main Tier Level 2.1",
@@ -113,6 +112,7 @@ static const char *const hevc_tier_level[] = {
 	"High Tier Level 6",
 	"High Tier Level 6.1",
 	"High Tier Level 6.2",
+	"Level unknown",
 };
 
 static const char *const hevc_profile[] = {
@@ -1202,6 +1202,16 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
 			num_b = ctrl->val;
 
+		if ((num_b < inst->capability.bframe.min) ||
+			(num_b > inst->capability.bframe.max)) {
+			dprintk(VIDC_ERR,
+				"Error setting num b frames %d min, max supported is %d, %d\n",
+				num_b, inst->capability.bframe.min,
+				inst->capability.bframe.max);
+			rc = -ENOTSUPP;
+			break;
+		}
+
 		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.pframes = num_p;
 		intra_period.bframes = num_b;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2289b23..6253632 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -168,6 +168,15 @@ int msm_vidc_query_ctrl(void *instance, struct v4l2_queryctrl *ctrl)
 	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.blur_height);
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bframe);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_mbs);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_bytes);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1267,10 +1276,9 @@ static int msm_vidc_queue_setup(struct vb2_queue *q,
 			return -EINVAL;
 		}
 		if (*num_buffers < bufreq->buffer_count_min_host) {
-			dprintk(VIDC_ERR,
-				"Invalid parameters : Req = %d Act = %d\n",
+			dprintk(VIDC_DBG,
+				"Client passed num buffers %d less than the min_host count %d\n",
 				*num_buffers, bufreq->buffer_count_min_host);
-			return -EINVAL;
 		}
 		*num_planes = inst->bufq[OUTPUT_PORT].num_planes;
 		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
@@ -1299,11 +1307,10 @@ static int msm_vidc_queue_setup(struct vb2_queue *q,
 		if (inst->session_type != MSM_VIDC_DECODER &&
 			inst->state > MSM_VIDC_LOAD_RESOURCES_DONE) {
 			if (*num_buffers < bufreq->buffer_count_min_host) {
-				dprintk(VIDC_ERR,
-					"Invalid parameters : Req = %d Act = %d\n",
+				dprintk(VIDC_DBG,
+					"Client passed num buffers %d less than the min_host count %d\n",
 						*num_buffers,
 						bufreq->buffer_count_min_host);
-				return -EINVAL;
 			}
 		}
 		*num_planes = inst->bufq[CAPTURE_PORT].num_planes;
@@ -1449,8 +1456,6 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
 		}
 	}
 
-	msm_comm_set_use_sys_cache(inst);
-
 	/*
 	 * For seq_changed_insufficient, driver should set session_continue
 	 * to firmware after the following sequence
@@ -1890,16 +1895,30 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 	switch (ctrl->id) {
 
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
-	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+		ctrl->val = msm_comm_hal_to_v4l2(
+			V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+			inst->profile);
+		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
-		ctrl->val = inst->profile;
+		ctrl->val = msm_comm_hal_to_v4l2(
+			V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE,
+			inst->profile);
 		break;
 
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+		ctrl->val = msm_comm_hal_to_v4l2(
+			V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+			inst->level);
+		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
-	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+		ctrl->val = msm_comm_hal_to_v4l2(
+			V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL,
+			inst->level);
+		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
-		ctrl->val = inst->level;
+		ctrl->val = msm_comm_hal_to_v4l2(
+			V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL,
+			inst->level);
 		break;
 
 	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
@@ -1922,6 +1941,7 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 		ctrl->val = bufreq->buffer_count_min_host;
 		break;
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		msm_comm_try_get_bufreqs(inst);
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
 		if (!bufreq) {
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index c0bbfbb..b1a8e8b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -199,6 +199,8 @@ int msm_comm_hal_to_v4l2(int id, int value)
 			return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
 		case HAL_H264_LEVEL_51:
 			return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
+		case HAL_H264_LEVEL_52:
+			return V4L2_MPEG_VIDEO_H264_LEVEL_5_2;
 		default:
 			goto unknown_value;
 		}
@@ -212,7 +214,91 @@ int msm_comm_hal_to_v4l2(int id, int value)
 		default:
 			goto unknown_value;
 		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_PROFILE:
+		switch (value) {
+		case HAL_HEVC_PROFILE_MAIN:
+			return V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN;
+		case HAL_HEVC_PROFILE_MAIN10:
+			return V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN10;
+		case HAL_HEVC_PROFILE_MAIN_STILL_PIC:
+			return V4L2_MPEG_VIDC_VIDEO_HEVC_PROFILE_MAIN_STILL_PIC;
+		default:
+			goto unknown_value;
+		}
+	case V4L2_CID_MPEG_VIDC_VIDEO_HEVC_TIER_LEVEL:
+	switch (value) {
+	case HAL_HEVC_MAIN_TIER_LEVEL_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2;
+	case HAL_HEVC_MAIN_TIER_LEVEL_2_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_2_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_3:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3;
+	case HAL_HEVC_MAIN_TIER_LEVEL_3_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_3_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_4:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4;
+	case HAL_HEVC_MAIN_TIER_LEVEL_4_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_5:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5;
+	case HAL_HEVC_MAIN_TIER_LEVEL_5_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_5_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_2;
+	case HAL_HEVC_MAIN_TIER_LEVEL_6:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6;
+	case HAL_HEVC_MAIN_TIER_LEVEL_6_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_1;
+	case HAL_HEVC_MAIN_TIER_LEVEL_6_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_2;
+	case HAL_HEVC_HIGH_TIER_LEVEL_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2;
+	case HAL_HEVC_HIGH_TIER_LEVEL_2_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_3:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3;
+	case HAL_HEVC_HIGH_TIER_LEVEL_3_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_3_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_4:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4;
+	case HAL_HEVC_HIGH_TIER_LEVEL_4_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_4_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_5:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5;
+	case HAL_HEVC_HIGH_TIER_LEVEL_5_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_5_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2;
+	case HAL_HEVC_HIGH_TIER_LEVEL_6:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6;
+	case HAL_HEVC_HIGH_TIER_LEVEL_6_1:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1;
+	case HAL_HEVC_HIGH_TIER_LEVEL_6_2:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_2;
+	case HAL_HEVC_TIER_LEVEL_UNKNOWN:
+		return V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN;
+	default:
+		goto unknown_value;
+	}
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+		switch (value) {
+		case HAL_VPX_LEVEL_VERSION_0:
+			return V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_0;
+		case HAL_VPX_LEVEL_VERSION_1:
+			return V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_1;
+		case HAL_VPX_LEVEL_VERSION_2:
+			return V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_2;
+		case HAL_VPX_LEVEL_VERSION_3:
+			return V4L2_MPEG_VIDC_VIDEO_VP8_VERSION_3;
+		case HAL_VPX_LEVEL_UNUSED:
+			return V4L2_MPEG_VIDC_VIDEO_VP8_UNUSED;
+		default:
+			goto unknown_value;
+		}
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
 		/*
@@ -396,6 +482,8 @@ int msm_comm_v4l2_to_hal(int id, int value)
 			return HAL_HEVC_HIGH_TIER_LEVEL_6;
 		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1:
 			return HAL_HEVC_HIGH_TIER_LEVEL_6_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_2:
+			return HAL_HEVC_HIGH_TIER_LEVEL_6_2;
 		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN:
 			return HAL_HEVC_TIER_LEVEL_UNKNOWN;
 		default:
@@ -1463,15 +1551,9 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
 	* ptr[4] = colour space
 	*/
 
-	inst->entropy_mode = msm_comm_hal_to_v4l2(
-		V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
-			event_notify->entropy_mode);
-	inst->profile = msm_comm_hal_to_v4l2(
-		V4L2_CID_MPEG_VIDEO_H264_PROFILE,
-			event_notify->profile);
-	inst->level = msm_comm_hal_to_v4l2(
-		V4L2_CID_MPEG_VIDEO_H264_LEVEL,
-			event_notify->level);
+	inst->entropy_mode = event_notify->entropy_mode;
+	inst->profile = event_notify->profile;
+	inst->level = event_notify->level;
 
 	ptr = (u32 *)seq_changed_event.u.data;
 	ptr[0] = event_notify->height;
@@ -2342,7 +2424,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS)
 			vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOS;
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG)
-			vbuf->flags &= ~V4L2_QCOM_BUF_FLAG_CODECCONFIG;
+			vbuf->flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG;
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME)
 			vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME;
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ)
@@ -5554,40 +5636,3 @@ u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width)
 	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
 }
 
-void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst)
-{
-	struct hal_enable syscache_use;
-	int rc = 0;
-
-
-	if (!inst->core->resources.sys_cache_enabled)
-		goto exit;
-
-	syscache_use.enable = false;
-	inst->clk_data.use_sys_cache = false;
-
-	if (inst->flags & VIDC_REALTIME)
-		syscache_use.enable = true;
-
-	if (inst->flags & VIDC_THUMBNAIL)
-		syscache_use.enable = false;
-
-	dprintk(VIDC_DBG,
-		"set_use_sys_cache: enable = %d inst = %pK flags =%d\n",
-		syscache_use.enable, inst, inst->flags);
-	rc = msm_comm_try_set_prop(inst, HAL_PARAM_USE_SYS_CACHE,
-		&syscache_use);
-	if (rc) {
-		dprintk(VIDC_ERR, "set_use_sys_cache: failed!!\n");
-			inst->clk_data.use_sys_cache = false;
-		goto exit;
-	}
-
-	inst->clk_data.use_sys_cache = syscache_use.enable;
-
-	return;
-
-exit:
-	return;
-}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index f62c132..8ffbf50 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -31,10 +31,12 @@ int msm_vidc_firmware_unload_delay = 15000;
 bool msm_vidc_thermal_mitigation_disabled = !true;
 bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
+bool msm_vidc_syscache_disable = !true;
 
 #define MAX_DBG_BUF_SIZE 4096
 
 struct debug_buffer {
+	struct mutex lock;
 	char ptr[MAX_DBG_BUF_SIZE];
 	char *curr;
 	u32 filled_size;
@@ -62,8 +64,11 @@ static u32 write_str(struct debug_buffer *buffer, const char *fmt, ...)
 	va_list args;
 	u32 size;
 
+	char *curr = buffer->curr;
+	char *end = buffer->ptr + MAX_DBG_BUF_SIZE;
+
 	va_start(args, fmt);
-	size = vscnprintf(buffer->curr, MAX_DBG_BUF_SIZE - 1, fmt, args);
+	size = vscnprintf(curr, end - curr, fmt, args);
 	va_end(args);
 	buffer->curr += size;
 	buffer->filled_size += size;
@@ -77,12 +82,15 @@ static ssize_t core_info_read(struct file *file, char __user *buf,
 	struct hfi_device *hdev;
 	struct hal_fw_info fw_info = { {0} };
 	int i = 0, rc = 0;
+	ssize_t len = 0;
 
 	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "Invalid params, core: %pK\n", core);
 		return 0;
 	}
 	hdev = core->device;
+
+	mutex_lock(&dbg_buf.lock);
 	INIT_DBG_BUF(dbg_buf);
 	write_str(&dbg_buf, "===============================\n");
 	write_str(&dbg_buf, "CORE %d: %pK\n", core->id, core);
@@ -106,8 +114,11 @@ static ssize_t core_info_read(struct file *file, char __user *buf,
 			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
 			"pending" : "done");
 	}
-	return simple_read_from_buffer(buf, count, ppos,
+	len = simple_read_from_buffer(buf, count, ppos,
 			dbg_buf.ptr, dbg_buf.filled_size);
+
+	mutex_unlock(&dbg_buf.lock);
+	return len;
 }
 
 static const struct file_operations core_info_fops = {
@@ -149,8 +160,10 @@ static const struct file_operations ssr_fops = {
 struct dentry *msm_vidc_debugfs_init_drv(void)
 {
 	bool ok = false;
-	struct dentry *dir = debugfs_create_dir("msm_vidc", NULL);
+	struct dentry *dir = NULL;
 
+	mutex_init(&dbg_buf.lock);
+	dir = debugfs_create_dir("msm_vidc", NULL);
 	if (IS_ERR_OR_NULL(dir)) {
 		dir = NULL;
 		goto failed_create_dir;
@@ -185,7 +198,9 @@ struct dentry *msm_vidc_debugfs_init_drv(void)
 	__debugfs_create(bool, "clock_scaling",
 			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
-			&msm_vidc_debug_timeout);
+			&msm_vidc_debug_timeout) &&
+	__debugfs_create(bool, "disable_video_syscache",
+			&msm_vidc_syscache_disable);
 
 #undef __debugfs_create
 
@@ -271,11 +286,14 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
 {
 	struct msm_vidc_inst *inst = file->private_data;
 	int i, j;
+	ssize_t len = 0;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "Invalid params, inst %pK\n", inst);
 		return 0;
 	}
+
+	mutex_lock(&dbg_buf.lock);
 	INIT_DBG_BUF(dbg_buf);
 	write_str(&dbg_buf, "===============================\n");
 	write_str(&dbg_buf, "INSTANCE: %pK (%s)\n", inst,
@@ -330,8 +348,10 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
 
 	publish_unreleased_reference(inst);
 
-	return simple_read_from_buffer(buf, count, ppos,
+	len = simple_read_from_buffer(buf, count, ppos,
 		dbg_buf.ptr, dbg_buf.filled_size);
+	mutex_unlock(&dbg_buf.lock);
+	return len;
 }
 
 static const struct file_operations inst_info_fops = {
@@ -414,3 +434,8 @@ void msm_vidc_debugfs_update(struct msm_vidc_inst *inst,
 	}
 }
 
+void msm_vidc_debugfs_deinit_drv(void)
+{
+	mutex_destroy(&dbg_buf.lock);
+}
+
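
The write_str() change above bounds each append by the space left between the current write position and the end of the shared buffer, and the new mutex serializes readers of that buffer. A standalone userspace sketch of the same bounded-append pattern (struct and function names here are illustrative; the kernel code uses vscnprintf and a fixed 4 KB buffer):

	#include <stdarg.h>
	#include <stdio.h>
	#include <string.h>

	#define BUF_SIZE 4096

	struct dbg_buf {
		char data[BUF_SIZE];
		char *curr;
		size_t filled;
	};

	/* Append formatted text without ever writing past the end of the
	 * buffer; the remaining space is computed from the current write
	 * position, not from the total buffer size. */
	static size_t buf_append(struct dbg_buf *b, const char *fmt, ...)
	{
		char *end = b->data + BUF_SIZE;
		size_t room = (size_t)(end - b->curr);
		va_list args;
		int n;

		if (room == 0)
			return 0;

		va_start(args, fmt);
		n = vsnprintf(b->curr, room, fmt, args);
		va_end(args);

		if (n < 0)
			return 0;
		if ((size_t)n >= room)
			n = (int)(room - 1);	/* output was truncated */

		b->curr += n;
		b->filled += (size_t)n;
		return (size_t)n;
	}

	int main(void)
	{
		struct dbg_buf b;

		memset(&b, 0, sizeof(b));
		b.curr = b.data;
		buf_append(&b, "CORE %d: %p\n", 0, (void *)&b);
		buf_append(&b, "state: %s\n", "ready");
		fwrite(b.data, 1, b.filled, stdout);
		return 0;
	}
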
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index f5c8e5a..8fd895d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -64,6 +64,7 @@ extern int msm_vidc_firmware_unload_delay;
 extern bool msm_vidc_thermal_mitigation_disabled;
 extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
+extern bool msm_vidc_syscache_disable;
 
 #define VIDC_MSG_PRIO2STRING(__level) ({ \
 	char *__str; \
@@ -127,6 +128,7 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst,
 		struct dentry *parent);
 void msm_vidc_debugfs_update(struct msm_vidc_inst *inst,
 		enum msm_vidc_debugfs_event e);
+void msm_vidc_debugfs_deinit_drv(void);
 
 static inline void tic(struct msm_vidc_inst *i, enum profiling_points p,
 				 char *b)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 17c3045..37bccbd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -224,7 +224,6 @@ struct clock_data {
 	u32 core_id;
 	enum hal_work_mode work_mode;
 	bool low_latency_mode;
-	bool use_sys_cache;
 };
 
 struct profile_data {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 5cf4628..d259072 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -275,12 +275,12 @@ static int msm_vidc_load_subcache_info(struct msm_vidc_platform_resources *res)
 			"cache-slice-names", c, &vsc->name);
 	}
 
-	res->sys_cache_enabled = true;
+	res->sys_cache_present = true;
 
 	return 0;
 
 err_load_subcache_table_fail:
-	res->sys_cache_enabled = false;
+	res->sys_cache_present = false;
 	subcaches->count = 0;
 	subcaches->subcache_tbl = NULL;
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index d76985e..b07785a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -159,6 +159,7 @@ struct msm_vidc_platform_resources {
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
+	bool sys_cache_present;
 	bool sys_cache_enabled;
 	struct subcache_set subcache_set;
 	struct reg_set reg_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 5a8dd26..8968764 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -120,6 +120,11 @@ static inline bool __core_in_valid_state(struct venus_hfi_device *device)
 	return device->state != VENUS_STATE_DEINIT;
 }
 
+static inline bool is_sys_cache_present(struct venus_hfi_device *device)
+{
+	return device->res->sys_cache_present;
+}
+
 static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
@@ -3492,7 +3497,7 @@ static void __deinit_subcaches(struct venus_hfi_device *device)
 		goto exit;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		goto exit;
 
 	venus_hfi_for_each_subcache_reverse(device, sinfo) {
@@ -3519,7 +3524,7 @@ static int __init_subcaches(struct venus_hfi_device *device)
 		return -EINVAL;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		return 0;
 
 	venus_hfi_for_each_subcache(device, sinfo) {
@@ -3764,7 +3769,7 @@ static int __enable_subcaches(struct venus_hfi_device *device)
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
@@ -3812,6 +3817,8 @@ static int __enable_subcaches(struct venus_hfi_device *device)
 
 	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
 
+	device->res->sys_cache_enabled = true;
+
 	return 0;
 
 err_fail_set_subacaches:
@@ -3830,7 +3837,7 @@ static int __disable_subcaches(struct venus_hfi_device *device)
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	dprintk(VIDC_DBG, "Disabling Subcaches\n");
@@ -3877,6 +3884,8 @@ static int __disable_subcaches(struct venus_hfi_device *device)
 		}
 	}
 
+	device->res->sys_cache_enabled = false;
+
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 48a6f17..5601f1b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -56,13 +56,6 @@
 #define  HFI_ERR_SESSION_START_CODE_NOT_FOUND		\
 	(HFI_OX_BASE + 0x1004)
 
-#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
-#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
-#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
-#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
-#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
-#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
-#define HFI_BUFFER_INTERNAL_RECON (HFI_OX_BASE + 0x9)
 
 #define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3)
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index bcc29c0..a2f076b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -157,7 +157,6 @@ enum hal_property {
 	HAL_CONFIG_VPE_OPERATIONS,
 	HAL_PARAM_VENC_INTRA_REFRESH,
 	HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
-	HAL_CONFIG_VPE_DEINTERLACE,
 	HAL_SYS_DEBUG_CONFIG,
 	HAL_CONFIG_BUFFER_REQUIREMENTS,
 	HAL_CONFIG_PRIORITY,
@@ -224,7 +223,6 @@ enum hal_property {
 	HAL_PARAM_VIDEO_CORES_USAGE,
 	HAL_PARAM_VIDEO_WORK_MODE,
 	HAL_PARAM_SECURE,
-	HAL_PARAM_USE_SYS_CACHE,
 };
 
 enum hal_domain {
@@ -377,11 +375,11 @@ enum hal_vpx_profile {
 };
 
 enum hal_vpx_level {
+	HAL_VPX_LEVEL_UNUSED = 0x00000000,
 	HAL_VPX_LEVEL_VERSION_0 = 0x00000001,
 	HAL_VPX_LEVEL_VERSION_1 = 0x00000002,
 	HAL_VPX_LEVEL_VERSION_2 = 0x00000004,
 	HAL_VPX_LEVEL_VERSION_3 = 0x00000008,
-	HAL_VPX_LEVEL_UNUSED = 0x10000000,
 };
 
 struct hal_frame_rate {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index fc638f0..2d4a573 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -155,6 +155,13 @@
 #define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
 #define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
 #define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH	(HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1	(HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2	(HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON	(HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT		(HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2		(HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT		(HFI_COMMON_BASE + 0xC)
 
 #define  HFI_BITDEPTH_8				(HFI_COMMON_BASE + 0x0)
 #define  HFI_BITDEPTH_9				(HFI_COMMON_BASE + 0x1)
@@ -220,8 +227,6 @@ struct hfi_buffer_info {
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 #define  HFI_PROPERTY_PARAM_SECURE_SESSION		\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x011)
-#define  HFI_PROPERTY_PARAM_USE_SYS_CACHE				\
-	(HFI_PROPERTY_PARAM_COMMON_START + 0x012)
 #define  HFI_PROPERTY_PARAM_WORK_MODE                       \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
 
@@ -344,8 +349,6 @@ struct hfi_buffer_info {
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
 #define  HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE		\
 	(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
-	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
 #define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
 	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 27e7cf6..7c24da5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -206,6 +206,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
 		}
 		s5p_mfc_clock_on();
 		ret = s5p_mfc_init_hw(dev);
+		s5p_mfc_clock_off();
 		if (ret)
 			mfc_err("Failed to reinit FW\n");
 	}
@@ -663,9 +664,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 				break;
 			}
 			s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-			wake_up_ctx(ctx, reason, err);
 			WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
 			s5p_mfc_clock_off();
+			wake_up_ctx(ctx, reason, err);
 			s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 		} else {
 			s5p_mfc_handle_frame(ctx, reason, err);
@@ -679,15 +680,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
 		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
 		ctx->state = MFCINST_GOT_INST;
-		clear_work_bit(ctx);
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
-		clear_work_bit(ctx);
 		ctx->inst_no = MFC_NO_INSTANCE_SET;
 		ctx->state = MFCINST_FREE;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
@@ -697,9 +694,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 		if (ctx)
 			clear_work_bit(ctx);
 		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-		wake_up_dev(dev, reason, err);
 		clear_bit(0, &dev->hw_lock);
 		clear_bit(0, &dev->enter_suspend);
+		wake_up_dev(dev, reason, err);
 		break;
 
 	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
@@ -714,9 +711,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 		break;
 
 	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
-		clear_work_bit(ctx);
 		ctx->state = MFCINST_RUNNING;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	default:
@@ -735,6 +730,8 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 		mfc_err("Failed to unlock hw\n");
 
 	s5p_mfc_clock_off();
+	clear_work_bit(ctx);
+	wake_up(&ctx->queue);
 
 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 	spin_unlock(&dev->irqlock);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7ef..db525cd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1332,8 +1332,8 @@ static int mceusb_dev_probe(struct usb_interface *intf,
 			}
 		}
 	}
-	if (ep_in == NULL) {
-		dev_dbg(&intf->dev, "inbound and/or endpoint not found");
+	if (!ep_in || !ep_out) {
+		dev_dbg(&intf->dev, "required endpoints not found\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 8263c4b..bf4b3ca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -674,10 +674,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 
 	spin_lock_init(&adev->slock);
 	err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_cx231xx_pcm_capture);
@@ -691,10 +689,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 	INIT_WORK(&dev->wq_trigger, audio_trigger);
 
 	err = snd_card_register(card);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
+
 	adev->sndcard = card;
 	adev->udev = dev->udev;
 
@@ -704,6 +701,11 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 					    hs_config_info[0].interface_info.
 					    audio_index + 1];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+		err = -ENODEV;
+		goto err_free_card;
+	}
+
 	adev->end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -713,13 +715,20 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 		"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
 		adev->end_point_addr, adev->num_alt);
 	adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
-
-	if (adev->alt_max_pkt_size == NULL)
-		return -ENOMEM;
+	if (!adev->alt_max_pkt_size) {
+		err = -ENOMEM;
+		goto err_free_card;
+	}
 
 	for (i = 0; i < adev->num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+			err = -ENODEV;
+			goto err_free_pkt_size;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
 				wMaxPacketSize);
 		adev->alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -729,6 +738,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
 	}
 
 	return 0;
+
+err_free_pkt_size:
+	kfree(adev->alt_max_pkt_size);
+err_free_card:
+	snd_card_free(card);
+
+	return err;
 }
 
 static int cx231xx_audio_fini(struct cx231xx *dev)
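
The bNumEndpoints checks added above guard every dereference of altsetting[i].endpoint[isoc_pipe] against descriptors that advertise fewer endpoints than expected. A minimal sketch of that validate-before-index pattern, with simplified stand-in types (nothing here is the real USB descriptor layout):

	#include <stdio.h>

	/* Simplified shapes standing in for the USB descriptor structs;
	 * struct and field names are illustrative only. */
	struct altsetting {
		int num_endpoints;
		unsigned short max_packet_size[8];
	};

	/* Confirm the alternate setting actually exposes the isochronous
	 * endpoint before indexing into its endpoint array. */
	static int read_max_packet(const struct altsetting *alt, int isoc_pipe,
				   unsigned short *out)
	{
		if (alt->num_endpoints < isoc_pipe + 1)
			return -1;	/* would read past the descriptor array */

		*out = alt->max_packet_size[isoc_pipe];
		return 0;
	}

	int main(void)
	{
		struct altsetting bad = { .num_endpoints = 1 };
		unsigned short mps;

		if (read_max_packet(&bad, 1, &mps))
			printf("rejected malformed alternate setting\n");
		return 0;
	}
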
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 36bc254..be9e333 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1397,6 +1397,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
 	dev->video_mode.num_alt = uif->num_altsetting;
 
@@ -1410,7 +1413,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 		return -ENOMEM;
 
 	for (i = 0; i < dev->video_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
 		dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
 		dev_dbg(dev->dev,
 			"Alternate setting %i, max size= %i\n", i,
@@ -1427,6 +1435,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->vbi_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1443,8 +1454,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 		return -ENOMEM;
 
 	for (i = 0; i < dev->vbi_mode.num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->vbi_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1464,6 +1479,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->sliced_cc_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1478,7 +1496,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
 		return -ENOMEM;
 
 	for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->sliced_cc_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1647,6 +1670,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
 		}
 		uif = udev->actconfig->interface[idx];
 
+		if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+			retval = -ENODEV;
+			goto err_video_alt;
+		}
+
 		dev->ts1_mode.end_point_addr =
 		    uif->altsetting[0].endpoint[isoc_pipe].
 				desc.bEndpointAddress;
@@ -1664,7 +1692,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
 		}
 
 		for (i = 0; i < dev->ts1_mode.num_alt; i++) {
-			u16 tmp = le16_to_cpu(uif->altsetting[i].
+			u16 tmp;
+
+			if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+				retval = -ENODEV;
+				goto err_video_alt;
+			}
+
+			tmp = le16_to_cpu(uif->altsetting[i].
 						endpoint[isoc_pipe].desc.
 						wMaxPacketSize);
 			dev->ts1_mode.alt_max_pkt_size[i] =
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5..563f690 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -812,6 +812,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
 
 	/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
 
+	if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+		return -ENODEV;
+
 	purb = usb_alloc_urb(0, GFP_KERNEL);
 	if (purb == NULL)
 		return -ENOMEM;
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56c..1f7bce6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -12,6 +12,8 @@
 #include <linux/kconfig.h>
 #include "dibusb.h"
 
+MODULE_LICENSE("GPL");
+
 /* 3000MC/P stuff */
 // Config Adjacent channels  Perf -cal22
 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 4284f69..475a3c0 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -33,6 +33,9 @@ static int digitv_ctrl_msg(struct dvb_usb_device *d,
 
 	wo = (rbuf == NULL || rlen == 0); /* write-only */
 
+	if (wlen > 4 || rlen > 4)
+		return -EIO;
+
 	memset(st->sndbuf, 0, 7);
 	memset(st->rcvbuf, 0, 7);
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c3e6734..4a0cc54 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -205,6 +205,20 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
 
 	switch (num) {
 	case 2:
+		if (msg[0].len != 1) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
+		if (2 + msg[1].len > sizeof(buf6)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		/* read si2109 register by number */
 		buf6[0] = msg[0].addr << 1;
 		buf6[1] = msg[0].len;
@@ -220,6 +234,13 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
 	case 1:
 		switch (msg[0].addr) {
 		case 0x68:
+			if (2 + msg[0].len > sizeof(buf6)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* write to si2109 register */
 			buf6[0] = msg[0].addr << 1;
 			buf6[1] = msg[0].len;
@@ -263,6 +284,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
+
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -463,6 +491,12 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -697,6 +731,13 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 			msg[0].buf[0] = state->data[1];
 			break;
 		default:
+			if (3 + msg[0].len > sizeof(state->data)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* always i2c write*/
 			state->data[0] = 0x08;
 			state->data[1] = msg[0].addr;
@@ -712,6 +753,19 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 		break;
 	case 2:
 		/* always i2c read */
+		if (4 + msg[0].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+		if (1 + msg[1].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		state->data[0] = 0x09;
 		state->data[1] = msg[0].len;
 		state->data[2] = msg[1].len;
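
Each new check above compares the caller-supplied i2c message length against the fixed command buffer before any memcpy. A small standalone sketch of the same guard (the struct and buffer sizes are illustrative, not the driver's):

	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-in for one i2c message; the real struct differs. */
	struct xfer_msg {
		unsigned char addr;
		unsigned char len;
		unsigned char *buf;
	};

	/* Pack "addr, len, payload" into a fixed command buffer, refusing
	 * messages that would overflow it. */
	static int pack_write(unsigned char *cmd, size_t cmd_size,
			      const struct xfer_msg *msg)
	{
		if ((size_t)msg->len + 2 > cmd_size)
			return -1;	/* would overflow the fixed buffer */

		cmd[0] = msg->addr << 1;
		cmd[1] = msg->len;
		memcpy(&cmd[2], msg->buf, msg->len);
		return (int)(msg->len + 2);
	}

	int main(void)
	{
		unsigned char cmd[6];
		unsigned char payload[16] = { 0 };
		struct xfer_msg oversized = { .addr = 0x68, .len = 16, .buf = payload };

		if (pack_write(cmd, sizeof(cmd), &oversized) < 0)
			printf("rejected oversized transfer\n");
		return 0;
	}
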
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index ecc207f..9e0d6a4 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
 	u8 *s, *r = NULL;
 	int ret = 0;
 
+	if (4 + rlen > 64)
+		return -EIO;
+
 	s = kzalloc(wlen+4, GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
@@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
 		write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
 		read = msg[i].flags & I2C_M_RD;
 
+		if (3 + msg[i].len > sizeof(obuf)) {
+			err("i2c wr len=%d too high", msg[i].len);
+			break;
+		}
+		if (write_read) {
+			if (3 + msg[i+1].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i+1].len);
+				break;
+			}
+		} else if (read) {
+			if (3 + msg[i].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i].len);
+				break;
+			}
+		}
+
 		obuf[0] = (msg[i].addr << 1) | (write_read | read);
 		if (read)
 			obuf[1] = 0;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 40aaaa9..78542ff 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -188,6 +188,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
 		return -EIO;
 	}
 
+	if (alt->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
 
 	n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2..bfdf723 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1506,7 +1506,14 @@ static int usbvision_probe(struct usb_interface *intf,
 	}
 
 	for (i = 0; i < usbvision->num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < 2) {
+			ret = -ENODEV;
+			goto err_pkt;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
 				      wMaxPacketSize);
 		usbvision->alt_max_pkt_size[i] =
 			(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db..e3735bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -604,6 +604,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
 	ptr = pdest = frm->lpvbits;
 
 	if (frm->ulState == ZR364XX_READ_IDLE) {
+		if (purb->actual_length < 128) {
+			/* header incomplete */
+			dev_info(&cam->udev->dev,
+				 "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
+				 __func__, purb->actual_length);
+			return -EINVAL;
+		}
+
 		frm->ulState = ZR364XX_READ_FRAME;
 		frm->cur_size = 0;
 
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 30ad689..0502e39d 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -293,7 +293,7 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
 	struct wcd9xxx_core_resource *wcd9xxx_res = data;
 	int num_irq_regs = wcd9xxx_res->num_irq_regs;
-	u8 status[num_irq_regs], status1[num_irq_regs];
+	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};
 
 	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
 		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
@@ -317,6 +317,23 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
 				"Failed to read interrupt status: %d\n", ret);
 		goto err_disable_irq;
 	}
+	/*
+	 * If status is 0 return without clearing.
+	 * status contains: HW status - masked interrupts
+	 * status1 contains: unhandled interrupts - masked interrupts
+	 * unmask_status contains: unhandled interrupts
+	 */
+	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+		pr_debug("%s: status is 0\n", __func__);
+		wcd9xxx_unlock_sleep(wcd9xxx_res);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Copy status to unmask_status before masking, otherwise SW may miss
+	 * Copy status to unmask_status before masking, otherwise SW may fail
+	 * to clear a masked interrupt in a corner case.
+	memcpy(unmask_status, status, sizeof(unmask_status));
 
 	/* Apply masking */
 	for (i = 0; i < num_irq_regs; i++)
@@ -340,6 +357,8 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
 			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
 			status1[BIT_BYTE(irqdata.intr_num)] &=
 					~BYTE_BIT_MASK(irqdata.intr_num);
+			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
+					~BYTE_BIT_MASK(irqdata.intr_num);
 		}
 	}
 
@@ -361,12 +380,13 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
 					   linebuf, sizeof(linebuf), false);
 			pr_warn("%s: status1 : %s\n", __func__, linebuf);
 		}
-
-		memset(status, 0xff, num_irq_regs);
-
+		/*
+		 * unmask_status contains unhandled interrupts, hence clear all
+		 * unhandled interrupts.
+		 */
 		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
 			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
-			status, num_irq_regs);
+			unmask_status, num_irq_regs);
 		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
 			regmap_write(wcd9xxx_res->wcd_core_regmap,
 				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
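
The interrupt-thread change above snapshots the raw status into unmask_status before masking, so only the interrupts that were actually pending are written to the clear register instead of a blanket 0xff. A simplified, hardware-free sketch of that ordering (register I/O is replaced by plain arrays; function names are illustrative):

	#include <stdio.h>
	#include <string.h>

	#define NUM_REGS 4

	/* Snapshot the raw status before applying the mask, so interrupts that
	 * are currently masked can still be cleared precisely at the end. */
	static void handle_irq(const unsigned char *hw_status,
			       const unsigned char *mask,
			       unsigned char *clear_out)
	{
		unsigned char status[NUM_REGS];
		unsigned char unmask_status[NUM_REGS];
		int i;

		memcpy(status, hw_status, NUM_REGS);

		/* Keep the pre-mask copy; this is what eventually gets cleared. */
		memcpy(unmask_status, status, NUM_REGS);

		/* Apply masking for dispatch decisions only. */
		for (i = 0; i < NUM_REGS; i++)
			status[i] &= ~mask[i];

		/* ... dispatch handlers based on 'status', clearing the bits they
		 * service in both 'status' and 'unmask_status' ... */

		/* Clear exactly the interrupts that were pending, not 0xff. */
		memcpy(clear_out, unmask_status, NUM_REGS);
	}

	int main(void)
	{
		unsigned char hw[NUM_REGS]   = { 0x05, 0x00, 0x80, 0x00 };
		unsigned char mask[NUM_REGS] = { 0x04, 0x00, 0x00, 0x00 };
		unsigned char clr[NUM_REGS];

		handle_irq(hw, mask, clr);
		printf("clear regs: %02x %02x %02x %02x\n",
		       clr[0], clr[1], clr[2], clr[3]);
		return 0;
	}
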
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index dd99b06..fa4fe02 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1496,8 +1496,6 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
 	if ((rc = cxl_native_register_psl_err_irq(adapter)))
 		goto err;
 
-	/* Release the context lock as adapter is configured */
-	cxl_adapter_context_unlock(adapter);
 	return 0;
 
 err:
@@ -1596,6 +1594,9 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 	if ((rc = cxl_sysfs_adapter_add(adapter)))
 		goto err_put1;
 
+	/* Release the context lock as adapter is configured */
+	cxl_adapter_context_unlock(adapter);
+
 	return adapter;
 
 err_put1:
@@ -1778,7 +1779,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 {
 	struct cxl *adapter = pci_get_drvdata(pdev);
 	struct cxl_afu *afu;
-	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
 	int i;
 
 	/* At this point, we could still have an interrupt pending.
@@ -1883,16 +1884,26 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
-		result = cxl_vphb_error_detected(afu, state);
-
-		/* Only continue if everyone agrees on NEED_RESET */
-		if (result != PCI_ERS_RESULT_NEED_RESET)
-			return result;
+		afu_result = cxl_vphb_error_detected(afu, state);
 
 		cxl_context_detach_all(afu);
 		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
 		pci_deconfigure_afu(afu);
+
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
 	}
+
+	/* should take the context lock here */
+	if (cxl_adapter_context_lock(adapter) != 0)
+		dev_warn(&adapter->dev,
+			 "Couldn't take context lock with %d active-contexts\n",
+			 atomic_read(&adapter->contexts_num));
+
 	cxl_deconfigure_adapter(adapter);
 
 	return result;
@@ -1911,6 +1922,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 	if (cxl_configure_adapter(adapter, pdev))
 		goto err;
 
+	/*
+	 * Unlock context activation for the adapter. Ideally this should be
+	 * done in cxl_pci_resume but cxlflash module tries to activate the
+	 * master context as part of slot_reset callback.
+	 */
+	cxl_adapter_context_unlock(adapter);
+
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 127a052..871040e 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -50,7 +50,8 @@ struct io_stats {
 
 #define UID_STATE_TOTAL_CURR	2
 #define UID_STATE_TOTAL_LAST	3
-#define UID_STATE_SIZE		4
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5
 
 struct uid_entry {
 	uid_t uid;
@@ -215,35 +216,44 @@ static u64 compute_write_bytes(struct task_struct *task)
 	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
 }
 
-static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
 {
-	struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+	struct io_stats *io_slot = &uid_entry->io[slot];
 
-	io_curr->read_bytes += task->ioac.read_bytes;
-	io_curr->write_bytes += compute_write_bytes(task);
-	io_curr->rchar += task->ioac.rchar;
-	io_curr->wchar += task->ioac.wchar;
-	io_curr->fsync += task->ioac.syscfs;
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
 }
 
-static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
 {
-	struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+	io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
 
-	io_last->read_bytes -= task->ioac.read_bytes;
-	io_last->write_bytes -= compute_write_bytes(task);
-	io_last->rchar -= task->ioac.rchar;
-	io_last->wchar -= task->ioac.wchar;
-	io_last->fsync -= task->ioac.syscfs;
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
 }
 
 static void update_io_stats_all_locked(void)
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
 	uid_t uid;
@@ -258,70 +268,38 @@ static void update_io_stats_all_locked(void)
 		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
-		io_bucket = &uid_entry->io[uid_entry->state];
-		io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-		io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-		io_bucket->read_bytes +=
-			io_curr->read_bytes - io_last->read_bytes;
-		io_bucket->write_bytes +=
-			io_curr->write_bytes - io_last->write_bytes;
-		io_bucket->rchar += io_curr->rchar - io_last->rchar;
-		io_bucket->wchar += io_curr->wchar - io_last->wchar;
-		io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-		io_last->read_bytes = io_curr->read_bytes;
-		io_last->write_bytes = io_curr->write_bytes;
-		io_last->rchar = io_curr->rchar;
-		io_last->wchar = io_curr->wchar;
-		io_last->fsync = io_curr->fsync;
+		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+					&uid_entry->io[UID_STATE_TOTAL_CURR],
+					&uid_entry->io[UID_STATE_TOTAL_LAST],
+					&uid_entry->io[UID_STATE_DEAD_TASKS]);
 	}
 }
 
-static void update_io_stats_uid_locked(uid_t target_uid)
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
 {
-	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 
-	uid_entry = find_or_register_uid(target_uid);
-	if (!uid_entry)
-		return;
-
 	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 		sizeof(struct io_stats));
 
 	rcu_read_lock();
 	do_each_thread(temp, task) {
-		if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
-	io_bucket = &uid_entry->io[uid_entry->state];
-	io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-	io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-	io_bucket->read_bytes +=
-		io_curr->read_bytes - io_last->read_bytes;
-	io_bucket->write_bytes +=
-		io_curr->write_bytes - io_last->write_bytes;
-	io_bucket->rchar += io_curr->rchar - io_last->rchar;
-	io_bucket->wchar += io_curr->wchar - io_last->wchar;
-	io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-	io_last->read_bytes = io_curr->read_bytes;
-	io_last->write_bytes = io_curr->write_bytes;
-	io_last->rchar = io_curr->rchar;
-	io_last->wchar = io_curr->wchar;
-	io_last->fsync = io_curr->fsync;
+	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+				&uid_entry->io[UID_STATE_TOTAL_CURR],
+				&uid_entry->io[UID_STATE_TOTAL_LAST],
+				&uid_entry->io[UID_STATE_DEAD_TASKS]);
 }
 
 static int uid_io_show(struct seq_file *m, void *v)
@@ -406,7 +384,7 @@ static ssize_t uid_procstat_write(struct file *file,
 		return count;
 	}
 
-	update_io_stats_uid_locked(uid);
+	update_io_stats_uid_locked(uid_entry);
 
 	uid_entry->state = state;
 
@@ -444,8 +422,7 @@ static int process_notifier(struct notifier_block *self,
 	uid_entry->utime += utime;
 	uid_entry->stime += stime;
 
-	update_io_stats_uid_locked(uid);
-	clean_uid_io_last_stats(uid_entry, task);
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
 
 exit:
 	rt_mutex_unlock(&uid_lock);
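
compute_uid_io_bucket_stats() above folds the per-poll delta into the active bucket while also crediting I/O from tasks that exited since the last poll, then resets the dead-task accumulator. A reduced, single-counter sketch of that arithmetic (struct and function names here are illustrative):

	#include <stdio.h>
	#include <string.h>

	/* Reduced io_stats with a single counter; the real struct tracks
	 * read/write bytes, rchar, wchar and fsync the same way. */
	struct io_stats {
		unsigned long long bytes;
	};

	/* Fold the delta since the last poll into the active bucket, adding
	 * what exited tasks contributed so their I/O is not lost, then roll
	 * the snapshot forward and reset the dead-task accumulator. */
	static void fold_bucket(struct io_stats *bucket, struct io_stats *curr,
				struct io_stats *last, struct io_stats *dead)
	{
		bucket->bytes += curr->bytes + dead->bytes - last->bytes;
		last->bytes = curr->bytes;
		memset(dead, 0, sizeof(*dead));
	}

	int main(void)
	{
		struct io_stats bucket = { 0 }, curr = { 0 }, last = { 0 }, dead = { 0 };

		/* First poll: live tasks have written 100 bytes in total. */
		curr.bytes = 100;
		fold_bucket(&bucket, &curr, &last, &dead);

		/* A task that had written 40 bytes exits; live total drops to 60. */
		dead.bytes = 40;
		curr.bytes = 60;
		fold_bucket(&bucket, &curr, &last, &dead);

		printf("bucket total = %llu\n", bucket.bytes);	/* 100 */
		return 0;
	}
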
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a..f222f8a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -138,6 +138,74 @@ const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
 };
 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
 
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
+ * are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		oobregion->offset = 40;
+		break;
+	case 128:
+		oobregion->offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	oobregion->length = ecc->total;
+	if (oobregion->offset + oobregion->length > mtd->oobsize)
+		return -ERANGE;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ecc_offset = 0;
+
+	if (section < 0 || section > 1)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		ecc_offset = 40;
+		break;
+	case 128:
+		ecc_offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (section == 0) {
+		oobregion->offset = 2;
+		oobregion->length = ecc_offset - 2;
+	} else {
+		oobregion->offset = ecc_offset + ecc->total;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+	.ecc = nand_ooblayout_ecc_lp_hamming,
+	.free = nand_ooblayout_free_lp_hamming,
+};
+
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
 {
@@ -4565,7 +4633,7 @@ int nand_scan_tail(struct mtd_info *mtd)
 			break;
 		case 64:
 		case 128:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
 			break;
 		default:
 			WARN(1, "No oob scheme defined for oobsize %d\n",
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9..c178cb0d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1856,6 +1856,15 @@ static int omap_nand_probe(struct platform_device *pdev)
 	nand_chip->ecc.priv	= NULL;
 	nand_set_flash_node(nand_chip, dev->of_node);
 
+	if (!mtd->name) {
+		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					   "omap2-nand.%d", info->gpmc_cs);
+		if (!mtd->name) {
+			dev_err(&pdev->dev, "Failed to set MTD name\n");
+			return -ENOMEM;
+		}
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(nand_chip->IO_ADDR_R))
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a..af2f091 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -23,6 +23,11 @@
 #include <asm/sizes.h>
 #include <linux/platform_data/mtd-orion_nand.h>
 
+struct orion_nand_info {
+	struct nand_chip chip;
+	struct clk *clk;
+};
+
 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
 	struct nand_chip *nc = mtd_to_nand(mtd);
@@ -75,20 +80,21 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 
 static int __init orion_nand_probe(struct platform_device *pdev)
 {
+	struct orion_nand_info *info;
 	struct mtd_info *mtd;
 	struct nand_chip *nc;
 	struct orion_nand_data *board;
 	struct resource *res;
-	struct clk *clk;
 	void __iomem *io_base;
 	int ret = 0;
 	u32 val = 0;
 
-	nc = devm_kzalloc(&pdev->dev,
-			sizeof(struct nand_chip),
+	info = devm_kzalloc(&pdev->dev,
+			sizeof(struct orion_nand_info),
 			GFP_KERNEL);
-	if (!nc)
+	if (!info)
 		return -ENOMEM;
+	nc = &info->chip;
 	mtd = nand_to_mtd(nc);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -145,15 +151,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 	if (board->dev_ready)
 		nc->dev_ready = board->dev_ready;
 
-	platform_set_drvdata(pdev, mtd);
+	platform_set_drvdata(pdev, info);
 
 	/* Not all platforms can gate the clock, so it is not
 	   an error if the clock does not exist. */
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_prepare_enable(clk);
-		clk_put(clk);
-	}
+	info->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(info->clk))
+		clk_prepare_enable(info->clk);
 
 	if (nand_scan(mtd, 1)) {
 		ret = -ENXIO;
@@ -170,26 +174,22 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 	return 0;
 
 no_dev:
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return ret;
 }
 
 static int orion_nand_remove(struct platform_device *pdev)
 {
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct clk *clk;
+	struct orion_nand_info *info = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &info->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
 
 	nand_release(mtd);
 
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return 0;
 }
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c..6f3c805 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
          * are "42101001.sb" or "42101002.sb"
          */
         sprintf(stir421x_fw_name, "4210%4X.sb",
-                self->usbdev->descriptor.bcdDevice);
+		le16_to_cpu(self->usbdev->descriptor.bcdDevice));
         ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
         if (ret < 0)
                 return ret;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index e1c338c..f15589c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
 	{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
 	{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
 	{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+	{ USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
 
 	{ USB_DEVICE(0x0cf3, 0x7015),
 	  .driver_info = AR9287_USB },  /* Atheros */
@@ -1218,6 +1219,9 @@ static int send_eject_command(struct usb_interface *interface)
 	u8 bulk_out_ep;
 	int r;
 
+	if (iface_desc->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	/* Find bulk out endpoint */
 	for (r = 1; r >= 0; r--) {
 		endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index d472e13..1afed52 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -26,6 +26,10 @@ static bool use_msi = true;
 module_param(use_msi, bool, 0444);
 MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
+static bool ftm_mode;
+module_param(ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
+
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
@@ -36,13 +40,15 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
+	const char *wil_fw_name;
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
-	wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
+			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
 
 	switch (jtag_id) {
@@ -51,9 +57,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 		case REVISION_ID_SPARROW_D0:
 			wil->hw_name = "Sparrow D0";
 			wil->hw_version = HW_VER_SPARROW_D0;
-			if (wil_fw_verify_file_exists(wil,
-						      WIL_FW_NAME_SPARROW_PLUS))
-				wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+			wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
+				      WIL_FW_NAME_SPARROW_PLUS;
+
+			if (wil_fw_verify_file_exists(wil, wil_fw_name))
+				wil->wil_fw_name = wil_fw_name;
 			break;
 		case REVISION_ID_SPARROW_B0:
 			wil->hw_name = "Sparrow B0";
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d05bb36..ba1c33b 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -38,8 +38,13 @@ extern bool debug_fw;
 extern bool disable_ap_sme;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
-#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+
+#define WIL_FW_NAME_DEFAULT "wil6210.fw"
+#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
+
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
+#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+
 #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 16241d2..afdbbf5 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2512,9 +2512,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 			priv->random_mac[i] |= get_random_int() &
 					       ~(request->mac_addr_mask[i]);
 		}
+		ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+	} else {
+		eth_zero_addr(priv->random_mac);
 	}
 
-	ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
 	user_scan_cfg->num_ssids = request->n_ssids;
 	user_scan_cfg->ssid_list = request->ssids;
 
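
With this change the scan configuration only carries a random MAC when the request asked for address randomization (and the stored random_mac is zeroed otherwise); mac_addr_mask keeps the caller-fixed bits while the rest are randomized. A small sketch of that bit manipulation, with plain rand() standing in for get_random_int():

	#include <stdio.h>
	#include <stdlib.h>

	#define ETH_ALEN 6

	/* Keep the bits selected by mask from addr, randomize everything else. */
	static void randomize_mac(unsigned char out[ETH_ALEN],
				  const unsigned char addr[ETH_ALEN],
				  const unsigned char mask[ETH_ALEN])
	{
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			out[i] = addr[i] & mask[i];
			out[i] |= (unsigned char)rand() & ~mask[i];
		}
	}

	int main(void)
	{
		unsigned char addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
		unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
		unsigned char out[ETH_ALEN];
		int i;

		randomize_mac(out, addr, mask);
		for (i = 0; i < ETH_ALEN; i++)
			printf("%02x%c", out[i], i == ETH_ALEN - 1 ? '\n' : ':');
		return 0;
	}
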
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 7a310c4..1fdb86c 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -995,6 +995,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
 	if (card && card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 	}
 	return 0;
 }
@@ -1561,6 +1562,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
 		return -1;
 
 	card->cmd_buf = skb;
+	/*
+	 * Need to keep a reference, since core driver might free up this
+	 * buffer before we've unmapped it.
+	 */
+	skb_get(skb);
 
 	/* To send a command, the driver will:
 		1. Write the 64bit physical address of the data buffer to
@@ -1658,6 +1664,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
 	if (card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 		card->cmd_buf = NULL;
 	}
 
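
The skb_get() call takes an extra reference on the command buffer so that the unmap paths above can drop their own reference with dev_kfree_skb_any() even if the core driver has already freed its copy. A userspace-style sketch of that get/put discipline on a stand-in refcounted buffer (not the real struct sk_buff API):

	#include <stdio.h>
	#include <stdlib.h>

	struct buf {
		int refcnt;
		unsigned char *data;
	};

	static struct buf *buf_alloc(size_t len)
	{
		struct buf *b = malloc(sizeof(*b));

		if (!b)
			return NULL;
		b->data = malloc(len);
		if (!b->data) {
			free(b);
			return NULL;
		}
		b->refcnt = 1;
		return b;
	}

	static struct buf *buf_get(struct buf *b)	/* like skb_get() */
	{
		b->refcnt++;
		return b;
	}

	static void buf_put(struct buf *b)		/* like dev_kfree_skb_any() */
	{
		if (--b->refcnt == 0) {
			free(b->data);
			free(b);
		}
	}

	int main(void)
	{
		struct buf *cmd = buf_alloc(64);

		if (!cmd)
			return 1;
		buf_get(cmd);	/* driver keeps its own reference while DMA-mapped */
		buf_put(cmd);	/* core gives up its reference early... */
		buf_put(cmd);	/* ...and the unmap path still frees safely here */
		printf("balanced get/put, no use-after-free\n");
		return 0;
	}
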
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 5dad402..a74fad6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -359,6 +359,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
 	return rtl8821ae_phy_rf6052_config(hw);
 }
 
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 4:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77777777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+			break;
+		}
+	case 0:
+	case 2:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	}
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 0:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		} else {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		}
+		break;
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 2:
+	case 4:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	}
+}
+
 u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8	band,
 			   u8 rf_path)
 {
@@ -553,14 +654,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
 			/* 0x82C[1:0] = 2b'00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 0);
 		}
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
-		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_24g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -615,14 +711,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
 			/* 0x82C[1:0] = 2'b00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 1);
 
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
-		}
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_5g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index 1d6110f..ed69dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -2424,6 +2424,7 @@
 #define	BMASKH4BITS			0xf0000000
 #define BMASKOFDM_D			0xffc00000
 #define	BMASKCCK			0x3f3f3f3f
+#define BMASKRFEINV			0x3ff00000
 
 #define BRFREGOFFSETMASK		0xfffff
 
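
BMASKRFEINV gives a name to the 0x3ff00000 RFE-inversion field that the new phy.c helpers program. rtl_set_bbreg()-style accessors generally perform a masked read-modify-write, placing the value into the bits selected by the mask; a hedged sketch of that operation (the shift-by-mask helper is illustrative, not the rtlwifi implementation):

	#include <stdio.h>
	#include <stdint.h>

	#define BMASKRFEINV	0x3ff00000u

	/* Write 'val' into the register bits selected by 'mask'. */
	static uint32_t set_bb_field(uint32_t reg, uint32_t mask, uint32_t val)
	{
		unsigned int shift = __builtin_ctz(mask);	/* lowest set bit of the mask */

		return (reg & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t reg = 0xffffffffu;

		reg = set_bb_field(reg, BMASKRFEINV, 0x010);
		printf("0x%08x\n", (unsigned)reg);	/* the 0x3ff00000 field now holds 0x010 */
		return 0;
	}
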
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 9d23692..e1c6f99 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -70,3 +70,11 @@
 source "drivers/nfc/s3fwrn5/Kconfig"
 source "drivers/nfc/st95hf/Kconfig"
 endmenu
+
+config NFC_NQ
+	tristate "QTI NCI based NFC Controller Driver for NQx"
+	depends on I2C
+	help
+	  This enables the NFC driver for NQx based devices.
+	  This is for the I2C connected version. The NCI protocol logic
+	  resides in user space and has no other NFC dependencies.
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bab8ef0..b691fd4 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
 obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
 obj-$(CONFIG_NFC_ST95HF)	+= st95hf/
+obj-$(CONFIG_NFC_NQ)		+= nq-nci.o
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
new file mode 100644
index 0000000..baa4f94
--- /dev/null
+++ b/drivers/nfc/nq-nci.c
@@ -0,0 +1,1242 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include "nq-nci.h"
+#include <linux/clk.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+struct nqx_platform_data {
+	unsigned int irq_gpio;
+	unsigned int en_gpio;
+	unsigned int clkreq_gpio;
+	unsigned int firm_gpio;
+	unsigned int ese_gpio;
+	const char *clk_src_name;
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,nq-nci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_match_table);
+
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define MAX_RETRY_COUNT			3
+
+struct nqx_dev {
+	wait_queue_head_t	read_wq;
+	struct	mutex		read_mutex;
+	struct	i2c_client	*client;
+	struct	miscdevice	nqx_device;
+	union  nqx_uinfo	nqx_info;
+	/* NFC GPIO variables */
+	unsigned int		irq_gpio;
+	unsigned int		en_gpio;
+	unsigned int		firm_gpio;
+	unsigned int		clkreq_gpio;
+	unsigned int		ese_gpio;
+	/* NFC VEN pin state powered by Nfc */
+	bool			nfc_ven_enabled;
+	/* NFC_IRQ state */
+	bool			irq_enabled;
+	/* NFC_IRQ wake-up state */
+	bool			irq_wake_up;
+	spinlock_t		irq_enabled_lock;
+	unsigned int		count_irq;
+	/* Initial CORE RESET notification */
+	unsigned int		core_reset_ntf;
+	/* CLK control */
+	bool			clk_run;
+	struct	clk		*s_clk;
+	/* read buffer*/
+	size_t kbuflen;
+	u8 *kbuf;
+	struct nqx_platform_data *pdata;
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			void *v);
+/*clock enable function*/
+static int nqx_clock_select(struct nqx_dev *nqx_dev);
+/*clock disable function*/
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next			= NULL,
+	.priority		= 0
+};
+
+unsigned int	disable_ctrl;
+
+static void nqx_init_stat(struct nqx_dev *nqx_dev)
+{
+	nqx_dev->count_irq = 0;
+}
+
+static void nqx_disable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (nqx_dev->irq_enabled) {
+		disable_irq_nosync(nqx_dev->client->irq);
+		nqx_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+/**
+ * nqx_enable_irq()
+ *
+ * Enable the NFC interrupt if it is not
+ * already enabled.
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (!nqx_dev->irq_enabled) {
+		nqx_dev->irq_enabled = true;
+		enable_irq(nqx_dev->client->irq);
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
+{
+	struct nqx_dev *nqx_dev = dev_id;
+	unsigned long flags;
+
+	if (device_may_wakeup(&nqx_dev->client->dev))
+		pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
+
+	nqx_disable_irq(nqx_dev);
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	nqx_dev->count_irq++;
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+	wake_up(&nqx_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t nfc_read(struct file *filp, char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	unsigned char *tmp = NULL;
+	int ret;
+	int irq_gpio_val = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > nqx_dev->kbuflen)
+		count = nqx_dev->kbuflen;
+
+	dev_dbg(&nqx_dev->client->dev, "%s : reading %zu bytes.\n",
+			__func__, count);
+
+	mutex_lock(&nqx_dev->read_mutex);
+
+	irq_gpio_val = gpio_get_value(nqx_dev->irq_gpio);
+	if (irq_gpio_val == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			dev_err(&nqx_dev->client->dev,
+			"f_flags has O_NONBLOCK. EAGAIN\n");
+			ret = -EAGAIN;
+			goto err;
+		}
+		while (1) {
+			ret = 0;
+			if (!nqx_dev->irq_enabled) {
+				nqx_dev->irq_enabled = true;
+				enable_irq(nqx_dev->client->irq);
+			}
+			if (!gpio_get_value(nqx_dev->irq_gpio)) {
+				ret = wait_event_interruptible(nqx_dev->read_wq,
+					!nqx_dev->irq_enabled);
+			}
+			if (ret)
+				goto err;
+			nqx_disable_irq(nqx_dev);
+
+			if (gpio_get_value(nqx_dev->irq_gpio))
+				break;
+			dev_err_ratelimited(&nqx_dev->client->dev,
+			"gpio is low, no need to read data\n");
+		}
+	}
+
+	tmp = nqx_dev->kbuf;
+	if (!tmp) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	memset(tmp, 0x00, count);
+
+	/* Read data */
+	ret = i2c_master_recv(nqx_dev->client, tmp, count);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		goto err;
+	}
+	if (ret > count) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+#ifdef NFC_KERNEL_BU
+		dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
+			__func__, tmp[0], tmp[1], tmp[2]);
+#endif
+	if (copy_to_user(buf, tmp, ret)) {
+		dev_warn(&nqx_dev->client->dev,
+			"%s : failed to copy to user space\n", __func__);
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&nqx_dev->read_mutex);
+	return ret;
+
+err:
+	mutex_unlock(&nqx_dev->read_mutex);
+out:
+	return ret;
+}
+
+static ssize_t nfc_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	char *tmp = NULL;
+	int ret = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (count > nqx_dev->kbuflen) {
+		dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+			__func__);
+		ret = PTR_ERR(tmp);
+		goto out;
+	}
+
+	ret = i2c_master_send(nqx_dev->client, tmp, count);
+	if (ret != count) {
+		dev_err(&nqx_dev->client->dev,
+		"%s: failed to write %d\n", __func__, ret);
+		ret = -EIO;
+		goto out_free;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev,
+			"%s : i2c-%d: NfcNciTx %x %x %x\n",
+			__func__, iminor(file_inode(filp)),
+			tmp[0], tmp[1], tmp[2]);
+#endif
+	usleep_range(1000, 1100);
+out_free:
+	kfree(tmp);
+out:
+	return ret;
+}
+
+/**
+ * nqx_standby_write()
+ * @buf:       pointer to data buffer
+ * @len:       # of bytes to transfer
+ *
+ * Write the data buffer over I2C, retrying
+ * if the NFCC is in standby mode.
+ *
+ * Return: # of bytes written, or a negative value on error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len)
+{
+	int ret = -EINVAL;
+	int retry_cnt;
+
+	for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+		ret = i2c_master_send(nqx_dev->client, buf, len);
+		if (ret < 0) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+				 __func__, retry_cnt);
+			usleep_range(1000, 1100);
+		} else if (ret == len)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Power management of the eSE
+ * NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE OFF : NFC_EN low and eSE_pwr_req low.
+ */
+static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
+{
+	int r = -1;
+	const unsigned char svdd_off_cmd_warn[] =  {0x2F, 0x31, 0x01, 0x01};
+	const unsigned char svdd_off_cmd_done[] =  {0x2F, 0x31, 0x01, 0x00};
+
+	if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: ese_gpio is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg == 0) {
+		/*
+		 * We want to power on the eSE and to do so we need the
+		 * eSE_pwr_req pin and the NFC_EN pin to be high
+		 */
+		if (gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+			r = 0;
+		} else {
+			/**
+			 * Let's store the NFC_EN pin state
+			 * only if the eSE is not yet on
+			 */
+			nqx_dev->nfc_ven_enabled =
+					gpio_get_value(nqx_dev->en_gpio);
+			if (!nqx_dev->nfc_ven_enabled) {
+				gpio_set_value(nqx_dev->en_gpio, 1);
+				/* hardware dependent delay */
+				usleep_range(1000, 1100);
+			}
+			gpio_set_value(nqx_dev->ese_gpio, 1);
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
+				r = 0;
+			}
+		}
+	} else if (arg == 1) {
+		if (nqx_dev->nfc_ven_enabled &&
+			((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+			(nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+			/**
+			 * Let's inform the CLF we're
+			 * powering off the eSE
+			 */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+						sizeof(svdd_off_cmd_warn));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_warn sent\n", __func__);
+
+			/* let's power down the eSE */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+			/**
+			 * Time needed for the SVDD capacitor
+			 * to get discharged
+			 */
+			usleep_range(8000, 8100);
+
+			/* Let's inform the CLF the eSE is now off */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+						sizeof(svdd_off_cmd_done));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_done sent\n", __func__);
+		} else {
+			/**
+			 * In case the NFC is off,
+			 * there's no need to send the i2c commands
+			 */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+		}
+
+		if (!gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+			r = 0;
+		}
+
+		if (!nqx_dev->nfc_ven_enabled) {
+			/* hardware dependent delay */
+			usleep_range(1000, 1100);
+			dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+	} else if (arg == 3) {
+		r = gpio_get_value(nqx_dev->ese_gpio);
+	}
+	return r;
+}
+
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev = container_of(filp->private_data,
+				struct nqx_dev, nqx_device);
+
+	filp->private_data = nqx_dev;
+	nqx_init_stat(nqx_dev);
+
+	dev_dbg(&nqx_dev->client->dev,
+			"%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
+	return ret;
+}
+
+/*
+ * nfc_ioctl_power_states() - power control
+ * @filp:	pointer to the file descriptor
+ * @arg:	mode that we want to move to
+ *
+ * Device power control. Depending on the arg value, device moves to
+ * different states
+ * (arg = 0): NFC_ENABLE	GPIO = 0, FW_DL GPIO = 0
+ * (arg = 1): NFC_ENABLE	GPIO = 1, FW_DL GPIO = 0
+ * (arg = 2): FW_DL GPIO = 1
+ *
+ * Return: -ENOIOCTLCMD if arg is not supported, 0 in any other case
+ */
+int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
+{
+	int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	if (arg == 0) {
+		/*
+		 * We are attempting a hardware reset so let us disable
+		 * interrupts to avoid spurious notifications to upper
+		 * layers.
+		 */
+		nqx_disable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value disable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (!gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+				gpio_set_value(nqx_dev->en_gpio, 0);
+			} else {
+				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+			}
+		} else {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+		r = nqx_clock_deselect(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+		nqx_dev->nfc_ven_enabled = false;
+		/* hardware dependent delay */
+		msleep(100);
+	} else if (arg == 1) {
+		nqx_enable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value enable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		r = nqx_clock_select(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+		nqx_dev->nfc_ven_enabled = true;
+		msleep(20);
+	} else if (arg == 2) {
+		/*
+		 * We are switching to Download mode; toggle the enable pin
+		 * in order to put the NFCC into the new mode
+		 */
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_err(&nqx_dev->client->dev,
+				"FW download forbidden while ese is on\n");
+				return -EBUSY; /* Device or resource busy */
+			}
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+		msleep(20);
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		msleep(100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+	} else {
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
+
+#ifdef CONFIG_COMPAT
+static long nfc_compat_ioctl(struct file *pfile, unsigned int cmd,
+				unsigned long arg)
+{
+	long r = 0;
+
+	arg = (compat_u64)arg;
+	switch (cmd) {
+	case NFC_SET_PWR:
+		nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	default:
+		r = -ENOTTY;
+	}
+	return r;
+}
+#endif
+
+/*
+ * nfc_ioctl_core_reset_ntf()
+ * @filp:       pointer to the file descriptor
+ *
+ * Allows callers to determine if a CORE_RESET_NTF has arrived
+ *
+ * Return: the value of variable core_reset_ntf
+ */
+int nfc_ioctl_core_reset_ntf(struct file *filp)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	dev_dbg(&nqx_dev->client->dev, "%s: returning = %d\n", __func__,
+		nqx_dev->core_reset_ntf);
+	return nqx_dev->core_reset_ntf;
+}
+
+/*
+ * nfc_ioctl_nfcc_info()
+ * @filp:       pointer to the file descriptor
+ *
+ * Return the NQ chipset and firmware version details
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+	unsigned int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	r = nqx_dev->nqx_info.i;
+	dev_dbg(&nqx_dev->client->dev,
+		"nqx nfc : nfc_ioctl_nfcc_info r = %d\n", r);
+
+	return r;
+}
+
+static long nfc_ioctl(struct file *pfile, unsigned int cmd,
+			unsigned long arg)
+{
+	int r = 0;
+
+	switch (cmd) {
+	case NFC_SET_PWR:
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	case NFCC_INITIAL_CORE_RESET_NTF:
+		r = nfc_ioctl_core_reset_ntf(pfile);
+		break;
+	case NFCC_GET_INFO:
+		r = nfc_ioctl_nfcc_info(pfile, arg);
+		break;
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+static const struct file_operations nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read  = nfc_read,
+	.write = nfc_write,
+	.open = nfc_open,
+	.unlocked_ioctl = nfc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nfc_compat_ioctl
+#endif
+};
+
+/* Check for availability of NQx NFC controller hardware */
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int ret = 0;
+
+	unsigned char raw_nci_reset_cmd[] =  {0x20, 0x00, 0x01, 0x00};
+	unsigned char raw_nci_init_cmd[] =   {0x20, 0x01, 0x00};
+	unsigned char nci_init_rsp[28];
+	unsigned char nci_reset_rsp[6];
+	unsigned char init_rsp_len = 0;
+	unsigned int enable_gpio = nqx_dev->en_gpio;
+	/* making sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	/* hardware dependent delay */
+	msleep(20);
+	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
+	/* hardware dependent delay */
+	msleep(20);
+
+	/* send NCI CORE RESET CMD with Keep Config parameters */
+	ret = i2c_master_send(client, raw_nci_reset_cmd,
+						sizeof(raw_nci_reset_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+
+	/* Read Response of RESET command */
+	ret = i2c_master_recv(client, nci_reset_rsp,
+		sizeof(nci_reset_rsp));
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	ret = i2c_master_send(client, raw_nci_init_cmd,
+		sizeof(raw_nci_init_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+	/* Read Response of INIT command */
+	ret = i2c_master_recv(client, nci_init_rsp,
+		sizeof(nci_init_rsp));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
+	if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
+		nqx_dev->nqx_info.info.chip_type =
+				nci_init_rsp[init_rsp_len - 3];
+		nqx_dev->nqx_info.info.rom_version =
+				nci_init_rsp[init_rsp_len - 2];
+		nqx_dev->nqx_info.info.fw_major =
+				nci_init_rsp[init_rsp_len - 1];
+		nqx_dev->nqx_info.info.fw_minor =
+				nci_init_rsp[init_rsp_len];
+	}
+	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+		nqx_dev->nqx_info.info.chip_type);
+	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+		nqx_dev->nqx_info.info.rom_version,
+		nqx_dev->nqx_info.info.fw_major,
+		nqx_dev->nqx_info.info.fw_minor);
+
+	switch (nqx_dev->nqx_info.info.chip_type) {
+	case NFCC_NQ_210:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ210 ##\n", __func__);
+		break;
+	case NFCC_NQ_220:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ220 ##\n", __func__);
+		break;
+	case NFCC_NQ_310:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ310 ##\n", __func__);
+		break;
+	case NFCC_NQ_330:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ330 ##\n", __func__);
+		break;
+	case NFCC_PN66T:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == PN66T ##\n", __func__);
+		break;
+	default:
+		dev_err(&client->dev,
+		"%s: - NFCC HW not Supported\n", __func__);
+		break;
+	}
+
+	/* Disable NFC by default to save power on boot */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	ret = 0;
+	goto done;
+
+err_nfcc_hw_check:
+	ret = -ENXIO;
+	dev_err(&client->dev,
+		"%s: - NFCC HW not available\n", __func__);
+done:
+	return ret;
+}
+
+/*
+ * Routine to enable clock.
+ * This routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int nqx_clock_select(struct nqx_dev *nqx_dev)
+{
+	int r = 0;
+
+	nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
+
+	if (nqx_dev->s_clk == NULL)
+		goto err_clk;
+
+	if (nqx_dev->clk_run == false)
+		r = clk_prepare_enable(nqx_dev->s_clk);
+
+	if (r)
+		goto err_clk;
+
+	nqx_dev->clk_run = true;
+
+	return r;
+
+err_clk:
+	r = -1;
+	return r;
+}
+
+/*
+ * Routine to disable clocks
+ */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev)
+{
+	int r = -1;
+
+	if (nqx_dev->s_clk != NULL) {
+		if (nqx_dev->clk_run == true) {
+			clk_disable_unprepare(nqx_dev->s_clk);
+			nqx_dev->clk_run = false;
+		}
+		return 0;
+	}
+	return r;
+}
+
+static int nfc_parse_dt(struct device *dev, struct nqx_platform_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+
+	pdata->en_gpio = of_get_named_gpio(np, "qcom,nq-ven", 0);
+	if ((!gpio_is_valid(pdata->en_gpio)))
+		return -EINVAL;
+	disable_ctrl = pdata->en_gpio;
+
+	pdata->irq_gpio = of_get_named_gpio(np, "qcom,nq-irq", 0);
+	if ((!gpio_is_valid(pdata->irq_gpio)))
+		return -EINVAL;
+
+	pdata->firm_gpio = of_get_named_gpio(np, "qcom,nq-firm", 0);
+	if (!gpio_is_valid(pdata->firm_gpio)) {
+		dev_warn(dev,
+			"FIRM GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->firm_gpio = -EINVAL;
+	}
+
+	pdata->ese_gpio = of_get_named_gpio(np, "qcom,nq-esepwr", 0);
+	if (!gpio_is_valid(pdata->ese_gpio)) {
+		dev_warn(dev,
+			"ese GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->ese_gpio = -EINVAL;
+	}
+
+	r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
+
+	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
+
+	if (r)
+		return -EINVAL;
+	return r;
+}
+
+static inline int gpio_input_init(const struct device * const dev,
+			const int gpio, const char * const gpio_name)
+{
+	int r = gpio_request(gpio, gpio_name);
+
+	if (r) {
+		dev_err(dev, "unable to request gpio [%d]\n", gpio);
+		return r;
+	}
+
+	r = gpio_direction_input(gpio);
+	if (r)
+		dev_err(dev, "unable to set direction for gpio [%d]\n", gpio);
+
+	return r;
+}
+
+static int nqx_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int r = 0;
+	int irqn = 0;
+	struct nqx_platform_data *platform_data;
+	struct nqx_dev *nqx_dev;
+
+	dev_dbg(&client->dev, "%s: enter\n", __func__);
+	if (client->dev.of_node) {
+		platform_data = devm_kzalloc(&client->dev,
+			sizeof(struct nqx_platform_data), GFP_KERNEL);
+		if (!platform_data) {
+			r = -ENOMEM;
+			goto err_platform_data;
+		}
+		r = nfc_parse_dt(&client->dev, platform_data);
+		if (r)
+			goto err_free_data;
+	} else
+		platform_data = client->dev.platform_data;
+
+	dev_dbg(&client->dev,
+		"%s, inside nfc-nci flags = %x\n",
+		__func__, client->flags);
+
+	if (platform_data == NULL) {
+		dev_err(&client->dev, "%s: failed\n", __func__);
+		r = -ENODEV;
+		goto err_platform_data;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
+		r = -ENODEV;
+		goto err_free_data;
+	}
+	nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
+	if (nqx_dev == NULL) {
+		r = -ENOMEM;
+		goto err_free_data;
+	}
+	nqx_dev->client = client;
+	nqx_dev->kbuflen = MAX_BUFFER_SIZE;
+	nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!nqx_dev->kbuf) {
+		dev_err(&client->dev,
+			"failed to allocate memory for nqx_dev->kbuf\n");
+		r = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	if (gpio_is_valid(platform_data->en_gpio)) {
+		r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to request nfc reset gpio [%d]\n",
+				__func__,
+				platform_data->en_gpio);
+			goto err_mem;
+		}
+		r = gpio_direction_output(platform_data->en_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to set direction for nfc reset gpio [%d]\n",
+					__func__,
+					platform_data->en_gpio);
+			goto err_en_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+		"%s: nfc reset gpio not provided\n", __func__);
+		goto err_mem;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
+		if (r) {
+			dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
+				__func__, platform_data->irq_gpio);
+			goto err_en_gpio;
+		}
+		r = gpio_direction_input(platform_data->irq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to set direction for nfc irq gpio [%d]\n",
+				__func__,
+				platform_data->irq_gpio);
+			goto err_irq_gpio;
+		}
+		irqn = gpio_to_irq(platform_data->irq_gpio);
+		if (irqn < 0) {
+			r = irqn;
+			goto err_irq_gpio;
+		}
+		client->irq = irqn;
+	} else {
+		dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
+		goto err_en_gpio;
+	}
+	if (gpio_is_valid(platform_data->firm_gpio)) {
+		r = gpio_request(platform_data->firm_gpio,
+			"nfc_firm_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc firmware gpio [%d]\n",
+				__func__, platform_data->firm_gpio);
+			goto err_irq_gpio;
+		}
+		r = gpio_direction_output(platform_data->firm_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc firmware gpio [%d]\n",
+			__func__, platform_data->firm_gpio);
+			goto err_firm_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: firm gpio not provided\n", __func__);
+		goto err_irq_gpio;
+	}
+	if (gpio_is_valid(platform_data->ese_gpio)) {
+		r = gpio_request(platform_data->ese_gpio,
+				"nfc-ese_pwr");
+		if (r) {
+			nqx_dev->ese_gpio = -EINVAL;
+			dev_err(&client->dev,
+				"%s: unable to request nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+			/* ese gpio optional so we should continue */
+		} else {
+			nqx_dev->ese_gpio = platform_data->ese_gpio;
+			r = gpio_direction_output(platform_data->ese_gpio, 0);
+			if (r) {
+				/*
+				 * free ese gpio and set invalid
+				 * to avoid further use
+				 */
+				gpio_free(platform_data->ese_gpio);
+				nqx_dev->ese_gpio = -EINVAL;
+				dev_err(&client->dev,
+					"%s: cannot set direction for nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+				/* ese gpio optional so we should continue */
+			}
+		}
+	} else {
+		nqx_dev->ese_gpio = -EINVAL;
+		dev_err(&client->dev,
+			"%s: ese gpio not provided\n", __func__);
+		/* ese gpio optional so we should continue */
+	}
+	if (gpio_is_valid(platform_data->clkreq_gpio)) {
+		r = gpio_request(platform_data->clkreq_gpio,
+			"nfc_clkreq_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc clkreq gpio [%d]\n",
+				__func__, platform_data->clkreq_gpio);
+			goto err_ese_gpio;
+		}
+		r = gpio_direction_input(platform_data->clkreq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc clkreq gpio [%d]\n",
+			__func__, platform_data->clkreq_gpio);
+			goto err_clkreq_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: clkreq gpio not provided\n", __func__);
+		goto err_ese_gpio;
+	}
+
+	nqx_dev->en_gpio = platform_data->en_gpio;
+	nqx_dev->irq_gpio = platform_data->irq_gpio;
+	nqx_dev->firm_gpio  = platform_data->firm_gpio;
+	nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+	nqx_dev->pdata = platform_data;
+
+	/* init mutex and queues */
+	init_waitqueue_head(&nqx_dev->read_wq);
+	mutex_init(&nqx_dev->read_mutex);
+	spin_lock_init(&nqx_dev->irq_enabled_lock);
+
+	nqx_dev->nqx_device.minor = MISC_DYNAMIC_MINOR;
+	nqx_dev->nqx_device.name = "nq-nci";
+	nqx_dev->nqx_device.fops = &nfc_dev_fops;
+
+	r = misc_register(&nqx_dev->nqx_device);
+	if (r) {
+		dev_err(&client->dev, "%s: misc_register failed\n", __func__);
+		goto err_misc_register;
+	}
+
+	/* NFC_INT IRQ */
+	nqx_dev->irq_enabled = true;
+	r = request_irq(client->irq, nqx_dev_irq_handler,
+			  IRQF_TRIGGER_HIGH, client->name, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: request_irq failed\n", __func__);
+		goto err_request_irq_failed;
+	}
+	nqx_disable_irq(nqx_dev);
+
+	/*
+	 * To be efficient we need to test whether NFCC hardware is physically
+	 * present before attempting further hardware initialisation.
+	 */
+	r = nfcc_hw_check(client, nqx_dev);
+	if (r) {
+		/* make sure NFCC is not enabled */
+		gpio_set_value(platform_data->en_gpio, 0);
+		/* We don't think there is a hardware switch to turn NFC off */
+		goto err_request_hw_check_failed;
+	}
+
+	/* Register reboot notifier here */
+	r = register_reboot_notifier(&nfcc_notifier);
+	if (r) {
+		dev_err(&client->dev,
+			"%s: cannot register reboot notifier(err = %d)\n",
+			__func__, r);
+		/*
+		 * nfcc_hw_check() does no memory allocation,
+		 * so the same goto target is reused here
+		 */
+		goto err_request_hw_check_failed;
+	}
+
+#ifdef NFC_KERNEL_BU
+	r = nqx_clock_select(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: nqx_clock_select failed\n", __func__);
+		goto err_clock_en_failed;
+	}
+	gpio_set_value(platform_data->en_gpio, 1);
+#endif
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	i2c_set_clientdata(client, nqx_dev);
+	nqx_dev->irq_wake_up = false;
+
+	dev_err(&client->dev,
+	"%s: probing NFCC NQxxx exited successfully\n",
+		 __func__);
+	return 0;
+
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
+	unregister_reboot_notifier(&nfcc_notifier);
+#endif
+err_request_hw_check_failed:
+	free_irq(client->irq, nqx_dev);
+err_request_irq_failed:
+	misc_deregister(&nqx_dev->nqx_device);
+err_misc_register:
+	mutex_destroy(&nqx_dev->read_mutex);
+err_clkreq_gpio:
+	gpio_free(platform_data->clkreq_gpio);
+err_ese_gpio:
+	/* optional gpio; it may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(platform_data->ese_gpio);
+err_firm_gpio:
+	gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
+	gpio_free(platform_data->irq_gpio);
+err_en_gpio:
+	gpio_free(platform_data->en_gpio);
+err_mem:
+	kfree(nqx_dev->kbuf);
+err_free_dev:
+	kfree(nqx_dev);
+err_free_data:
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, platform_data);
+err_platform_data:
+	dev_err(&client->dev,
+	"%s: probing nqxx failed, check hardware\n",
+		 __func__);
+	return r;
+}
+
+static int nqx_remove(struct i2c_client *client)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev;
+
+	nqx_dev = i2c_get_clientdata(client);
+	if (!nqx_dev) {
+		dev_err(&client->dev,
+		"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	unregister_reboot_notifier(&nfcc_notifier);
+	free_irq(client->irq, nqx_dev);
+	misc_deregister(&nqx_dev->nqx_device);
+	mutex_destroy(&nqx_dev->read_mutex);
+	gpio_free(nqx_dev->clkreq_gpio);
+	/* optional gpio; it may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(nqx_dev->ese_gpio);
+	gpio_free(nqx_dev->firm_gpio);
+	gpio_free(nqx_dev->irq_gpio);
+	gpio_free(nqx_dev->en_gpio);
+	kfree(nqx_dev->kbuf);
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, nqx_dev->pdata);
+
+	kfree(nqx_dev);
+err:
+	return ret;
+}
+
+static int nqx_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = true;
+	}
+	return 0;
+}
+
+static int nqx_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = false;
+	}
+	return 0;
+}
+
+static const struct i2c_device_id nqx_id[] = {
+	{"nqx-i2c", 0},
+	{}
+};
+
+static const struct dev_pm_ops nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(nqx_suspend, nqx_resume)
+};
+
+static struct i2c_driver nqx = {
+	.id_table = nqx_id,
+	.probe = nqx_probe,
+	.remove = nqx_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "nq-nci",
+		.of_match_table = msm_match_table,
+		.pm = &nfc_pm_ops,
+	},
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	gpio_set_value(disable_ctrl, 1);
+	return NOTIFY_OK;
+}
+
+/*
+ * module load/unload record keeping
+ */
+static int __init nqx_dev_init(void)
+{
+	return i2c_add_driver(&nqx);
+}
+module_init(nqx_dev_init);
+
+static void __exit nqx_dev_exit(void)
+{
+	unregister_reboot_notifier(&nfcc_notifier);
+	i2c_del_driver(&nqx);
+}
+module_exit(nqx_dev_exit);
+
+MODULE_DESCRIPTION("NFC nqx");
+MODULE_LICENSE("GPL v2");
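
The driver above registers a misc device named "nq-nci" whose read()/write() paths carry raw NCI frames and whose ioctls (declared in the header that follows) control power. A hedged userspace sketch of typical use, assuming the node shows up as /dev/nq-nci and the caller has permission to open it; the CORE_RESET command bytes are the same ones the driver's own hardware check sends:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ioctl.h>

	/* Same encoding as drivers/nfc/nq-nci.h */
	#define NFC_SET_PWR	_IOW(0xE9, 0x01, unsigned int)

	int main(void)
	{
		unsigned char core_reset_cmd[] = { 0x20, 0x00, 0x01, 0x00 };
		unsigned char rsp[32];
		ssize_t n;
		int fd = open("/dev/nq-nci", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		ioctl(fd, NFC_SET_PWR, 1);		/* VEN high, ref clock on */
		if (write(fd, core_reset_cmd, sizeof(core_reset_cmd)) < 0)
			perror("write");
		n = read(fd, rsp, sizeof(rsp));		/* blocks until the IRQ fires */
		if (n > 0)
			printf("got %zd byte response\n", n);

		ioctl(fd, NFC_SET_PWR, 0);		/* power back down */
		close(fd);
		return 0;
	}
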
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
new file mode 100644
index 0000000..87715c2
--- /dev/null
+++ b/drivers/nfc/nq-nci.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __NQ_NCI_H
+#define __NQ_NCI_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/nfcinfo.h>
+
+#define NFC_SET_PWR			_IOW(0xE9, 0x01, unsigned int)
+#define ESE_SET_PWR			_IOW(0xE9, 0x02, unsigned int)
+#define ESE_GET_PWR			_IOR(0xE9, 0x03, unsigned int)
+#define SET_RX_BLOCK			_IOW(0xE9, 0x04, unsigned int)
+#define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
+#define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
+
+#define NFC_RX_BUFFER_CNT_START		(0x0)
+#define PAYLOAD_HEADER_LENGTH		(0x3)
+#define PAYLOAD_LENGTH_MAX		(256)
+#define BYTE				(0x8)
+#define NCI_IDENTIFIER			(0x10)
+
+enum nfcc_initial_core_reset_ntf {
+	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0 */
+	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
+	DEFAULT_INITIAL_CORE_RESET_NTF, /* 2 */
+};
+
+enum nfcc_chip_variant {
+	NFCC_NQ_210			= 0x48,	/**< NFCC NQ210 */
+	NFCC_NQ_220			= 0x58,	/**< NFCC NQ220 */
+	NFCC_NQ_310			= 0x40,	/**< NFCC NQ310 */
+	NFCC_NQ_330			= 0x51,	/**< NFCC NQ330 */
+	NFCC_PN66T			= 0x18,	/**< NFCC PN66T */
+	NFCC_NOT_SUPPORTED	        = 0xFF	/**< NFCC is not supported */
+};
+#endif
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 351bac8..0392eb8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -218,7 +218,10 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
 	if (cmd_rc < 0)
 		return cmd_rc;
 
-	nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
+	if (clear_err.cleared > 0)
+		nvdimm_clear_from_poison_list(nvdimm_bus, phys,
+					      clear_err.cleared);
+
 	return clear_err.cleared;
 }
 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
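
The fix trims the poison list only by the number of bytes the DIMM actually reported as cleared, and skips the update entirely when nothing was cleared, instead of assuming the whole requested length. A hedged sketch of that clipping logic on a simplified range, not the real badblocks machinery:

	#include <stdio.h>

	struct poison_range {
		unsigned long long start;
		unsigned long long len;		/* bytes */
	};

	/* Drop only the [start, start + cleared) prefix from a matching range. */
	static void clear_from_list(struct poison_range *p, unsigned long long start,
				    unsigned long long cleared)
	{
		if (cleared == 0)
			return;		/* nothing was actually cleared */
		if (start != p->start || cleared > p->len)
			return;		/* simplified: handle prefix clears only */
		p->start += cleared;
		p->len -= cleared;
	}

	int main(void)
	{
		struct poison_range range = { .start = 0x1000, .len = 0x400 };

		/* Asked to clear 0x400 bytes, the hardware only managed 0x100. */
		clear_from_list(&range, 0x1000, 0x100);
		printf("remaining poison: start=0x%llx len=0x%llx\n",
		       range.start, range.len);
		return 0;
	}
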
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6c033c9..78cb3e2 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+		nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
+					- offset) / PAGE_SIZE);
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
 					"number of pfns truncated from %lld to %ld\n",
@@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	 */
 	start += start_pad;
 	size = resource_size(&nsio->res);
-	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+			/ PAGE_SIZE);
 	if (nd_pfn->mode == PFN_MODE_PMEM) {
 		/*
 		 * vmemmap_populate_hugepages() allocates the memmap array in
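
Both hunks round the page-frame count up to a whole memory section so the reserved struct page array covers the entire section-aligned range that ends up being mapped (and the second hunk also replaces the raw SZ_4K divisor with PAGE_SIZE). A small arithmetic sketch of the round-up; the 32768 pages-per-section figure assumes 128 MB sections with 4 KB pages and is not taken from this patch:

	#include <stdio.h>

	#define PAGE_SIZE		4096UL
	#define PAGES_PER_SECTION	32768UL		/* 128 MB / 4 KB: an assumption */

	static unsigned long section_align_up(unsigned long npfns)
	{
		return (npfns + PAGES_PER_SECTION - 1) / PAGES_PER_SECTION
			* PAGES_PER_SECTION;
	}

	int main(void)
	{
		unsigned long size = 200UL << 20;		/* 200 MB of pmem */
		unsigned long npfns = size / PAGE_SIZE;		/* 51200 pfns */

		printf("raw=%lu aligned=%lu\n", npfns, section_align_up(npfns));
		/* aligned = 65536: two full sections worth of struct pages */
		return 0;
	}
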
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2461843..b480859 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -383,12 +383,12 @@ static void nd_pmem_shutdown(struct device *dev)
 
 static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
-	struct pmem_device *pmem = dev_get_drvdata(dev);
-	struct nd_region *nd_region = to_region(pmem);
+	struct nd_region *nd_region;
 	resource_size_t offset = 0, end_trunc = 0;
 	struct nd_namespace_common *ndns;
 	struct nd_namespace_io *nsio;
 	struct resource res;
+	struct badblocks *bb;
 
 	if (event != NVDIMM_REVALIDATE_POISON)
 		return;
@@ -397,20 +397,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 		struct nd_btt *nd_btt = to_nd_btt(dev);
 
 		ndns = nd_btt->ndns;
-	} else if (is_nd_pfn(dev)) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+		nd_region = to_nd_region(ndns->dev.parent);
+		nsio = to_nd_namespace_io(&ndns->dev);
+		bb = &nsio->bb;
+	} else {
+		struct pmem_device *pmem = dev_get_drvdata(dev);
 
-		ndns = nd_pfn->ndns;
-		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
-		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
-	} else
-		ndns = to_ndns(dev);
+		nd_region = to_region(pmem);
+		bb = &pmem->bb;
 
-	nsio = to_nd_namespace_io(&ndns->dev);
+		if (is_nd_pfn(dev)) {
+			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+			ndns = nd_pfn->ndns;
+			offset = pmem->data_offset +
+					__le32_to_cpu(pfn_sb->start_pad);
+			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+		} else {
+			ndns = to_ndns(dev);
+		}
+
+		nsio = to_nd_namespace_io(&ndns->dev);
+	}
+
 	res.start = nsio->res.start + offset;
 	res.end = nsio->res.end - end_trunc;
-	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
+	nvdimm_badblocks_populate(nd_region, bb, &res);
 }
 
 MODULE_ALIAS("pmem");
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 9cf6f1a..f623062 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -968,17 +968,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
  */
 int nvdimm_has_flush(struct nd_region *nd_region)
 {
-	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
 	int i;
 
 	/* no nvdimm == flushing capability unknown */
 	if (nd_region->ndr_mappings == 0)
 		return -ENXIO;
 
-	for (i = 0; i < nd_region->ndr_mappings; i++)
-		/* flush hints present, flushing required */
-		if (ndrd_get_flush_wpq(ndrd, i, 0))
+	for (i = 0; i < nd_region->ndr_mappings; i++) {
+		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+		struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+		/* flush hints present / available */
+		if (nvdimm->num_flush)
 			return 1;
+	}
 
 	/*
 	 * The platform defines dimm devices without hints, assume
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8a9c186..14eac73 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1384,6 +1384,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
+		if (dev->cmbsz) {
+			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+						     &dev_attr_cmb.attr, NULL);
+			dev->cmbsz = 0;
+		}
 	}
 }
 
@@ -1655,6 +1660,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
+	nvme_release_cmb(dev);
 	pci_free_irq_vectors(pdev);
 
 	if (pci_is_enabled(pdev)) {
@@ -1993,7 +1999,6 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
-	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_put_ctrl(&dev->ctrl);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 3723f57..2c1b08a 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -263,7 +263,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
 	if (!parser->range || parser->range + parser->np > parser->end)
 		return NULL;
 
-	range->pci_space = parser->range[0];
+	range->pci_space = be32_to_cpup(parser->range);
 	range->flags = of_bus_pci_get_flags(parser->range);
 	range->pci_addr = of_read_number(parser->range + 1, ns);
 	range->cpu_addr = of_translate_address(parser->node,
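
Device-tree cells are stored big-endian, so the raw parser->range[0] word has to pass through be32_to_cpup() before its flag bits mean anything on a little-endian CPU. A hedged sketch of that conversion with a hand-rolled helper instead of the kernel one:

	#include <stdio.h>
	#include <stdint.h>

	/* Read one 32-bit device-tree cell (big-endian) into host order. */
	static uint32_t be32_to_host(const uint8_t cell[4])
	{
		return ((uint32_t)cell[0] << 24) | ((uint32_t)cell[1] << 16) |
		       ((uint32_t)cell[2] << 8)  |  (uint32_t)cell[3];
	}

	int main(void)
	{
		/* A pci_space cell of 0x82000000 (32-bit memory space) in the FDT. */
		const uint8_t raw[4] = { 0x82, 0x00, 0x00, 0x00 };
		uint32_t pci_space = be32_to_host(raw);

		printf("space code %u\n", (unsigned)((pci_space >> 24) & 0x3));	/* 2 = MEM32 */
		return 0;
	}
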
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8668808..66af185 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -505,6 +505,9 @@ static void *__unflatten_device_tree(const void *blob,
 
 	/* Allocate memory for the expanded device tree */
 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+	if (!mem)
+		return NULL;
+
 	memset(mem, 0, size);
 
 	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index a53982a..2db1f7a 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -57,6 +57,8 @@ static void __init of_numa_parse_cpu_nodes(void)
 		else
 			node_set(nid, numa_nodes_parsed);
 	}
+
+	of_node_put(cpus);
 }
 
 static int __init of_numa_parse_memory_nodes(void)
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 61fc349..dafb4cd 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -72,6 +72,7 @@ enum {
 	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
 };
 
+#define CPU_AFFINITY_ALL	-1ULL
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -868,7 +869,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		hv_int_desc_free(hpdev, int_desc);
 	}
 
-	int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
 	if (!int_desc)
 		goto drop_reference;
 
@@ -889,9 +890,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	 * processors because Hyper-V only supports 64 in a guest.
 	 */
 	affinity = irq_data_get_affinity_mask(data);
-	for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-		int_pkt->int_desc.cpu_mask |=
-			(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+	if (cpumask_weight(affinity) >= 32) {
+		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+	} else {
+		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+			int_pkt->int_desc.cpu_mask |=
+				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		}
 	}
 
 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
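
The interrupt packet carries a 64-bit cpu_mask, and the patch switches to the CPU_AFFINITY_ALL sentinel once the affinity set reaches 32 CPUs instead of setting bits one by one. A hedged sketch of that construction; treating the virtual-processor number as equal to the CPU number is an assumption made only for the example:

	#include <stdio.h>
	#include <stdint.h>

	#define CPU_AFFINITY_ALL	(~0ULL)

	static uint64_t build_cpu_mask(const int *cpus, int count)
	{
		uint64_t mask = 0;
		int i;

		if (count >= 32)
			return CPU_AFFINITY_ALL;	/* let the host pick any CPU */

		for (i = 0; i < count; i++)
			mask |= 1ULL << cpus[i];	/* vp number == cpu number here */

		return mask;
	}

	int main(void)
	{
		int cpus[] = { 0, 2, 5 };

		printf("0x%llx\n",
		       (unsigned long long)build_cpu_mask(cpus, 3));	/* 0x25 */
		return 0;
	}
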
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c7..1b07865 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -974,15 +974,19 @@ void pci_remove_legacy_files(struct pci_bus *b)
 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
 		  enum pci_mmap_api mmap_api)
 {
-	unsigned long nr, start, size, pci_start;
+	unsigned long nr, start, size;
+	resource_size_t pci_start = 0, pci_end;
 
 	if (pci_resource_len(pdev, resno) == 0)
 		return 0;
 	nr = vma_pages(vma);
 	start = vma->vm_pgoff;
 	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
-	pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
-			pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+	if (mmap_api == PCI_MMAP_PROCFS) {
+		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+				     &pci_start, &pci_end);
+		pci_start >>= PAGE_SHIFT;
+	}
 	if (start >= pci_start && start < pci_start + size &&
 			start + nr <= pci_start + size)
 		return 1;
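
pci_mmap_fits() now derives the procfs starting offset from pci_resource_to_user(), but the containment test itself is plain page arithmetic: the requested [start, start + nr) window must lie inside [pci_start, pci_start + size). A standalone sketch of that check:

	#include <stdio.h>
	#include <stdbool.h>

	/* All quantities are in pages. */
	static bool mmap_fits(unsigned long start, unsigned long nr,
			      unsigned long pci_start, unsigned long size)
	{
		return start >= pci_start && start < pci_start + size &&
		       start + nr <= pci_start + size;
	}

	int main(void)
	{
		/* A 16-page BAR whose user-visible start is page 0x100. */
		printf("%d\n", mmap_fits(0x104, 8, 0x100, 16));		/* 1: fits */
		printf("%d\n", mmap_fits(0x104, 16, 0x100, 16));	/* 0: overruns */
		return 0;
	}
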
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6922964..579c494 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1782,8 +1782,8 @@ static void pci_pme_list_scan(struct work_struct *work)
 		}
 	}
 	if (!list_empty(&pci_pme_list))
-		schedule_delayed_work(&pci_pme_work,
-				      msecs_to_jiffies(PME_TIMEOUT));
+		queue_delayed_work(system_freezable_wq, &pci_pme_work,
+				   msecs_to_jiffies(PME_TIMEOUT));
 	mutex_unlock(&pci_pme_list_mutex);
 }
 
@@ -1848,8 +1848,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 			mutex_lock(&pci_pme_list_mutex);
 			list_add(&pme_dev->list, &pci_pme_list);
 			if (list_is_singular(&pci_pme_list))
-				schedule_delayed_work(&pci_pme_work,
-						      msecs_to_jiffies(PME_TIMEOUT));
+				queue_delayed_work(system_freezable_wq,
+						   &pci_pme_work,
+						   msecs_to_jiffies(PME_TIMEOUT));
 			mutex_unlock(&pci_pme_list_mutex);
 		} else {
 			mutex_lock(&pci_pme_list_mutex);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 2408abe..66c8863 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,24 +231,33 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct pci_dev *dev = PDE_DATA(file_inode(file));
 	struct pci_filp_private *fpriv = file->private_data;
-	int i, ret, write_combine;
+	int i, ret, write_combine = 0, res_bit;
 
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	if (fpriv->mmap_state == pci_mmap_io)
+		res_bit = IORESOURCE_IO;
+	else
+		res_bit = IORESOURCE_MEM;
+
 	/* Make sure the caller is mapping a real resource for this device */
 	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-		if (pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
+		if (dev->resource[i].flags & res_bit &&
+		    pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
 			break;
 	}
 
 	if (i >= PCI_ROM_RESOURCE)
 		return -ENODEV;
 
-	if (fpriv->mmap_state == pci_mmap_mem)
-		write_combine = fpriv->write_combine;
-	else
-		write_combine = 0;
+	if (fpriv->mmap_state == pci_mmap_mem &&
+	    fpriv->write_combine) {
+		if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+			write_combine = 1;
+		else
+			return -EINVAL;
+	}
 	ret = pci_mmap_page_range(dev, vma,
 				  fpriv->mmap_state, write_combine);
 	if (ret < 0)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 304e206..40ee647 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -88,14 +88,14 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc SDM845 platform.
 
-config PINCTRL_SDM830
-	tristate "Qualcomm Technologies Inc SDM830 pin controller driver"
+config PINCTRL_SDM670
+	tristate "Qualcomm Technologies Inc SDM670 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc SDM830 platform.
+	  Technologies Inc SDM670 platform.
 
 config PINCTRL_SDXPOORWILLS
 	tristate "Qualcomm Technologies Inc SDXPOORWILLS pin controller driver"
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4786960..6a49671 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -16,7 +16,7 @@
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
 obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
-obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
+obj-$(CONFIG_PINCTRL_SDM670) += pinctrl-sdm670.o
 obj-$(CONFIG_PINCTRL_SDXPOORWILLS)	+= pinctrl-sdxpoorwills.o
 obj-$(CONFIG_PINCTRL_WCD)	+= pinctrl-wcd.o
 obj-$(CONFIG_PINCTRL_LPI)	+= pinctrl-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2a1367e..9520166 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -31,7 +32,7 @@
 #include <linux/reboot.h>
 #include <linux/pm.h>
 #include <linux/log2.h>
-
+#include <linux/irq.h>
 #include "../core.h"
 #include "../pinconf.h"
 #include "pinctrl-msm.h"
@@ -749,6 +750,91 @@ static struct irq_chip msm_gpio_irq_chip = {
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
+static void msm_dirconn_irq_mask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_mask)
+		parent_data->chip->irq_mask(parent_data);
+}
+
+static void msm_dirconn_irq_unmask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_unmask)
+		parent_data->chip->irq_unmask(parent_data);
+}
+
+static void msm_dirconn_irq_ack(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_ack)
+		parent_data->chip->irq_ack(parent_data);
+}
+
+static void msm_dirconn_irq_eoi(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_eoi)
+		parent_data->chip->irq_eoi(parent_data);
+}
+
+static int msm_dirconn_irq_set_affinity(struct irq_data *d,
+		const struct cpumask *maskval, bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_affinity)
+		return parent_data->chip->irq_set_affinity(parent_data,
+				maskval, force);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_vcpu_affinity(struct irq_data *d,
+		void *vcpu_info)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_vcpu_affinity)
+		return parent_data->chip->irq_set_vcpu_affinity(parent_data,
+				vcpu_info);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_type)
+		return parent_data->chip->irq_set_type(parent_data, type);
+
+	return 0;
+}
+
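+/*
+ * irqchip for direct-connect GPIOs: each operation is forwarded to the
+ * parent (GIC) irq_data recorded for the mapping, since these pins are
+ * routed to dedicated GIC SPIs rather than the TLMM summary interrupt.
+ */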
+static struct irq_chip msm_dirconn_irq_chip = {
+	.name			= "msmgpio-dc",
+	.irq_mask		= msm_dirconn_irq_mask,
+	.irq_unmask		= msm_dirconn_irq_unmask,
+	.irq_eoi		= msm_dirconn_irq_eoi,
+	.irq_ack		= msm_dirconn_irq_ack,
+	.irq_set_type		= msm_dirconn_irq_set_type,
+	.irq_set_affinity	= msm_dirconn_irq_set_affinity,
+	.irq_set_vcpu_affinity	= msm_dirconn_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SKIP_SET_WAKE
+					| IRQCHIP_MASK_ON_SUSPEND
+					| IRQCHIP_SET_TYPE_MASKED,
+};
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -783,6 +869,55 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
+static void msm_gpio_dirconn_handler(struct irq_desc *desc)
+{
+	struct irq_data *irqd = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
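+	/* Re-inject the direct-connect event as the GPIO's own Linux IRQ */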
+	chained_irq_enter(chip, desc);
+	generic_handle_irq(irqd->irq);
+	chained_irq_exit(chip, desc);
+}
+
+static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl)
+{
+	struct device_node *parent_node;
+	struct irq_domain *parent_domain;
+	struct irq_fwspec fwspec;
+	unsigned int i;
+
+	parent_node = of_irq_find_parent(pctrl->dev->of_node);
+
+	if (!parent_node)
+		return;
+
+	parent_domain = irq_find_host(parent_node);
+	if (!parent_domain)
+		return;
+
+	fwspec.fwnode = parent_domain->fwnode;
+	for (i = 0; i < pctrl->soc->n_dir_conns; i++) {
+		const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i];
+		unsigned int parent_irq;
+		int irq;
+
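+
+		/* Map this pin's parent GIC SPI (3-cell fwspec: SPI, hwirq, trigger) */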
+		fwspec.param[0] = 0; /* SPI */
+		fwspec.param[1] = dirconn->hwirq;
+		fwspec.param[2] = IRQ_TYPE_NONE;
+		fwspec.param_count = 3;
+		parent_irq = irq_create_fwspec_mapping(&fwspec);
+
+		irq = irq_find_mapping(pctrl->chip.irqdomain, dirconn->gpio);
+
+		irq_set_parent(irq, parent_irq);
+		irq_set_chip(irq, &msm_dirconn_irq_chip);
+		irq_set_chip_data(irq, irq_get_irq_data(parent_irq));
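+		/* Reroute the parent SPI to the chained handler that raises this GPIO IRQ */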
+		__irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
+				false, NULL);
+		irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
+	}
+}
+
 static int msm_gpio_init(struct msm_pinctrl *pctrl)
 {
 	struct gpio_chip *chip;
@@ -827,6 +962,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
 	gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
 				     msm_gpio_irq_handler);
 
+	msm_gpio_setup_dir_connects(pctrl);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index e986fda..0e223e0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -95,6 +95,16 @@ struct msm_pingroup {
 	unsigned intr_polarity_bit:5;
 	unsigned intr_detection_bit:5;
 	unsigned intr_detection_width:5;
+};
+
+/**
+ * struct msm_dir_conn - Direct GPIO connect configuration
+ * @gpio:	GPIO pin number
+ * @hwirq:	The GIC interrupt that the pin is connected to
+ */
+struct msm_dir_conn {
+	unsigned int gpio;
+	irq_hw_number_t hwirq;
 };
 
 /**
@@ -106,6 +116,8 @@ struct msm_pingroup {
  * @groups:     An array describing all pin groups the pin SoC supports.
  * @ngroups:    The numbmer of entries in @groups.
  * @ngpio:      The number of pingroups the driver should expose as GPIOs.
+ * @dir_conn:   An array describing all the pins directly connected to GIC.
+ * @n_dir_conns: The number of pins directly connected to GIC.
  */
 struct msm_pinctrl_soc_data {
 	const struct pinctrl_pin_desc *pins;
@@ -115,6 +127,8 @@ struct msm_pinctrl_soc_data {
 	const struct msm_pingroup *groups;
 	unsigned ngroups;
 	unsigned ngpios;
+	const struct msm_dir_conn *dir_conn;
+	unsigned int n_dir_conns;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm830.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
similarity index 96%
rename from drivers/pinctrl/qcom/pinctrl-sdm830.c
rename to drivers/pinctrl/qcom/pinctrl-sdm670.c
index fc3d0ad..c93628e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm830.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -29,7 +29,6 @@
 #define SOUTH	0x00900000
 #define WEST	0x00100000
 #define REG_SIZE 0x1000
-
 #define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
 	{						\
 		.name = "gpio" #id,			\
@@ -118,7 +117,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc sdm830_pins[] = {
+static const struct pinctrl_pin_desc sdm670_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -255,9 +254,12 @@ static const struct pinctrl_pin_desc sdm830_pins[] = {
 	PINCTRL_PIN(147, "GPIO_147"),
 	PINCTRL_PIN(148, "GPIO_148"),
 	PINCTRL_PIN(149, "GPIO_149"),
-	PINCTRL_PIN(150, "SDC2_CLK"),
-	PINCTRL_PIN(151, "SDC2_CMD"),
-	PINCTRL_PIN(152, "SDC2_DATA"),
+	PINCTRL_PIN(150, "SDC1_CLK"),
+	PINCTRL_PIN(151, "SDC1_CMD"),
+	PINCTRL_PIN(152, "SDC1_DATA"),
+	PINCTRL_PIN(153, "SDC2_CLK"),
+	PINCTRL_PIN(154, "SDC2_CMD"),
+	PINCTRL_PIN(155, "SDC2_DATA"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
@@ -399,11 +401,14 @@ DECLARE_MSM_GPIO_PINS(147);
 DECLARE_MSM_GPIO_PINS(148);
 DECLARE_MSM_GPIO_PINS(149);
 
-static const unsigned int sdc2_clk_pins[] = { 150 };
-static const unsigned int sdc2_cmd_pins[] = { 151 };
-static const unsigned int sdc2_data_pins[] = { 152 };
+static const unsigned int sdc1_clk_pins[] = { 150 };
+static const unsigned int sdc1_cmd_pins[] = { 151 };
+static const unsigned int sdc1_data_pins[] = { 152 };
+static const unsigned int sdc2_clk_pins[] = { 153 };
+static const unsigned int sdc2_cmd_pins[] = { 154 };
+static const unsigned int sdc2_data_pins[] = { 155 };
 
-enum sdm830_functions {
+enum sdm670_functions {
 	msm_mux_qup0,
 	msm_mux_gpio,
 	msm_mux_reserved0,
@@ -456,6 +461,16 @@ enum sdm830_functions {
 	msm_mux_qup1,
 	msm_mux_qdss_gpio4,
 	msm_mux_reserved17,
+	msm_mux_qdss_gpio5,
+	msm_mux_reserved18,
+	msm_mux_qdss_gpio6,
+	msm_mux_reserved19,
+	msm_mux_qdss_gpio7,
+	msm_mux_reserved20,
+	msm_mux_cci_timer0,
+	msm_mux_gcc_gp2,
+	msm_mux_qdss_gpio8,
+	msm_mux_reserved21,
 	msm_mux_cci_timer1,
 	msm_mux_gcc_gp3,
 	msm_mux_qdss_gpio,
@@ -470,16 +485,6 @@ enum sdm830_functions {
 	msm_mux_cci_timer4,
 	msm_mux_qdss_gpio11,
 	msm_mux_reserved25,
-	msm_mux_qdss_gpio5,
-	msm_mux_reserved18,
-	msm_mux_qdss_gpio6,
-	msm_mux_reserved19,
-	msm_mux_qdss_gpio7,
-	msm_mux_reserved20,
-	msm_mux_cci_timer0,
-	msm_mux_gcc_gp2,
-	msm_mux_qdss_gpio8,
-	msm_mux_reserved21,
 	msm_mux_qdss_gpio12,
 	msm_mux_JITTER_BIST,
 	msm_mux_reserved26,
@@ -894,6 +899,36 @@ static const char * const qdss_gpio4_groups[] = {
 static const char * const reserved17_groups[] = {
 	"gpio17",
 };
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const reserved18_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const reserved19_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const reserved20_groups[] = {
+	"gpio20",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const reserved21_groups[] = {
+	"gpio21",
+};
 static const char * const cci_timer1_groups[] = {
 	"gpio22",
 };
@@ -936,36 +971,6 @@ static const char * const qdss_gpio11_groups[] = {
 static const char * const reserved25_groups[] = {
 	"gpio25",
 };
-static const char * const qdss_gpio5_groups[] = {
-	"gpio18", "gpio122",
-};
-static const char * const reserved18_groups[] = {
-	"gpio18",
-};
-static const char * const qdss_gpio6_groups[] = {
-	"gpio19", "gpio41",
-};
-static const char * const reserved19_groups[] = {
-	"gpio19",
-};
-static const char * const qdss_gpio7_groups[] = {
-	"gpio20", "gpio42",
-};
-static const char * const reserved20_groups[] = {
-	"gpio20",
-};
-static const char * const cci_timer0_groups[] = {
-	"gpio21",
-};
-static const char * const gcc_gp2_groups[] = {
-	"gpio21",
-};
-static const char * const qdss_gpio8_groups[] = {
-	"gpio21", "gpio75",
-};
-static const char * const reserved21_groups[] = {
-	"gpio21",
-};
 static const char * const qdss_gpio12_groups[] = {
 	"gpio26", "gpio80",
 };
@@ -1680,7 +1685,7 @@ static const char * const reserved123_groups[] = {
 	"gpio123",
 };
 
-static const struct msm_function sdm830_functions[] = {
+static const struct msm_function sdm670_functions[] = {
 	FUNCTION(qup0),
 	FUNCTION(gpio),
 	FUNCTION(reserved0),
@@ -1733,6 +1738,16 @@ static const struct msm_function sdm830_functions[] = {
 	FUNCTION(qup1),
 	FUNCTION(qdss_gpio4),
 	FUNCTION(reserved17),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(reserved18),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(reserved19),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(reserved20),
+	FUNCTION(cci_timer0),
+	FUNCTION(gcc_gp2),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(reserved21),
 	FUNCTION(cci_timer1),
 	FUNCTION(gcc_gp3),
 	FUNCTION(qdss_gpio),
@@ -1747,16 +1762,6 @@ static const struct msm_function sdm830_functions[] = {
 	FUNCTION(cci_timer4),
 	FUNCTION(qdss_gpio11),
 	FUNCTION(reserved25),
-	FUNCTION(qdss_gpio5),
-	FUNCTION(reserved18),
-	FUNCTION(qdss_gpio6),
-	FUNCTION(reserved19),
-	FUNCTION(qdss_gpio7),
-	FUNCTION(reserved20),
-	FUNCTION(cci_timer0),
-	FUNCTION(gcc_gp2),
-	FUNCTION(qdss_gpio8),
-	FUNCTION(reserved21),
 	FUNCTION(qdss_gpio12),
 	FUNCTION(JITTER_BIST),
 	FUNCTION(reserved26),
@@ -1996,7 +2001,7 @@ static const struct msm_function sdm830_functions[] = {
 	FUNCTION(reserved123),
 };
 
-static const struct msm_pingroup sdm830_groups[] = {
+static const struct msm_pingroup sdm670_groups[] = {
 	PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2108,9 +2113,9 @@ static const struct msm_pingroup sdm830_groups[] = {
 		 QUP_L5, reserved76, NA, NA, NA),
 	PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
 		 QUP_L6, reserved77, NA, NA, NA),
-	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, reserved78, NA, NA, NA, NA, NA,
+	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, reserved78, NA, NA, NA, NA,
 		 NA),
-	PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, reserved79, NA,
+	PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, reserved79,
 		 NA, NA, NA),
 	PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
 		 NA, NA),
@@ -2231,53 +2236,56 @@ static const struct msm_pingroup sdm830_groups[] = {
 	PINGROUP(147, WEST, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
 	PINGROUP(148, WEST, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
 	PINGROUP(149, WEST, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
-	SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
-	SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
+	SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data sdm830_pinctrl = {
-	.pins = sdm830_pins,
-	.npins = ARRAY_SIZE(sdm830_pins),
-	.functions = sdm830_functions,
-	.nfunctions = ARRAY_SIZE(sdm830_functions),
-	.groups = sdm830_groups,
-	.ngroups = ARRAY_SIZE(sdm830_groups),
+static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
+	.pins = sdm670_pins,
+	.npins = ARRAY_SIZE(sdm670_pins),
+	.functions = sdm670_functions,
+	.nfunctions = ARRAY_SIZE(sdm670_functions),
+	.groups = sdm670_groups,
+	.ngroups = ARRAY_SIZE(sdm670_groups),
 	.ngpios = 136,
 };
 
-static int sdm830_pinctrl_probe(struct platform_device *pdev)
+static int sdm670_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &sdm830_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm670_pinctrl);
 }
 
-static const struct of_device_id sdm830_pinctrl_of_match[] = {
-	{ .compatible = "qcom,sdm830-pinctrl", },
+static const struct of_device_id sdm670_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm670-pinctrl", },
 	{ },
 };
 
-static struct platform_driver sdm830_pinctrl_driver = {
+static struct platform_driver sdm670_pinctrl_driver = {
 	.driver = {
-		.name = "sdm830-pinctrl",
+		.name = "sdm670-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = sdm830_pinctrl_of_match,
+		.of_match_table = sdm670_pinctrl_of_match,
 	},
-	.probe = sdm830_pinctrl_probe,
+	.probe = sdm670_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init sdm830_pinctrl_init(void)
+static int __init sdm670_pinctrl_init(void)
 {
-	return platform_driver_register(&sdm830_pinctrl_driver);
+	return platform_driver_register(&sdm670_pinctrl_driver);
 }
-arch_initcall(sdm830_pinctrl_init);
+arch_initcall(sdm670_pinctrl_init);
 
-static void __exit sdm830_pinctrl_exit(void)
+static void __exit sdm670_pinctrl_exit(void)
 {
-	platform_driver_unregister(&sdm830_pinctrl_driver);
+	platform_driver_unregister(&sdm670_pinctrl_driver);
 }
-module_exit(sdm830_pinctrl_exit);
+module_exit(sdm670_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI sdm830 pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm670 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, sdm830_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm670_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 30c31a8..7d125eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -2377,6 +2377,84 @@ static const struct msm_pingroup sdm845_groups[] = {
 	UFS_RESET(ufs_reset, 0x99f000),
 };
 
+static const struct msm_dir_conn sdm845_dir_conn[] = {
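+	/* { TLMM GPIO number, parent GIC SPI hwirq } */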
+	{1, 510},
+	{3, 511},
+	{5, 512},
+	{10, 513},
+	{11, 514},
+	{20, 515},
+	{22, 516},
+	{24, 517},
+	{26, 518},
+	{30, 519},
+	{31, 639},
+	{32, 521},
+	{34, 522},
+	{36, 523},
+	{37, 524},
+	{38, 525},
+	{39, 526},
+	{40, 527},
+	{41, 637},
+	{43, 529},
+	{44, 530},
+	{46, 531},
+	{48, 532},
+	{49, 640},
+	{52, 534},
+	{53, 535},
+	{54, 536},
+	{56, 537},
+	{57, 538},
+	{58, 539},
+	{59, 540},
+	{60, 541},
+	{61, 542},
+	{62, 543},
+	{63, 544},
+	{64, 545},
+	{66, 546},
+	{68, 547},
+	{71, 548},
+	{73, 549},
+	{77, 550},
+	{78, 551},
+	{79, 552},
+	{80, 553},
+	{84, 554},
+	{85, 555},
+	{86, 556},
+	{88, 557},
+	{89, 638},
+	{91, 559},
+	{92, 560},
+	{95, 561},
+	{96, 562},
+	{97, 563},
+	{101, 564},
+	{103, 565},
+	{104, 566},
+	{115, 570},
+	{116, 571},
+	{117, 572},
+	{118, 573},
+	{119, 609},
+	{120, 610},
+	{121, 611},
+	{122, 612},
+	{123, 613},
+	{124, 614},
+	{125, 615},
+	{127, 617},
+	{128, 618},
+	{129, 619},
+	{130, 620},
+	{132, 621},
+	{133, 622},
+	{145, 623},
+};
+
 static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
 	.pins = sdm845_pins,
 	.npins = ARRAY_SIZE(sdm845_pins),
@@ -2385,6 +2463,8 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
 	.groups = sdm845_groups,
 	.ngroups = ARRAY_SIZE(sdm845_groups),
 	.ngpios = 150,
+	.dir_conn = sdm845_dir_conn,
+	.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
 };
 
 static int sdm845_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 6c597f0..38264d9 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -101,76 +101,76 @@ static struct ipa_api_controller *ipa_api_ctrl;
 
 const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_HSIC1_PROD),
-	__stringify(IPA_CLIENT_WLAN1_PROD),
-	__stringify(IPA_CLIENT_HSIC2_PROD),
-	__stringify(IPA_CLIENT_USB2_PROD),
-	__stringify(IPA_CLIENT_HSIC3_PROD),
-	__stringify(IPA_CLIENT_USB3_PROD),
-	__stringify(IPA_CLIENT_HSIC4_PROD),
-	__stringify(IPA_CLIENT_USB4_PROD),
-	__stringify(IPA_CLIENT_HSIC5_PROD),
-	__stringify(IPA_CLIENT_USB_PROD),
-	__stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
-	__stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
-	__stringify(IPA_CLIENT_A2_TETHERED_PROD),
-	__stringify(IPA_CLIENT_APPS_LAN_PROD),
-	__stringify(IPA_CLIENT_APPS_WAN_PROD),
-	__stringify(IPA_CLIENT_APPS_CMD_PROD),
-	__stringify(IPA_CLIENT_ODU_PROD),
-	__stringify(IPA_CLIENT_MHI_PROD),
-	__stringify(IPA_CLIENT_Q6_LAN_PROD),
-	__stringify(IPA_CLIENT_Q6_WAN_PROD),
-	__stringify(IPA_CLIENT_Q6_CMD_PROD),
-	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
-	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
-	__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
-	__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
-	__stringify(IPA_CLIENT_UC_USB_PROD),
-	__stringify(IPA_CLIENT_ETHERNET_PROD),
-
-	/* Below PROD client type is only for test purpose */
-	__stringify(IPA_CLIENT_TEST_PROD),
-	__stringify(IPA_CLIENT_TEST1_PROD),
-	__stringify(IPA_CLIENT_TEST2_PROD),
-	__stringify(IPA_CLIENT_TEST3_PROD),
-	__stringify(IPA_CLIENT_TEST4_PROD),
-
 	__stringify(IPA_CLIENT_HSIC1_CONS),
-	__stringify(IPA_CLIENT_WLAN1_CONS),
+	__stringify(IPA_CLIENT_HSIC2_PROD),
 	__stringify(IPA_CLIENT_HSIC2_CONS),
-	__stringify(IPA_CLIENT_USB2_CONS),
-	__stringify(IPA_CLIENT_WLAN2_CONS),
+	__stringify(IPA_CLIENT_HSIC3_PROD),
 	__stringify(IPA_CLIENT_HSIC3_CONS),
-	__stringify(IPA_CLIENT_USB3_CONS),
-	__stringify(IPA_CLIENT_WLAN3_CONS),
+	__stringify(IPA_CLIENT_HSIC4_PROD),
 	__stringify(IPA_CLIENT_HSIC4_CONS),
-	__stringify(IPA_CLIENT_USB4_CONS),
-	__stringify(IPA_CLIENT_WLAN4_CONS),
+	__stringify(IPA_CLIENT_HSIC5_PROD),
 	__stringify(IPA_CLIENT_HSIC5_CONS),
+	__stringify(IPA_CLIENT_WLAN1_PROD),
+	__stringify(IPA_CLIENT_WLAN1_CONS),
+	__stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+	__stringify(IPA_CLIENT_WLAN2_CONS),
+	__stringify(RESERVED_PROD_14),
+	__stringify(IPA_CLIENT_WLAN3_CONS),
+	__stringify(RESERVED_PROD_16),
+	__stringify(IPA_CLIENT_WLAN4_CONS),
+	__stringify(IPA_CLIENT_USB_PROD),
 	__stringify(IPA_CLIENT_USB_CONS),
+	__stringify(IPA_CLIENT_USB2_PROD),
+	__stringify(IPA_CLIENT_USB2_CONS),
+	__stringify(IPA_CLIENT_USB3_PROD),
+	__stringify(IPA_CLIENT_USB3_CONS),
+	__stringify(IPA_CLIENT_USB4_PROD),
+	__stringify(IPA_CLIENT_USB4_CONS),
+	__stringify(IPA_CLIENT_UC_USB_PROD),
 	__stringify(IPA_CLIENT_USB_DPL_CONS),
+	__stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
 	__stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+	__stringify(IPA_CLIENT_A2_TETHERED_PROD),
 	__stringify(IPA_CLIENT_A2_TETHERED_CONS),
-	__stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+	__stringify(IPA_CLIENT_APPS_LAN_PROD),
 	__stringify(IPA_CLIENT_APPS_LAN_CONS),
+	__stringify(IPA_CLIENT_APPS_WAN_PROD),
 	__stringify(IPA_CLIENT_APPS_WAN_CONS),
+	__stringify(IPA_CLIENT_APPS_CMD_PROD),
+	__stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+	__stringify(IPA_CLIENT_ODU_PROD),
 	__stringify(IPA_CLIENT_ODU_EMB_CONS),
+	__stringify(RESERVED_PROD_40),
 	__stringify(IPA_CLIENT_ODU_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_PROD),
 	__stringify(IPA_CLIENT_MHI_CONS),
-	__stringify(IPA_CLIENT_Q6_LAN_CONS),
-	__stringify(IPA_CLIENT_Q6_WAN_CONS),
-	__stringify(IPA_CLIENT_Q6_DUN_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
 	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
 	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
-	__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
-	__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
-	__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+	__stringify(IPA_CLIENT_ETHERNET_PROD),
 	__stringify(IPA_CLIENT_ETHERNET_CONS),
-	/* Below CONS client type is only for test purpose */
+	__stringify(IPA_CLIENT_Q6_LAN_PROD),
+	__stringify(IPA_CLIENT_Q6_LAN_CONS),
+	__stringify(IPA_CLIENT_Q6_WAN_PROD),
+	__stringify(IPA_CLIENT_Q6_WAN_CONS),
+	__stringify(IPA_CLIENT_Q6_CMD_PROD),
+	__stringify(IPA_CLIENT_Q6_DUN_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+	__stringify(RESERVED_PROD_60),
+	__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+	__stringify(IPA_CLIENT_TEST_PROD),
 	__stringify(IPA_CLIENT_TEST_CONS),
+	__stringify(IPA_CLIENT_TEST1_PROD),
 	__stringify(IPA_CLIENT_TEST1_CONS),
+	__stringify(IPA_CLIENT_TEST2_PROD),
 	__stringify(IPA_CLIENT_TEST2_CONS),
+	__stringify(IPA_CLIENT_TEST3_PROD),
 	__stringify(IPA_CLIENT_TEST3_CONS),
+	__stringify(IPA_CLIENT_TEST4_PROD),
 	__stringify(IPA_CLIENT_TEST4_CONS),
 };
 
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 981129e..50804ee 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -108,8 +108,10 @@ do {\
 		ipa_assert();\
 } while (0)
 
-#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
-#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
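+/* Client IDs are interleaved: even values are producers, odd values are consumers */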
+#define IPA_CLIENT_IS_PROD(x) \
+	(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
+#define IPA_CLIENT_IS_CONS(x) \
+	(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
 
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index ea91b13..914028c 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -19,32 +19,33 @@
 
 static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
 	__stringify(IPA_RM_RESOURCE_Q6_PROD),
-	__stringify(IPA_RM_RESOURCE_USB_PROD),
-	__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
-	__stringify(IPA_RM_RESOURCE_HSIC_PROD),
-	__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
-	__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
-	__stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
-	__stringify(IPA_RM_RESOURCE_WLAN_PROD),
-	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
-	__stringify(IPA_RM_RESOURCE_MHI_PROD),
-	__stringify(IPA_RM_RESOURCE_ETHERNET_PROD),
 	__stringify(IPA_RM_RESOURCE_Q6_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_PROD),
 	__stringify(IPA_RM_RESOURCE_USB_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
 	__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
+	__stringify(IPA_RM_RESOURCE_HSIC_PROD),
 	__stringify(IPA_RM_RESOURCE_HSIC_CONS),
-	__stringify(IPA_RM_RESOURCE_WLAN_CONS),
+	__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
 	__stringify(IPA_RM_RESOURCE_APPS_CONS),
+	__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
+	__stringify(RESERVED_CONS_11),
+	__stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
+	__stringify(RESERVED_CONS_13),
+	__stringify(IPA_RM_RESOURCE_WLAN_PROD),
+	__stringify(IPA_RM_RESOURCE_WLAN_CONS),
+	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
 	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
+	__stringify(IPA_RM_RESOURCE_MHI_PROD),
 	__stringify(IPA_RM_RESOURCE_MHI_CONS),
+	__stringify(IPA_RM_RESOURCE_ETHERNET_PROD),
 	__stringify(IPA_RM_RESOURCE_ETHERNET_CONS),
 };
 
 struct ipa_rm_profile_vote_type {
 	enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX];
 	enum ipa_voltage_level curr_volt;
-	u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX];
-	u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX];
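+	/* Per-resource bandwidth votes, indexed directly by resource name */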
+	u32 bw_resources[IPA_RM_RESOURCE_MAX];
 	u32 curr_bw;
 };
 
@@ -999,7 +1000,9 @@ int ipa_rm_stat(char *buf, int size)
 		return result;
 
 	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
-	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) {
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; ++i) {
+		if (!IPA_RM_RESORCE_IS_PROD(i))
+			continue;
 		result = ipa_rm_dep_graph_get_resource(
 				ipa_rm_ctx->dep_graph,
 				i,
@@ -1014,11 +1017,12 @@ int ipa_rm_stat(char *buf, int size)
 		}
 	}
 
-	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
-		sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
-
-	for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
-		sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (IPA_RM_RESORCE_IS_PROD(i))
+			sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i];
+		else
+			sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i];
+	}
 
 	result = scnprintf(buf + cnt, size - cnt,
 		"All prod bandwidth: %d, All cons bandwidth: %d\n",
@@ -1118,15 +1122,7 @@ void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
 	old_volt = ipa_rm_ctx->prof_vote.curr_volt;
 	old_bw = ipa_rm_ctx->prof_vote.curr_bw;
 
-	if (IPA_RM_RESORCE_IS_PROD(resource_name)) {
-		bw_ptr = &ipa_rm_ctx->prof_vote.bw_prods[resource_name];
-	} else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
-		bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[
-				resource_name - IPA_RM_RESOURCE_PROD_MAX];
-	} else {
-		IPA_RM_ERR("Invalid resource_name\n");
-		return;
-	}
+	bw_ptr = &ipa_rm_ctx->prof_vote.bw_resources[resource_name];
 
 	switch (resource->state) {
 	case IPA_RM_GRANTED:
@@ -1161,11 +1157,12 @@ void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
 		}
 	}
 
-	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
-		sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
-
-	for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
-		sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (IPA_RM_RESORCE_IS_PROD(i))
+			sum_bw_prod += ipa_rm_ctx->prof_vote.bw_resources[i];
+		else
+			sum_bw_cons += ipa_rm_ctx->prof_vote.bw_resources[i];
+	}
 
 	IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n",
 		sum_bw_prod, sum_bw_cons);
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
index 1610bb1..c0e3ce2 100644
--- a/drivers/platform/msm/ipa/ipa_rm_i.h
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -47,12 +47,10 @@
 			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
-#define IPA_RM_RESOURCE_CONS_MAX \
-	(IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
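+/* RM resource IDs are interleaved: even values are producers, odd values are consumers */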
 #define IPA_RM_RESORCE_IS_PROD(x) \
-	(x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX)
+	(x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 0)
 #define IPA_RM_RESORCE_IS_CONS(x) \
-	(x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX)
+	(x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 1)
 #define IPA_RM_INDEX_INVALID	(-1)
 #define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
 
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
index fe8e781..8365120 100644
--- a/drivers/platform/msm/ipa/ipa_rm_peers_list.c
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,9 +32,6 @@ static int ipa_rm_peers_list_get_resource_index(
 		resource_index = ipa_rm_prod_index(resource_name);
 	else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
 		resource_index = ipa_rm_cons_index(resource_name);
-		if (resource_index != IPA_RM_INDEX_INVALID)
-			resource_index =
-				resource_index - IPA_RM_RESOURCE_PROD_MAX;
 	}
 
 	return resource_index;
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 9e74a3f..991208f 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -328,7 +328,7 @@ static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
 
 	(*resource) = (struct ipa_rm_resource *) (*producer);
 	(*resource)->type = IPA_RM_PRODUCER;
-	*max_peers = IPA_RM_RESOURCE_CONS_MAX;
+	*max_peers = IPA_RM_RESOURCE_MAX;
 	goto bail;
 register_fail:
 	kfree(*producer);
@@ -371,7 +371,7 @@ static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
 	(*resource) = (struct ipa_rm_resource *) (*consumer);
 	(*resource)->type = IPA_RM_CONSUMER;
 	init_completion(&((*consumer)->request_consumer_in_progress));
-	*max_peers = IPA_RM_RESOURCE_PROD_MAX;
+	*max_peers = IPA_RM_RESOURCE_MAX;
 bail:
 	return result;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index f5d8227..53ab299 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1836,6 +1836,7 @@ static int ipa_q6_clean_q6_tables(void)
 	struct ipa_mem_buffer mem = { 0 };
 	u32 *entry;
 	u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
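+	/* Command payloads must come from ZONE_DMA when use_dma_zone is set */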
 
 	mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
 		GFP_ATOMIC);
@@ -1856,7 +1857,7 @@ static int ipa_q6_clean_q6_tables(void)
 	}
 
 	cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
-		GFP_KERNEL);
+		flag);
 	if (!cmd) {
 		IPAERR("failed to allocate memory\n");
 		retval = -ENOMEM;
@@ -3713,7 +3714,7 @@ void ipa_suspend_handler(enum ipa_irq_type interrupt,
 				resource = ipa2_get_rm_resource_from_ep(i);
 				res = ipa_rm_request_resource_with_timer(
 					resource);
-				if (res == -EPERM &&
+				if ((res == -EPERM) &&
 				    IPA_CLIENT_IS_CONS(
 					ipa_ctx->ep[i].client)) {
 					holb_cfg.en = 1;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index a822f66..80b97e7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -420,15 +420,17 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 	int i = 0;
 	int j;
 	int result;
-	int fail_dma_wrap = 0;
 	uint size = num_desc * sizeof(struct sps_iovec);
-	u32 mem_flag = GFP_ATOMIC;
+	gfp_t mem_flag = GFP_ATOMIC;
 	struct sps_iovec iov;
 	int ret;
+	gfp_t flag;
 
 	if (unlikely(!in_atomic))
 		mem_flag = GFP_KERNEL;
 
+	flag = mem_flag | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
 	if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
 		transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
 				&dma_addr);
@@ -437,7 +439,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 			return -EFAULT;
 		}
 	} else {
-		transfer.iovec = kmalloc(size, mem_flag);
+		transfer.iovec = kmalloc(size, flag);
 		if (!transfer.iovec) {
 			IPAERR("fail to alloc mem for sps xfr buff ");
 			IPAERR("num_desc = %d size = %d\n", num_desc, size);
@@ -457,7 +459,6 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 	spin_lock_bh(&sys->spinlock);
 
 	for (i = 0; i < num_desc; i++) {
-		fail_dma_wrap = 0;
 		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
 					   mem_flag);
 		if (!tx_pkt) {
@@ -493,15 +494,6 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 					tx_pkt->mem.base,
 					tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-
-				if (dma_mapping_error(ipa_ctx->pdev,
-					tx_pkt->mem.phys_base)) {
-					IPAERR("dma_map_single ");
-					IPAERR("failed\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
-
 			} else {
 				tx_pkt->mem.phys_base = desc[i].dma_address;
 				tx_pkt->no_unmap_dma = true;
@@ -522,10 +514,9 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 			}
 		}
 
-		if (!tx_pkt->mem.phys_base) {
-			IPAERR("failed to alloc tx wrapper\n");
-			fail_dma_wrap = 1;
-			goto failure;
+		if (dma_mapping_error(ipa_ctx->pdev, tx_pkt->mem.phys_base)) {
+			IPAERR("dma_map_single failed\n");
+			goto failure_dma_map;
 		}
 
 		tx_pkt->sys = sys;
@@ -580,27 +571,30 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
 	spin_unlock_bh(&sys->spinlock);
 	return 0;
 
+failure_dma_map:
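+	/* Free the descriptor that failed to map; the loop below unwinds the earlier ones */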
+	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
 failure:
 	tx_pkt = transfer.user;
 	for (j = 0; j < i; j++) {
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
-			dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
-		} else {
-			dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
+		if (!tx_pkt->no_unmap_dma) {
+			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
 		}
 		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
 		tx_pkt = next_pkt;
 	}
-	if (j < num_desc)
-		/* last desc failed */
-		if (fail_dma_wrap)
-			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
 	if (transfer.iovec_phys) {
 		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
 			dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
@@ -953,7 +947,7 @@ void ipa_sps_irq_control_all(bool enable)
 
 	IPADBG("\n");
 
-	for (client_num = IPA_CLIENT_CONS;
+	for (client_num = 0;
 		client_num < IPA_CLIENT_MAX; client_num++) {
 		if (!IPA_CLIENT_IS_APPS_CONS(client_num))
 			continue;
@@ -1658,6 +1652,7 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 	struct ipa_sys_context *sys;
 	int src_ep_idx;
 	int num_frags, f;
+	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	if (unlikely(!ipa_ctx)) {
 		IPAERR("IPA driver was not initialized\n");
@@ -1723,7 +1718,7 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 
 	if (dst_ep_idx != -1) {
 		/* SW data path */
-		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), flag);
 		if (!cmd) {
 			IPAERR("failed to alloc immediate command object\n");
 			goto fail_gen;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index b60c7a6..3418896 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -651,6 +651,7 @@ int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
 	struct ipa_ip_v6_filter_init *v6;
 	u16 avail;
 	u16 size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -667,7 +668,7 @@ int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
 			IPA_MEM_PART(v6_flt_size_ddr);
 		size = sizeof(struct ipa_ip_v6_filter_init);
 	}
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
@@ -840,6 +841,7 @@ int __ipa_commit_flt_v2(enum ipa_ip_type ip)
 	int num_desc = 0;
 	int i;
 	u16 avail;
+	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
 	if (desc == NULL) {
@@ -848,7 +850,7 @@ int __ipa_commit_flt_v2(enum ipa_ip_type ip)
 		goto fail_desc;
 	}
 
-	cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC);
+	cmd = kzalloc(16 * sizeof(*cmd), flag);
 	if (cmd == NULL) {
 		IPAERR("fail to alloc cmd blob ip %d\n", ip);
 		rc = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 046f77f..d657a06 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -176,6 +176,7 @@ int __ipa_commit_hdr_v1_1(void)
 	struct ipa_mem_buffer *mem;
 	struct ipa_hdr_init_local *cmd;
 	u16 len;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -190,7 +191,7 @@ int __ipa_commit_hdr_v1_1(void)
 	 * we can use init_local ptr for init_system due to layout of the
 	 * struct
 	 */
-	cmd = kmalloc(len, GFP_KERNEL);
+	cmd = kmalloc(len, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
@@ -663,6 +664,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
 	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
 	int id;
 	int mem_size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
 		IPAERR("bad parm\n");
@@ -674,7 +676,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
 		goto error;
 	}
 
-	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, flag);
 	if (!entry) {
 		IPAERR("failed to alloc hdr object\n");
 		goto error;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 96e0125..a7f983e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -325,6 +325,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
 	int result;
 	u32 offset = 0;
 	size_t tmp;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (init->table_entries == 0) {
@@ -410,7 +411,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
 
 	memset(&desc, 0, sizeof(desc));
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("no mem\n");
 		result = -ENOMEM;
@@ -428,7 +429,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
 	desc[0].pyld = (void *)reg_write_nop;
 	desc[0].len = sizeof(*reg_write_nop);
 
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("Failed to alloc immediate command object\n");
 		result = -ENOMEM;
@@ -573,6 +574,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
 	struct ipa_desc *desc = NULL;
 	u16 size = 0, cnt = 0;
 	int ret = 0;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (dma->entries <= 0) {
@@ -656,7 +658,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
 	}
 
 	size = sizeof(struct ipa_nat_dma);
-	cmd = kzalloc(size, GFP_KERNEL);
+	cmd = kzalloc(size, flag);
 	if (cmd == NULL) {
 		IPAERR("Failed to alloc memory\n");
 		ret = -ENOMEM;
@@ -664,7 +666,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
 	}
 
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("Failed to alloc memory\n");
 		ret = -ENOMEM;
@@ -758,6 +760,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
 	u8 mem_type = IPA_NAT_SHARED_MEMORY;
 	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
 	int result;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (ipa_ctx->nat_mem.is_tmp_mem) {
@@ -774,7 +777,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
 
 	memset(&desc, 0, sizeof(desc));
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("no mem\n");
 		result = -ENOMEM;
@@ -792,7 +795,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
 	desc[0].pyld = (void *)reg_write_nop;
 	desc[0].len = sizeof(*reg_write_nop);
 
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (cmd == NULL) {
 		IPAERR("Failed to alloc immediate command object\n");
 		result = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 21fdec0..5b70853 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -522,6 +522,7 @@ int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
 	struct ipa_ip_v6_routing_init *v6;
 	u16 avail;
 	u16 size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -538,7 +539,7 @@ int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
 			IPA_MEM_PART(v6_rt_size_ddr);
 		size = sizeof(struct ipa_ip_v6_routing_init);
 	}
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index a50665c..bec4264 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -72,209 +72,124 @@ static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
 
 #define INVALID_EP_MAPPING_INDEX (-1)
 
-static const int ep_mapping[3][IPA_CLIENT_MAX] = {
-	[IPA_1_1][IPA_CLIENT_HSIC1_PROD]         = 19,
-	[IPA_1_1][IPA_CLIENT_WLAN1_PROD]         = -1,
-	[IPA_1_1][IPA_CLIENT_HSIC2_PROD]         = 12,
-	[IPA_1_1][IPA_CLIENT_USB2_PROD]          = 12,
-	[IPA_1_1][IPA_CLIENT_HSIC3_PROD]         = 13,
-	[IPA_1_1][IPA_CLIENT_USB3_PROD]          = 13,
-	[IPA_1_1][IPA_CLIENT_HSIC4_PROD]         =  0,
-	[IPA_1_1][IPA_CLIENT_USB4_PROD]          =  0,
-	[IPA_1_1][IPA_CLIENT_HSIC5_PROD]         = -1,
-	[IPA_1_1][IPA_CLIENT_USB_PROD]           = 11,
-	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = 15,
-	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD]   =  8,
-	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD]   =  6,
-	[IPA_1_1][IPA_CLIENT_APPS_LAN_PROD]      = -1,
-	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  2,
-	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD]      =  1,
-	[IPA_1_1][IPA_CLIENT_ODU_PROD]           = -1,
-	[IPA_1_1][IPA_CLIENT_MHI_PROD]           = -1,
-	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        =  5,
-	[IPA_1_1][IPA_CLIENT_Q6_WAN_PROD]        = -1,
-	[IPA_1_1][IPA_CLIENT_Q6_CMD_PROD]        = -1,
-	[IPA_1_1][IPA_CLIENT_ETHERNET_PROD]      = -1,
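+/* A valid flag replaces the old -1 sentinel; unmapped clients stay zero-initialized */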
+struct ipa_ep_confing {
+	bool valid;
+	int pipe_num;
+};
 
-	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = 14,
-	[IPA_1_1][IPA_CLIENT_WLAN1_CONS]         = -1,
-	[IPA_1_1][IPA_CLIENT_HSIC2_CONS]         = 16,
-	[IPA_1_1][IPA_CLIENT_USB2_CONS]          = 16,
-	[IPA_1_1][IPA_CLIENT_WLAN2_CONS]         = -1,
-	[IPA_1_1][IPA_CLIENT_HSIC3_CONS]         = 17,
-	[IPA_1_1][IPA_CLIENT_USB3_CONS]          = 17,
-	[IPA_1_1][IPA_CLIENT_WLAN3_CONS]         = -1,
-	[IPA_1_1][IPA_CLIENT_HSIC4_CONS]         = 18,
-	[IPA_1_1][IPA_CLIENT_USB4_CONS]          = 18,
-	[IPA_1_1][IPA_CLIENT_WLAN4_CONS]         = -1,
-	[IPA_1_1][IPA_CLIENT_HSIC5_CONS]         = -1,
-	[IPA_1_1][IPA_CLIENT_USB_CONS]           = 10,
-	[IPA_1_1][IPA_CLIENT_USB_DPL_CONS]       = -1,
-	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS]   =  9,
-	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS]   =  7,
-	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS]    =  3,
-	[IPA_1_1][IPA_CLIENT_APPS_LAN_CONS]      = -1,
-	[IPA_1_1][IPA_CLIENT_APPS_WAN_CONS]      = -1,
-	[IPA_1_1][IPA_CLIENT_ODU_EMB_CONS]       = -1,
-	[IPA_1_1][IPA_CLIENT_ODU_TETH_CONS]      = -1,
-	[IPA_1_1][IPA_CLIENT_MHI_CONS]           = -1,
-	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        =  4,
-	[IPA_1_1][IPA_CLIENT_Q6_WAN_CONS]        = -1,
-	[IPA_1_1][IPA_CLIENT_ETHERNET_CONS]      = -1,
+static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = {
+	[IPA_1_1][IPA_CLIENT_HSIC1_PROD]         = {true, 19},
+	[IPA_1_1][IPA_CLIENT_HSIC2_PROD]         = {true, 12},
+	[IPA_1_1][IPA_CLIENT_USB2_PROD]          = {true, 12},
+	[IPA_1_1][IPA_CLIENT_HSIC3_PROD]         = {true, 13},
+	[IPA_1_1][IPA_CLIENT_USB3_PROD]          = {true, 13},
+	[IPA_1_1][IPA_CLIENT_HSIC4_PROD]         = {true,  0},
+	[IPA_1_1][IPA_CLIENT_USB4_PROD]          = {true,  0},
+	[IPA_1_1][IPA_CLIENT_USB_PROD]           = {true, 11},
+	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15},
+	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD]   = {true,  8},
+	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD]   = {true,  6},
+	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  2},
+	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD]      = {true,  1},
+	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        = {true,  5},
+
+	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = {true, 14},
+	[IPA_1_1][IPA_CLIENT_HSIC2_CONS]         = {true, 16},
+	[IPA_1_1][IPA_CLIENT_USB2_CONS]          = {true, 16},
+	[IPA_1_1][IPA_CLIENT_HSIC3_CONS]         = {true, 17},
+	[IPA_1_1][IPA_CLIENT_USB3_CONS]          = {true, 17},
+	[IPA_1_1][IPA_CLIENT_HSIC4_CONS]         = {true, 18},
+	[IPA_1_1][IPA_CLIENT_USB4_CONS]          = {true, 18},
+	[IPA_1_1][IPA_CLIENT_USB_CONS]           = {true, 10},
+	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS]   = {true,  9},
+	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS]   = {true,  7},
+	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS]    = {true,  3},
+	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        = {true,  4},
 
 
-	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = 12,
-	[IPA_2_0][IPA_CLIENT_WLAN1_PROD]         = 18,
-	[IPA_2_0][IPA_CLIENT_HSIC2_PROD]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB2_PROD]          = 12,
-	[IPA_2_0][IPA_CLIENT_HSIC3_PROD]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB3_PROD]          = 13,
-	[IPA_2_0][IPA_CLIENT_HSIC4_PROD]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB4_PROD]          =  0,
-	[IPA_2_0][IPA_CLIENT_HSIC5_PROD]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB_PROD]           = 11,
-	[IPA_2_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
-	[IPA_2_0][IPA_CLIENT_A2_EMBEDDED_PROD]   = -1,
-	[IPA_2_0][IPA_CLIENT_A2_TETHERED_PROD]   = -1,
-	[IPA_2_0][IPA_CLIENT_APPS_LAN_PROD]      = -1,
-	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  4,
-	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD]      =  3,
-	[IPA_2_0][IPA_CLIENT_ODU_PROD]           = 12,
-	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = 18,
-	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        =  6,
-	[IPA_2_0][IPA_CLIENT_Q6_WAN_PROD]	 = -1,
-	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        =  7,
-	[IPA_2_0][IPA_CLIENT_Q6_DECOMP_PROD]     = -1,
-	[IPA_2_0][IPA_CLIENT_Q6_DECOMP2_PROD]    = -1,
+	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = {true, 12},
+	[IPA_2_0][IPA_CLIENT_WLAN1_PROD]         = {true, 18},
+	[IPA_2_0][IPA_CLIENT_USB2_PROD]          = {true, 12},
+	[IPA_2_0][IPA_CLIENT_USB3_PROD]          = {true, 13},
+	[IPA_2_0][IPA_CLIENT_USB4_PROD]          = {true,  0},
+	[IPA_2_0][IPA_CLIENT_USB_PROD]           = {true, 11},
+	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  4},
+	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD]      = {true,  3},
+	[IPA_2_0][IPA_CLIENT_ODU_PROD]           = {true, 12},
+	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = {true, 18},
+	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
+	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
-						 =  12,
+						 = {true, 12},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
-						 =  19,
-	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD]      = 12,
+						 = {true, 19},
+	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD]      = {true, 12},
 	/* Only for test purpose */
-	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = 19,
-	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = 19,
-	[IPA_2_0][IPA_CLIENT_TEST2_PROD]         = 12,
-	[IPA_2_0][IPA_CLIENT_TEST3_PROD]         = 11,
-	[IPA_2_0][IPA_CLIENT_TEST4_PROD]         =  0,
+	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = {true, 19},
+	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = {true, 19},
+	[IPA_2_0][IPA_CLIENT_TEST2_PROD]         = {true, 12},
+	[IPA_2_0][IPA_CLIENT_TEST3_PROD]         = {true, 11},
+	[IPA_2_0][IPA_CLIENT_TEST4_PROD]         = {true,  0},
 
-	[IPA_2_0][IPA_CLIENT_HSIC1_CONS]         = 13,
-	[IPA_2_0][IPA_CLIENT_WLAN1_CONS]         = 17,
-	[IPA_2_0][IPA_CLIENT_HSIC2_CONS]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB2_CONS]          = -1,
-	[IPA_2_0][IPA_CLIENT_WLAN2_CONS]         = 16,
-	[IPA_2_0][IPA_CLIENT_HSIC3_CONS]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB3_CONS]          = -1,
-	[IPA_2_0][IPA_CLIENT_WLAN3_CONS]         = 14,
-	[IPA_2_0][IPA_CLIENT_HSIC4_CONS]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB4_CONS]          = -1,
-	[IPA_2_0][IPA_CLIENT_WLAN4_CONS]         = 19,
-	[IPA_2_0][IPA_CLIENT_HSIC5_CONS]         = -1,
-	[IPA_2_0][IPA_CLIENT_USB_CONS]           = 15,
-	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS]       =  0,
-	[IPA_2_0][IPA_CLIENT_A2_EMBEDDED_CONS]   = -1,
-	[IPA_2_0][IPA_CLIENT_A2_TETHERED_CONS]   = -1,
-	[IPA_2_0][IPA_CLIENT_A5_LAN_WAN_CONS]    = -1,
-	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS]      =  2,
-	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS]      =  5,
-	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS]       = 13,
-	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS]      =  1,
-	[IPA_2_0][IPA_CLIENT_MHI_CONS]           = 17,
-	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS]        =  8,
-	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS]        =  9,
-	[IPA_2_0][IPA_CLIENT_Q6_DUN_CONS]        = -1,
-	[IPA_2_0][IPA_CLIENT_Q6_DECOMP_CONS]     = -1,
-	[IPA_2_0][IPA_CLIENT_Q6_DECOMP2_CONS]    = -1,
+	[IPA_2_0][IPA_CLIENT_HSIC1_CONS]         = {true, 13},
+	[IPA_2_0][IPA_CLIENT_WLAN1_CONS]         = {true, 17},
+	[IPA_2_0][IPA_CLIENT_WLAN2_CONS]         = {true, 16},
+	[IPA_2_0][IPA_CLIENT_WLAN3_CONS]         = {true, 14},
+	[IPA_2_0][IPA_CLIENT_WLAN4_CONS]         = {true, 19},
+	[IPA_2_0][IPA_CLIENT_USB_CONS]           = {true, 15},
+	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS]       = {true,  0},
+	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS]      = {true,  2},
+	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS]      = {true,  5},
+	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS]       = {true, 13},
+	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS]      = {true,  1},
+	[IPA_2_0][IPA_CLIENT_MHI_CONS]           = {true, 17},
+	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS]        = {true,  8},
+	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS]        = {true,  9},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
-						 =  13,
+						 = {true, 13},
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
-						 =  16,
+						 = {true, 16},
 	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
-						 =  10,
-	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS]      = 1,
+						 = {true, 10},
+	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS]      = {true,  1},
+
 	/* Only for test purpose */
-	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = 1,
-	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = 1,
-	[IPA_2_0][IPA_CLIENT_TEST2_CONS]         = 16,
-	[IPA_2_0][IPA_CLIENT_TEST3_CONS]         = 13,
-	[IPA_2_0][IPA_CLIENT_TEST4_CONS]         = 15,
+	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = {true,  1},
+	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = {true,  1},
+	[IPA_2_0][IPA_CLIENT_TEST2_CONS]         = {true, 16},
+	[IPA_2_0][IPA_CLIENT_TEST3_CONS]         = {true, 13},
+	[IPA_2_0][IPA_CLIENT_TEST4_CONS]         = {true, 15},
 
 
-	[IPA_2_6L][IPA_CLIENT_HSIC1_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_WLAN1_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC2_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB2_PROD]          = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC3_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB3_PROD]          = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC4_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB4_PROD]          = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC5_PROD]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB_PROD]           =  1,
-	[IPA_2_6L][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
-	[IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_PROD]   = -1,
-	[IPA_2_6L][IPA_CLIENT_A2_TETHERED_PROD]   = -1,
-	[IPA_2_6L][IPA_CLIENT_APPS_LAN_PROD]      = -1,
-	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  4,
-	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD]      =  3,
-	[IPA_2_6L][IPA_CLIENT_ODU_PROD]           = -1,
-	[IPA_2_6L][IPA_CLIENT_MHI_PROD]           = -1,
-	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD]        =  6,
-	[IPA_2_6L][IPA_CLIENT_Q6_WAN_PROD]	  = -1,
-	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD]        =  7,
-	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD]     = 11,
-	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD]    = 13,
-	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
-						 =  -1,
-	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
-						 =  -1,
-	[IPA_2_6L][IPA_CLIENT_ETHERNET_PROD]      = -1,
-	/* Only for test purpose */
-	[IPA_2_6L][IPA_CLIENT_TEST_PROD]          = 11,
-	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]         = 11,
-	[IPA_2_6L][IPA_CLIENT_TEST2_PROD]         = 12,
-	[IPA_2_6L][IPA_CLIENT_TEST3_PROD]         = 13,
-	[IPA_2_6L][IPA_CLIENT_TEST4_PROD]         = 14,
+	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD]  = {true,  4},
+	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD]      = {true,  3},
+	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD]        = {true,  6},
+	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD]        = {true,  7},
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD]     = {true, 11},
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD]    = {true, 13},
 
-	[IPA_2_6L][IPA_CLIENT_HSIC1_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_WLAN1_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC2_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB2_CONS]          = -1,
-	[IPA_2_6L][IPA_CLIENT_WLAN2_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC3_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB3_CONS]          = -1,
-	[IPA_2_6L][IPA_CLIENT_WLAN3_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC4_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB4_CONS]          = -1,
-	[IPA_2_6L][IPA_CLIENT_WLAN4_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_HSIC5_CONS]         = -1,
-	[IPA_2_6L][IPA_CLIENT_USB_CONS]           =  0,
-	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS]       = 10,
-	[IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_CONS]   = -1,
-	[IPA_2_6L][IPA_CLIENT_A2_TETHERED_CONS]   = -1,
-	[IPA_2_6L][IPA_CLIENT_A5_LAN_WAN_CONS]    = -1,
-	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS]      =  2,
-	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS]      =  5,
-	[IPA_2_6L][IPA_CLIENT_ODU_EMB_CONS]       = -1,
-	[IPA_2_6L][IPA_CLIENT_ODU_TETH_CONS]      = -1,
-	[IPA_2_6L][IPA_CLIENT_MHI_CONS]           = -1,
-	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS]        =  8,
-	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS]        =  9,
-	[IPA_2_6L][IPA_CLIENT_Q6_DUN_CONS]        = -1,
-	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS]     = 12,
-	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS]    = 14,
-	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
-						 =  -1,
-	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
-						 =  -1,
-	[IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
-						 =  -1,
-	[IPA_2_6L][IPA_CLIENT_ETHERNET_CONS]      = -1,
 	/* Only for test purpose */
-	[IPA_2_6L][IPA_CLIENT_TEST_CONS]          = 15,
-	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]         = 15,
-	[IPA_2_6L][IPA_CLIENT_TEST2_CONS]         = 0,
-	[IPA_2_6L][IPA_CLIENT_TEST3_CONS]         = 1,
-	[IPA_2_6L][IPA_CLIENT_TEST4_CONS]         = 10,
+	[IPA_2_6L][IPA_CLIENT_TEST_PROD]          = {true, 11},
+	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]         = {true, 11},
+	[IPA_2_6L][IPA_CLIENT_TEST2_PROD]         = {true, 12},
+	[IPA_2_6L][IPA_CLIENT_TEST3_PROD]         = {true, 13},
+	[IPA_2_6L][IPA_CLIENT_TEST4_PROD]         = {true, 14},
+
+	[IPA_2_6L][IPA_CLIENT_USB_CONS]           = {true,  0},
+	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS]       = {true, 10},
+	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS]      = {true,  2},
+	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS]      = {true,  5},
+	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS]        = {true,  8},
+	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS]        = {true,  9},
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS]     = {true, 12},
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS]    = {true, 14},
+
+	/* Only for test purpose */
+	[IPA_2_6L][IPA_CLIENT_TEST_CONS]          = {true, 15},
+	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]         = {true, 15},
+	[IPA_2_6L][IPA_CLIENT_TEST2_CONS]         = {true,  0},
+	[IPA_2_6L][IPA_CLIENT_TEST3_CONS]         = {true,  1},
+	[IPA_2_6L][IPA_CLIENT_TEST4_CONS]         = {true, 10},
 };
 
 static struct msm_bus_vectors ipa_init_vectors_v1_1[]  = {
@@ -949,7 +864,10 @@ int ipa2_get_ep_mapping(enum ipa_client_type client)
 		break;
 	}
 
-	return ep_mapping[hw_type_index][client];
+	if (!ep_mapping[hw_type_index][client].valid)
+		return INVALID_EP_MAPPING_INDEX;
+
+	return ep_mapping[hw_type_index][client].pipe_num;
 }
 
 /* ipa2_set_client() - provide client mapping
@@ -4522,6 +4440,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	int res;
 	struct ipa_tag_completion *comp;
 	int ep_idx;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	/* Not enough room for the required descriptors for the tag process */
 	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
@@ -4539,7 +4458,7 @@ int ipa_tag_process(struct ipa_desc desc[],
 	}
 	sys = ipa_ctx->ep[ep_idx].sys;
 
-	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
 	if (!tag_desc) {
 		IPAERR("failed to allocate memory\n");
 		res = -ENOMEM;
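/*
 * Note on the ipa_v2 hunks above (illustrative sketch only, not the driver's
 * exact code): the endpoint lookup now consults an explicit validity flag
 * instead of a -1 sentinel, and the tag-process allocation honours the DMA
 * zone restriction via the GFP_DMA flag. The names ep_map_entry and the
 * INVALID_EP_MAPPING_INDEX value below are assumptions for the example.
 */
#include <stdbool.h>

#define INVALID_EP_MAPPING_INDEX (-1)

struct ep_map_entry {
	bool valid;    /* entry explicitly listed in the mapping table */
	int pipe_num;  /* hardware pipe number, meaningful only when valid */
};

/*
 * Lookup replacing the old "-1 sentinel" convention: entries absent from the
 * designated-initializer table are zero-initialized, so .valid is false and
 * the caller receives the invalid index instead of a stale pipe number.
 */
static int ep_lookup(const struct ep_map_entry *map, int client)
{
	if (!map[client].valid)
		return INVALID_EP_MAPPING_INDEX;
	return map[client].pipe_num;
}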
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 4672233..bcd602c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -80,6 +80,7 @@ static void *subsys_notify_handle;
 
 u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
 static struct mutex ipa_to_apps_pipe_handle_guard;
+static struct mutex add_mux_channel_lock;
 static int wwan_add_ul_flt_rule_to_ipa(void);
 static int wwan_del_ul_flt_rule_to_ipa(void);
 static void ipa_wwan_msg_free_cb(void*, u32, u32);
@@ -1527,9 +1528,11 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&add_mux_channel_lock);
 			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_index);
+				mutex_unlock(&add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1558,6 +1561,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1570,6 +1574,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_index++;
+			mutex_unlock(&add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
@@ -3084,6 +3089,7 @@ static int __init ipa_wwan_init(void)
 	atomic_set(&is_ssr, 0);
 
 	mutex_init(&ipa_to_apps_pipe_handle_guard);
+	mutex_init(&add_mux_channel_lock);
 	ipa_to_apps_hdl = -1;
 
 	ipa_qmi_init();
@@ -3103,6 +3109,7 @@ static void __exit ipa_wwan_cleanup(void)
 
 	ipa_qmi_cleanup();
 	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+	mutex_destroy(&add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
 					&ssr_notifier);
 	if (ret)
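/*
 * The rmnet_ipa.c hunks above serialize the ADD_MUX_CHANNEL ioctl path with a
 * dedicated mutex so the rmnet_index bound check, the table update and the
 * increment cannot race, and every error path drops the lock before returning.
 * Hedged, standalone sketch of that pattern (hypothetical names; pthreads is
 * used only so the example compiles outside the kernel, where a struct mutex
 * is used instead):
 */
#include <pthread.h>
#include <errno.h>

#define MAX_NUM_OF_MUX_CHANNEL 10   /* assumed value for the example */

static pthread_mutex_t add_mux_channel_lock = PTHREAD_MUTEX_INITIALIZER;
static int rmnet_index;

static int add_mux_channel(void)
{
	pthread_mutex_lock(&add_mux_channel_lock);
	if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
		/* error path must release the lock before returning */
		pthread_mutex_unlock(&add_mux_channel_lock);
		return -EFAULT;
	}
	/* ... register the channel and fill mux_channel[rmnet_index] ... */
	rmnet_index++;
	pthread_mutex_unlock(&add_mux_channel_lock);
	return 0;
}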
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index bf13ac5..04d807f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3659,7 +3659,6 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 	struct ipa_mem_buffer mem_info = {0};
 	static int total_cnt;
 
-	IPADBG("\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 		ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm 0x%x\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 73a405f..86442b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -139,9 +139,6 @@
 #define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
 #define IPA_INVALID_L4_PROTOCOL 0xFF
 
-#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
-#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
-
 #define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
 #define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
 	(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index f8b4d7d..ab26893 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -93,11 +93,6 @@
 #define QMB_MASTER_SELECT_DDR  (0)
 #define QMB_MASTER_SELECT_PCIE (1)
 
-#define IPA_CLIENT_NOT_USED \
-	{ IPA_EP_NOT_ALLOCATED, IPA_EP_NOT_ALLOCATED, false, \
-	IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, \
-		{ -1, -1, -1, -1, -1 } }
-
 /* Resource Group index*/
 #define IPA_v3_0_GROUP_UL		(0)
 #define IPA_v3_0_GROUP_DL		(1)
@@ -406,7 +401,7 @@ enum ipa_ees {
 };
 
 struct ipa_ep_configuration {
-	int pipe_num;
+	bool valid;
 	int group_num;
 	bool support_flt;
 	int sequencer_type;
@@ -414,506 +409,424 @@ struct ipa_ep_configuration {
 	struct ipa_gsi_ep_config ipa_gsi_ep_info;
 };
 
+/* clients not included in the list below are considered invalid */
 static const struct ipa_ep_configuration ipa3_ep_mapping
 					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
-	[IPA_3_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {
-			10, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 1, 8, 16, IPA_EE_UC } },
-	[IPA_3_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {
-			1, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 3, 8, 16, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = {
-			14, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 11, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = {
-			3, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 5, 16, 32, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			22, IPA_v3_0_GROUP_IMM_CMD, false,
+			true, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 6, 18, 28, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {
-			12, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {
-			0, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {
-			9, IPA_v3_0_GROUP_UL, false,
+			true, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 4, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {
-			5, IPA_v3_0_GROUP_DL, true,
+			true, IPA_v3_0_GROUP_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 0, 16, 32, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD] = {
-			6, IPA_v3_0_GROUP_IMM_CMD, false,
+			true, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 1, 18, 28, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {
-			7, IPA_v3_0_GROUP_Q6ZIP,
+			true, IPA_v3_0_GROUP_Q6ZIP,
 			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 2, 0, 0, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {
-			8, IPA_v3_0_GROUP_Q6ZIP,
+			true, IPA_v3_0_GROUP_Q6ZIP,
 			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 3, 0, 0, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
-			12, IPA_v3_0_GROUP_DMA, false,
+			true, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_PCIE,
 			{ 12, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
-			13, IPA_v3_0_GROUP_DMA, false,
+			true, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_PCIE,
 			{ 13, 10, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_ETHERNET_PROD]          = {
-			2, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{2, 0, 8, 16, IPA_EE_UC} },
 	/* Only for test purpose */
 	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {
-			1, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 3, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {
-			1, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 3, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {
-			3, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 5, 16, 32, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {
-			12, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {
-			13, IPA_v3_0_GROUP_UL, true,
+			true, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 10, 8, 16, IPA_EE_AP } },
 
-	[IPA_3_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {
-			25, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 25, 4, 8, 8, IPA_EE_UC } },
-	[IPA_3_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {
-			27, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 27, 4, 8, 8, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {
-			28, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 28, 13, 8, 8, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {
-			29, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 29, 14, 8, 8, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {
-			26, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {
-			17, IPA_v3_0_GROUP_DPL, false,
+			true, IPA_v3_0_GROUP_DPL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 2, 8, 12, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {
-			15, IPA_v3_0_GROUP_UL, false,
+			true, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 7, 8, 12, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {
-			16, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 8, 8, 12, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {
-			23, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 23, 1, 8, 8, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {
-			23, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 23, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {
-			19, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 6, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {
-			18, IPA_v3_0_GROUP_UL, false,
+			true, IPA_v3_0_GROUP_UL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 5, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {
-			30, IPA_v3_0_GROUP_DIAG, false,
+			true, IPA_v3_0_GROUP_DIAG, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 30, 7, 4, 4, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS] = {
-			21, IPA_v3_0_GROUP_Q6ZIP, false,
+			true, IPA_v3_0_GROUP_Q6ZIP, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 21, 8, 4, 4, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS] = {
-			4, IPA_v3_0_GROUP_Q6ZIP, false,
+			true, IPA_v3_0_GROUP_Q6ZIP, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 9, 4, 4, IPA_EE_Q6 } },
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
-			28, IPA_v3_0_GROUP_DMA, false,
+			true, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 28, 13, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
-			29, IPA_v3_0_GROUP_DMA, false,
+			true, IPA_v3_0_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 29, 14, 8, 8, IPA_EE_AP } },
-	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_0][IPA_CLIENT_ETHERNET_CONS]          = {
-			24, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{24, 3, 8, 8, IPA_EE_UC} },
 	/* Only for test purpose */
 	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {
-			26, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {
-			26, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 26, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {
-			27, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 27, 4, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {
-			28, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 28, 13, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {
-			29, IPA_v3_0_GROUP_DL, false,
+			true, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 29, 14, 8, 8, IPA_EE_AP } },
 
 
 	/* IPA_3_5 */
-	[IPA_3_5][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_WLAN1_PROD]          = {
-			6, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 1, 8, 16, IPA_EE_UC } },
-	[IPA_3_5][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_USB_PROD]            = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 7, 8, 16, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_APPS_LAN_PROD]   = {
-			8, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = {
-			2, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			5, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 4, 20, 23, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_ODU_PROD]            = {
-			1, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_UC } },
-	[IPA_3_5][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
-	[IPA_3_5][IPA_CLIENT_Q6_WAN_PROD]         = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			4, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
-	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_ETHERNET_PROD]         = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5][IPA_CLIENT_TEST_PROD]           = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST1_PROD]          = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST2_PROD]          = {
-			1, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST3_PROD]          = {
-			7, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{7, 8, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST4_PROD]          = {
-			8, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
 
-	[IPA_3_5][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_WLAN1_CONS]          = {
-			16, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 3, 8, 8, IPA_EE_UC } },
-	[IPA_3_5][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_WLAN2_CONS]          = {
-			18, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 12, 8, 8, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_WLAN3_CONS]          = {
-			19, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_USB_CONS]            = {
-			17, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_USB_DPL_CONS]        = {
-			14, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 10, 4, 6, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_APPS_LAN_CONS]       = {
-			9, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 5, 8, 12, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_APPS_WAN_CONS]       = {
-			10, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 6, 8, 12, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_ODU_EMB_CONS]        = {
-			15, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_CONS]         = {
-			13, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 3, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_5][IPA_CLIENT_Q6_WAN_CONS]         = {
-			12, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 2, 8, 12, IPA_EE_Q6 } },
-	[IPA_3_5][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_ETHERNET_CONS]	  = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_3_5][IPA_CLIENT_TEST_CONS]           = {
-			15, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST1_CONS]           = {
-			15, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST2_CONS]          = {
-			17, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST3_CONS]          = {
-			18, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST4_CONS]          = {
-			19, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_MHI */
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
-			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 7, 8, 16, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
-			2, IPA_v3_5_MHI_GROUP_DDR, true,
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			5, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 4, 20, 23, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_MHI_PROD]            = {
-			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			true, IPA_v3_5_MHI_GROUP_PCIE, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v3_5_MHI_GROUP_DDR, true,
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
-			6, IPA_v3_5_MHI_GROUP_DDR, true,
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 4, 10, 30, IPA_EE_Q6 } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			4, IPA_v3_5_MHI_GROUP_PCIE, false,
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
-			7, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 8, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
-			8, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_ETHERNET_PROD]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD]           = {
-			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			true, IPA_v3_5_MHI_GROUP_DDR, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 7, 8, 16, IPA_EE_AP } },
@@ -923,300 +836,238 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			QMB_MASTER_SELECT_DDR,
 			{0, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD]          = {
-			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			true, IPA_v3_5_MHI_GROUP_PCIE, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD]          = {
-			7, IPA_v3_5_MHI_GROUP_DMA, true,
+			true, IPA_v3_5_MHI_GROUP_DMA, true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{7, 8, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD]          = {
-			8, IPA_v3_5_MHI_GROUP_DMA, true,
+			true, IPA_v3_5_MHI_GROUP_DMA, true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
 
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS]          = {
-			16, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 3, 8, 8, IPA_EE_UC } },
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_WLAN2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_WLAN3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_USB_CONS]            = {
-			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
-			14, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 10, 4, 6, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
-			9, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 5, 8, 12, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
-			10, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 6, 8, 12, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_MHI_CONS]            = {
-			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
-			13, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 3, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
-			12, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 2, 8, 12, IPA_EE_Q6 } },
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_DUN_CONS]		= IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_CONS]	= IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_CONS]	= IPA_CLIENT_NOT_USED,
 	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
-			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 18, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
-			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
-	[IPA_3_5_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]	= IPA_CLIENT_NOT_USED,
-	[IPA_3_5_MHI][IPA_CLIENT_ETHERNET_CONS]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS]           = {
-			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS]           = {
-			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			true, IPA_v3_5_MHI_GROUP_PCIE, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS]          = {
-			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			true, IPA_v3_5_MHI_GROUP_DDR, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS]          = {
-			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 18, 12, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS]          = {
-			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			true, IPA_v3_5_MHI_GROUP_DMA, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_1 */
-	[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
-			7, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 1, 8, 16, IPA_EE_UC } },
-	[IPA_3_5_1][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = {
-			8, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 7, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = {
-			2, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
-			5, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 4, 20, 23, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
 	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD]         = {
-			6, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 4, 12, 30, IPA_EE_Q6 } },
 	[IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD]	    = {
-			4, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
-	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_ETHERNET_PROD]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_1][IPA_CLIENT_TEST_PROD]           = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST1_PROD]          = {
-			0, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 0, 8, 16, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST2_PROD]          = {
-			2, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST3_PROD]          = {
-			4, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
 	[IPA_3_5_1][IPA_CLIENT_TEST4_PROD]          = {
-			1, IPA_v3_5_GROUP_UL_DL, true,
+			true, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_UC } },
 
-	[IPA_3_5_1][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_WLAN1_CONS]          = {
-			16, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 3, 8, 8, IPA_EE_UC } },
-	[IPA_3_5_1][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS]          =  {
-			18, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 9, 8, 8, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_WLAN3_CONS]          =  {
-			19, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 10, 8, 8, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_USB_CONS]            = {
-			17, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 8, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS]        = {
-			11, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 2, 4, 6, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS]       = {
-			9, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 5, 8, 12, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS]       = {
-			10, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 6, 8, 12, IPA_EE_AP } },
-	[IPA_3_5_1][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS]         = {
-			13, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 3, 8, 12, IPA_EE_Q6 } },
 	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS]         = {
-			12, IPA_v3_5_GROUP_UL_DL, false,
+			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 2, 8, 12, IPA_EE_Q6 } },
-	[IPA_3_5_1][IPA_CLIENT_Q6_DUN_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_CONS]	      = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_CONS]	      = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
-	[IPA_3_5_1][IPA_CLIENT_ETHERNET_CONS]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_1][IPA_CLIENT_TEST_CONS]           = {
-			17, IPA_v3_5_GROUP_UL_DL,
+			true, IPA_v3_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 8, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST1_CONS]          = {
-			17, IPA_v3_5_GROUP_UL_DL,
+			true, IPA_v3_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 8, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST2_CONS]          = {
-			18, IPA_v3_5_GROUP_UL_DL,
+			true, IPA_v3_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 9, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST3_CONS]          = {
-			19, IPA_v3_5_GROUP_UL_DL,
+			true, IPA_v3_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 10, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_1][IPA_CLIENT_TEST4_CONS]          = {
-			11, IPA_v3_5_GROUP_UL_DL,
+			true, IPA_v3_5_GROUP_UL_DL,
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
@@ -1224,391 +1075,379 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 
 
 	/* IPA_4_0 */
-	[IPA_4_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_WLAN1_PROD]          = {
-			7, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 9, 8, 16, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_USB_PROD]            = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 8, 8, 16, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_PROD]   = {
-			8, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 10, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_PROD] = {
-			2, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			5, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 4, 20, 24, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 0, 1, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
-			9, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 9, 0, 8, 16, IPA_EE_UC } },
-	[IPA_4_0][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_Q6_LAN_PROD]         = {
-			6, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 2, 12, 24, IPA_EE_Q6 } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_PROD]         = {
-			3, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
 	[IPA_4_0][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			4, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 24, IPA_EE_Q6 } },
-	[IPA_4_0][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_4_0][IPA_CLIENT_TEST_PROD]           = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 8, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 8, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST2_PROD]          = {
-			1, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST3_PROD]          = {
-			7, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST4_PROD]          = {
-			8, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 10, 8, 16, IPA_EE_AP } },
 
 
-	[IPA_4_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_WLAN1_CONS]          = {
-			18, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 12, 6, 9, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_WLAN2_CONS]          = {
-			20, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 14, 9, 9, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_WLAN3_CONS]          = {
-			21, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_USB_CONS]            = {
-			19, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_USB_DPL_CONS]        = {
-			15, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 7, 5, 5, IPA_EE_AP } },
-	[IPA_4_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_APPS_LAN_CONS]       = {
-			10, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 5, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_APPS_WAN_CONS]       = {
-			11, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 6, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ODU_EMB_CONS]        = {
-			17, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 17, 1, 17, 17, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_CONS]	  = {
-			22, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 22, 1, 17, 17, IPA_EE_UC } },
-	[IPA_4_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_Q6_LAN_CONS]         = {
-			14, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 4, 9, 9, IPA_EE_Q6 } },
 	[IPA_4_0][IPA_CLIENT_Q6_WAN_CONS]         = {
-			13, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 3, 9, 9, IPA_EE_Q6 } },
-	[IPA_4_0][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_Q6_DECOMP_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_Q6_DECOMP2_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
-	[IPA_4_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_4_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
-			16, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 5, 9, 9, IPA_EE_Q6 } },
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_0][IPA_CLIENT_TEST_CONS]           = {
-			12, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST1_CONS]           = {
-			12, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST2_CONS]          = {
-			18, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 18, 12, 6, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST3_CONS]          = {
-			20, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 14, 9, 9, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST4_CONS]          = {
-			21, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
 
 	/* IPA_4_0_MHI */
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_WLAN1_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_USB_PROD]            = {
-			0, IPA_v4_0_MHI_GROUP_DDR, true,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 0, 8, 8, 16, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
-			2, IPA_v4_0_MHI_GROUP_DDR, true,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 2, 3, 16, 32, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
-			5, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 5, 4, 20, 24, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_PROD]            = {
-			1, IPA_v4_0_MHI_GROUP_PCIE, true,
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_PCIE,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v4_0_MHI_GROUP_DDR, true,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
-			6, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 2, 12, 24, IPA_EE_Q6 } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
-			4, IPA_v4_0_MHI_GROUP_PCIE, false,
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 4, 1, 20, 24, IPA_EE_Q6 } },
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
-			7, IPA_v4_0_MHI_GROUP_DMA, false,
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
-			8, IPA_v4_0_MHI_GROUP_DMA, false,
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 10, 8, 16, IPA_EE_AP } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_PROD]           = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 8, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_TEST1_PROD]          = {
-			0, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{0, 8, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_PROD]          = {
-			1, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_PROD]          = {
-			7, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_PROD]          = {
-			8, IPA_v4_0_GROUP_UL_DL, true,
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 10, 8, 16, IPA_EE_AP } },
 
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_WLAN1_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_WLAN2_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_WLAN3_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_USB_CONS]            = {
-			19, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 19, 13, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
-			15, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 7, 5, 5, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
-			10, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 10, 5, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
-			11, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 11, 6, 9, 9, IPA_EE_AP } },
-	[IPA_4_0_MHI][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_MHI_CONS]            = {
-			17, IPA_v4_0_MHI_GROUP_PCIE, false,
+			true, IPA_v4_0_MHI_GROUP_PCIE,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 17, 1, 17, 17, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
-			14, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 14, 4, 9, 9, IPA_EE_Q6 } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
-			13, IPA_v4_0_MHI_GROUP_DDR, false,
+			true, IPA_v4_0_MHI_GROUP_DDR,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 13, 3, 9, 9, IPA_EE_Q6 } },
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_DUN_CONS]		= IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP_CONS]	= IPA_CLIENT_NOT_USED,
-	[IPA_4_0_MHI][IPA_CLIENT_Q6_DECOMP2_CONS]	= IPA_CLIENT_NOT_USED,
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
-			20, IPA_v4_0_MHI_GROUP_DMA, false,
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 20, 14, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
-			21, IPA_v4_0_MHI_GROUP_DMA, false,
+			true, IPA_v4_0_MHI_GROUP_DMA,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = {
-			16, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 16, 5, 9, 9, IPA_EE_Q6 } },
 	/* Only for test purpose */
 	[IPA_4_0_MHI][IPA_CLIENT_TEST_CONS]           = {
-			12, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST1_CONS]           = {
-			12, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 12, 2, 5, 5, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST2_CONS]          = {
-			18, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 18, 12, 6, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST3_CONS]          = {
-			20, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 20, 14, 9, 9, IPA_EE_AP } },
 	[IPA_4_0_MHI][IPA_CLIENT_TEST4_CONS]          = {
-			21, IPA_v4_0_GROUP_UL_DL, false,
+			true, IPA_v4_0_GROUP_UL_DL,
+			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_PCIE,
 			{ 21, 15, 9, 9, IPA_EE_AP } },
@@ -2204,7 +2043,11 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
 		return IPA_EP_NOT_ALLOCATED;
 	}
 
-	ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
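+	/* entries left invalid have no pipe on this hardware version */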
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return IPA_EP_NOT_ALLOCATED;
+
+	ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].
+		ipa_gsi_ep_info.ipa_ep_num;
 	if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES)
 		return IPA_EP_NOT_ALLOCATED;
 
@@ -2226,6 +2069,9 @@ const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	if (ep_idx == IPA_EP_NOT_ALLOCATED)
 		return NULL;
 
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return NULL;
+
 	return &(ipa3_ep_mapping[ipa3_get_hw_type_index()]
 		[client].ipa_gsi_ep_info);
 }
@@ -2243,6 +2089,9 @@ int ipa_get_ep_group(enum ipa_client_type client)
 		return -EINVAL;
 	}
 
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return -EINVAL;
+
 	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
 }
 
@@ -2259,6 +2108,9 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
 		return -EINVAL;
 	}
 
+	if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
+		return -EINVAL;
+
 	return ipa3_ep_mapping[ipa3_get_hw_type_index()]
 		[client].qmb_master_sel;
 }
@@ -2404,6 +2256,7 @@ void ipa_init_ep_flt_bitmap(void)
 	enum ipa_client_type cl;
 	u8 hw_type_idx = ipa3_get_hw_type_index();
 	u32 bitmap;
+	u32 pipe_num;
 
 	bitmap = 0;
 
@@ -2411,8 +2264,9 @@ void ipa_init_ep_flt_bitmap(void)
 
 	for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
 		if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
-			bitmap |=
-				(1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
+			pipe_num = ipa3_ep_mapping[hw_type_idx][cl].
+				ipa_gsi_ep_info.ipa_ep_num;
+			bitmap |= (1U << pipe_num);
 			if (bitmap != ipa3_ctx->ep_flt_bitmap) {
 				ipa3_ctx->ep_flt_bitmap = bitmap;
 				ipa3_ctx->ep_flt_num++;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a15bd04..b198348 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -141,6 +141,7 @@ struct rmnet_ipa3_context {
 	u32 apps_to_ipa3_hdl;
 	u32 ipa3_to_apps_hdl;
 	struct mutex pipe_handle_guard;
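+	/* serializes ADD_MUX_CHANNEL handling in the wwan ioctl path */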
+	struct mutex add_mux_channel_lock;
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1636,10 +1637,13 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			if (rmnet_ipa3_ctx->rmnet_index
 				>= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_ipa3_ctx->rmnet_index);
+				mutex_unlock(&rmnet_ipa3_ctx->
+					add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1673,6 +1677,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&rmnet_ipa3_ctx->
+						add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1685,6 +1691,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_ipa3_ctx->rmnet_index++;
+			mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			rc = handle3_egress_format(dev, &extend_ioctl_data);
@@ -3204,6 +3211,7 @@ static int __init ipa3_wwan_init(void)
 	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
 
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
 
@@ -3222,8 +3230,10 @@ static int __init ipa3_wwan_init(void)
 static void __exit ipa3_wwan_cleanup(void)
 {
 	int ret;
+
 	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
 	if (ret)
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 46dc148..cd76ca2 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -119,7 +119,8 @@ static ssize_t power_supply_show_property(struct device *dev,
 	else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
 		return scnprintf(buf, PAGE_SIZE, "%s\n",
 				capacity_level_text[value.intval]);
-	else if (off == POWER_SUPPLY_PROP_TYPE)
+	else if (off == POWER_SUPPLY_PROP_TYPE ||
+			off == POWER_SUPPLY_PROP_REAL_TYPE)
 		return scnprintf(buf, PAGE_SIZE, "%s\n",
 				type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_SCOPE)
@@ -308,6 +309,7 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(connector_health),
 	POWER_SUPPLY_ATTR(ctm_current_max),
 	POWER_SUPPLY_ATTR(hw_current_max),
+	POWER_SUPPLY_ATTR(real_type),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 54bef52..4ecf9a5 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -26,6 +26,7 @@
 #include <linux/pm_wakeup.h>
 #include <linux/slab.h>
 #include <linux/pmic-voter.h>
+#include "battery.h"
 
 #define DRV_MAJOR_VERSION	1
 #define DRV_MINOR_VERSION	0
@@ -410,19 +411,6 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
 	if (!chip->main_psy)
 		return 0;
 
-	if (chip->batt_psy) {
-		rc = power_supply_get_property(chip->batt_psy,
-			POWER_SUPPLY_PROP_CURRENT_QNOVO,
-			&pval);
-		if (rc < 0) {
-			pr_err("Couldn't get qnovo fcc, rc=%d\n", rc);
-			return rc;
-		}
-
-		if (pval.intval != -EINVAL)
-			total_fcc_ua = pval.intval;
-	}
-
 	if (chip->pl_mode == POWER_SUPPLY_PL_NONE
 	    || get_effective_result_locked(chip->pl_disable_votable)) {
 		pval.intval = total_fcc_ua;
@@ -473,7 +461,6 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
 	struct pl_data *chip = data;
 	union power_supply_propval pval = {0, };
 	int rc = 0;
-	int effective_fv_uv = fv_uv;
 
 	if (fv_uv < 0)
 		return 0;
@@ -481,20 +468,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
 	if (!chip->main_psy)
 		return 0;
 
-	if (chip->batt_psy) {
-		rc = power_supply_get_property(chip->batt_psy,
-			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
-			&pval);
-		if (rc < 0) {
-			pr_err("Couldn't get qnovo fv, rc=%d\n", rc);
-			return rc;
-		}
-
-		if (pval.intval != -EINVAL)
-			effective_fv_uv = pval.intval;
-	}
-
-	pval.intval = effective_fv_uv;
+	pval.intval = fv_uv;
 
 	rc = power_supply_set_property(chip->main_psy,
 			POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
@@ -930,11 +904,17 @@ static int pl_determine_initial_status(struct pl_data *chip)
 }
 
 #define DEFAULT_RESTRICTED_CURRENT_UA	1000000
-static int pl_init(void)
+int qcom_batt_init(void)
 {
 	struct pl_data *chip;
 	int rc = 0;
 
+	/* initialize just once */
+	if (the_chip) {
+		pr_err("was initialized earlier. Failing now\n");
+		return -EINVAL;
+	}
+
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
@@ -1014,7 +994,9 @@ static int pl_init(void)
 		goto unreg_notifier;
 	}
 
-	return rc;
+	the_chip = chip;
+
+	return 0;
 
 unreg_notifier:
 	power_supply_unreg_notifier(&chip->nb);
@@ -1031,21 +1013,23 @@ static int pl_init(void)
 	return rc;
 }
 
-static void pl_deinit(void)
+void qcom_batt_deinit(void)
 {
 	struct pl_data *chip = the_chip;
 
+	if (chip == NULL)
+		return;
+
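+	/* flush queued work before unregistering the notifier and votables */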
+	cancel_work_sync(&chip->status_change_work);
+	cancel_delayed_work_sync(&chip->pl_taper_work);
+	cancel_work_sync(&chip->pl_disable_forever_work);
+
 	power_supply_unreg_notifier(&chip->nb);
 	destroy_votable(chip->pl_awake_votable);
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
 	destroy_votable(chip->fcc_votable);
 	wakeup_source_unregister(chip->pl_ws);
+	the_chip = NULL;
 	kfree(chip);
 }
-
-module_init(pl_init);
-module_exit(pl_deinit)
-
-MODULE_DESCRIPTION("");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/drivers/power/supply/qcom/battery.h
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
copy to drivers/power/supply/qcom/battery.h
index b2d607d..38626e7 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/drivers/power/supply/qcom/battery.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,5 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
+#ifndef __BATTERY_H
+#define __BATTERY_H
+int qcom_batt_init(void);
+void qcom_batt_deinit(void);
+#endif /* __BATTERY_H */
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 64e1f43..2266a2a 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -238,11 +238,9 @@ static struct smb_params pm660_params = {
 
 #define STEP_CHARGING_MAX_STEPS	5
 struct smb_dt_props {
-	int	fcc_ua;
 	int	usb_icl_ua;
 	int	dc_icl_ua;
 	int	boost_threshold_ua;
-	int	fv_uv;
 	int	wipower_max_uw;
 	int	min_freq_khz;
 	int	max_freq_khz;
@@ -310,14 +308,14 @@ static int smb2_parse_dt(struct smb2 *chip)
 						"qcom,external-vconn");
 
 	rc = of_property_read_u32(node,
-				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+				"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
 	if (rc < 0)
-		chip->dt.fcc_ua = -EINVAL;
+		chg->batt_profile_fcc_ua = -EINVAL;
 
 	rc = of_property_read_u32(node,
-				"qcom,fv-max-uv", &chip->dt.fv_uv);
+				"qcom,fv-max-uv", &chg->batt_profile_fv_uv);
 	if (rc < 0)
-		chip->dt.fv_uv = -EINVAL;
+		chg->batt_profile_fv_uv = -EINVAL;
 
 	rc = of_property_read_u32(node,
 				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
@@ -429,6 +427,7 @@ static enum power_supply_property smb2_usb_props[] = {
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+	POWER_SUPPLY_PROP_REAL_TYPE,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -448,6 +447,16 @@ static int smb2_usb_get_prop(struct power_supply *psy,
 		break;
 	case POWER_SUPPLY_PROP_ONLINE:
 		rc = smblib_get_prop_usb_online(chg, val);
+		if (!val->intval)
+			break;
+
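+		/* plain SDP is reported via the pc_port psy instead */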
+		rc = smblib_get_prop_typec_mode(chg, val);
+		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+			chg->micro_usb_mode) &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+			val->intval = 0;
+		else
+			val->intval = 1;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
 		val->intval = chg->voltage_min_uv;
@@ -465,10 +474,13 @@ static int smb2_usb_get_prop(struct power_supply *psy,
 		rc = smblib_get_prop_usb_current_max(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_USB_PD;
+		break;
+	case POWER_SUPPLY_PROP_REAL_TYPE:
 		if (chip->bad_part)
-			val->intval = POWER_SUPPLY_TYPE_USB;
+			val->intval = POWER_SUPPLY_TYPE_USB_PD;
 		else
-			val->intval = chg->usb_psy_desc.type;
+			val->intval = chg->real_charger_type;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
 		if (chg->micro_usb_mode)
@@ -610,7 +622,7 @@ static int smb2_init_usb_psy(struct smb2 *chip)
 	struct smb_charger *chg = &chip->chg;
 
 	chg->usb_psy_desc.name			= "usb";
-	chg->usb_psy_desc.type			= POWER_SUPPLY_TYPE_UNKNOWN;
+	chg->usb_psy_desc.type			= POWER_SUPPLY_TYPE_USB_PD;
 	chg->usb_psy_desc.properties		= smb2_usb_props;
 	chg->usb_psy_desc.num_properties	= ARRAY_SIZE(smb2_usb_props);
 	chg->usb_psy_desc.get_property		= smb2_usb_get_prop;
@@ -619,7 +631,7 @@ static int smb2_init_usb_psy(struct smb2 *chip)
 
 	usb_cfg.drv_data = chip;
 	usb_cfg.of_node = chg->dev->of_node;
-	chg->usb_psy = devm_power_supply_register(chg->dev,
+	chg->usb_psy = power_supply_register(chg->dev,
 						  &chg->usb_psy_desc,
 						  &usb_cfg);
 	if (IS_ERR(chg->usb_psy)) {
@@ -630,6 +642,97 @@ static int smb2_init_usb_psy(struct smb2 *chip)
 	return 0;
 }
 
+/********************************
+ * USB PC_PORT PSY REGISTRATION *
+ ********************************/
+static enum power_supply_property smb2_usb_port_props[] = {
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int smb2_usb_port_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_USB;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		if (!val->intval)
+			break;
+
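+		/* online only for a plain SDP on a default/micro-USB port */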
+		rc = smblib_get_prop_typec_mode(chg, val);
+		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+			chg->micro_usb_mode) &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+			val->intval = 1;
+		else
+			val->intval = 0;
+		break;
+	default:
+		pr_err_ratelimited("Get prop %d is not supported in pc_port\n",
+				psp);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+static int smb2_usb_port_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	switch (psp) {
+	default:
+		pr_err_ratelimited("Set prop %d is not supported in pc_port\n",
+				psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_port_psy_desc = {
+	.name		= "pc_port",
+	.type		= POWER_SUPPLY_TYPE_USB,
+	.properties	= smb2_usb_port_props,
+	.num_properties	= ARRAY_SIZE(smb2_usb_port_props),
+	.get_property	= smb2_usb_port_get_prop,
+	.set_property	= smb2_usb_port_set_prop,
+};
+
+static int smb2_init_usb_port_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_port_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_port_cfg.drv_data = chip;
+	usb_port_cfg.of_node = chg->dev->of_node;
+	chg->usb_port_psy = power_supply_register(chg->dev,
+						  &usb_port_psy_desc,
+						  &usb_port_cfg);
+	if (IS_ERR(chg->usb_port_psy)) {
+		pr_err("Couldn't register USB pc_port power supply\n");
+		return PTR_ERR(chg->usb_port_psy);
+	}
+
+	return 0;
+}
+
 /*****************************
  * USB MAIN PSY REGISTRATION *
  *****************************/
@@ -734,7 +837,7 @@ static int smb2_init_usb_main_psy(struct smb2 *chip)
 
 	usb_main_cfg.drv_data = chip;
 	usb_main_cfg.of_node = chg->dev->of_node;
-	chg->usb_main_psy = devm_power_supply_register(chg->dev,
+	chg->usb_main_psy = power_supply_register(chg->dev,
 						  &usb_main_psy_desc,
 						  &usb_main_cfg);
 	if (IS_ERR(chg->usb_main_psy)) {
@@ -836,7 +939,7 @@ static int smb2_init_dc_psy(struct smb2 *chip)
 
 	dc_cfg.drv_data = chip;
 	dc_cfg.of_node = chg->dev->of_node;
-	chg->dc_psy = devm_power_supply_register(chg->dev,
+	chg->dc_psy = power_supply_register(chg->dev,
 						  &dc_psy_desc,
 						  &dc_cfg);
 	if (IS_ERR(chg->dc_psy)) {
@@ -946,13 +1049,15 @@ static int smb2_batt_get_prop(struct power_supply *psy,
 		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
-		val->intval = chg->qnovo_fv_uv;
+		val->intval = get_client_vote_locked(chg->fv_votable,
+				QNOVO_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		rc = smblib_get_prop_batt_current_now(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
-		val->intval = chg->qnovo_fcc_ua;
+		val->intval = get_client_vote_locked(chg->fcc_votable,
+				QNOVO_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		val->intval = get_client_vote(chg->fcc_votable,
@@ -1018,23 +1123,37 @@ static int smb2_batt_set_prop(struct power_supply *psy,
 		vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
-		vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
+		chg->batt_profile_fv_uv = val->intval;
+		vote(chg->fv_votable, BATT_PROFILE_VOTER, true, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
 		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
-		chg->qnovo_fv_uv = val->intval;
-		rc = rerun_election(chg->fv_votable);
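+		/* -EINVAL drops the Qnovo vote, restoring the profile FV */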
+		if (val->intval == -EINVAL) {
+			vote(chg->fv_votable, BATT_PROFILE_VOTER,
+					true, chg->batt_profile_fv_uv);
+			vote(chg->fv_votable, QNOVO_VOTER, false, 0);
+		} else {
+			vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
+			vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
+		}
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
-		chg->qnovo_fcc_ua = val->intval;
 		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
 			val->intval != -EINVAL && val->intval < 2000000, 0);
-		rc = rerun_election(chg->fcc_votable);
+		if (val->intval == -EINVAL) {
+			vote(chg->fcc_votable, BATT_PROFILE_VOTER,
+					true, chg->batt_profile_fcc_ua);
+			vote(chg->fcc_votable, QNOVO_VOTER, false, 0);
+		} else {
+			vote(chg->fcc_votable, QNOVO_VOTER, true, val->intval);
+			vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
+		}
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
-		vote(chg->fcc_votable, DEFAULT_VOTER, true, val->intval);
+		chg->batt_profile_fcc_ua = val->intval;
+		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
 		break;
 	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
 		/* Not in ship mode as long as the device is active */
@@ -1051,6 +1170,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_DP_DM:
 		rc = smblib_dp_dm(chg, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		rc = smblib_set_prop_input_current_limited(chg, val);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1068,6 +1190,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
 	case POWER_SUPPLY_PROP_DP_DM:
 	case POWER_SUPPLY_PROP_RERUN_AICL:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 		return 1;
 	default:
 		break;
@@ -1094,7 +1217,7 @@ static int smb2_init_batt_psy(struct smb2 *chip)
 
 	batt_cfg.drv_data = chg;
 	batt_cfg.of_node = chg->dev->of_node;
-	chg->batt_psy = devm_power_supply_register(chg->dev,
+	chg->batt_psy = power_supply_register(chg->dev,
 						   &batt_psy_desc,
 						   &batt_cfg);
 	if (IS_ERR(chg->batt_psy)) {
@@ -1451,11 +1574,13 @@ static int smb2_init_hw(struct smb2 *chip)
 	if (chip->dt.no_battery)
 		chg->fake_capacity = 50;
 
-	if (chip->dt.fcc_ua < 0)
-		smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
+	if (chg->batt_profile_fcc_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.fcc,
+				&chg->batt_profile_fcc_ua);
 
-	if (chip->dt.fv_uv < 0)
-		smblib_get_charge_param(chg, &chg->param.fv, &chip->dt.fv_uv);
+	if (chg->batt_profile_fv_uv < 0)
+		smblib_get_charge_param(chg, &chg->param.fv,
+				&chg->batt_profile_fv_uv);
 
 	smblib_get_charge_param(chg, &chg->param.usb_icl,
 				&chg->default_icl_ua);
@@ -1516,9 +1641,9 @@ static int smb2_init_hw(struct smb2 *chip)
 	vote(chg->dc_suspend_votable,
 		DEFAULT_VOTER, chip->dt.no_battery, 0);
 	vote(chg->fcc_votable,
-		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+		BATT_PROFILE_VOTER, true, chg->batt_profile_fcc_ua);
 	vote(chg->fv_votable,
-		DEFAULT_VOTER, true, chip->dt.fv_uv);
+		BATT_PROFILE_VOTER, true, chg->batt_profile_fv_uv);
 	vote(chg->dc_icl_votable,
 		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
 	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
@@ -2062,6 +2187,21 @@ static int smb2_request_interrupts(struct smb2 *chip)
 	return rc;
 }
 
+static void smb2_free_interrupts(struct smb_charger *chg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (smb2_irqs[i].irq > 0) {
+			if (smb2_irqs[i].wake)
+				disable_irq_wake(smb2_irqs[i].irq);
+
+			devm_free_irq(chg->dev, smb2_irqs[i].irq,
+					smb2_irqs[i].irq_data);
+		}
+	}
+}
+
 static void smb2_disable_interrupts(struct smb_charger *chg)
 {
 	int i;
@@ -2239,7 +2379,13 @@ static int smb2_probe(struct platform_device *pdev)
 
 	rc = smb2_init_usb_main_psy(chip);
 	if (rc < 0) {
-		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_port_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb pc_port psy rc=%d\n", rc);
 		goto cleanup;
 	}
 
@@ -2297,20 +2443,29 @@ static int smb2_probe(struct platform_device *pdev)
 	device_init_wakeup(chg->dev, true);
 
 	pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
-		usb_present, chg->usb_psy_desc.type,
+		usb_present, chg->real_charger_type,
 		batt_present, batt_health, batt_charge_type);
 	return rc;
 
 cleanup:
-	smblib_deinit(chg);
-	if (chg->usb_psy)
-		power_supply_unregister(chg->usb_psy);
+	smb2_free_interrupts(chg);
 	if (chg->batt_psy)
 		power_supply_unregister(chg->batt_psy);
+	if (chg->usb_main_psy)
+		power_supply_unregister(chg->usb_main_psy);
+	if (chg->usb_psy)
+		power_supply_unregister(chg->usb_psy);
+	if (chg->usb_port_psy)
+		power_supply_unregister(chg->usb_port_psy);
+	if (chg->dc_psy)
+		power_supply_unregister(chg->dc_psy);
 	if (chg->vconn_vreg && chg->vconn_vreg->rdev)
-		regulator_unregister(chg->vconn_vreg->rdev);
+		devm_regulator_unregister(chg->dev, chg->vconn_vreg->rdev);
 	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
-		regulator_unregister(chg->vbus_vreg->rdev);
+		devm_regulator_unregister(chg->dev, chg->vbus_vreg->rdev);
+
+	smblib_deinit(chg);
+
 	platform_set_drvdata(pdev, NULL);
 	return rc;
 }
@@ -2322,6 +2477,7 @@ static int smb2_remove(struct platform_device *pdev)
 
 	power_supply_unregister(chg->batt_psy);
 	power_supply_unregister(chg->usb_psy);
+	power_supply_unregister(chg->usb_port_psy);
 	regulator_unregister(chg->vconn_vreg->rdev);
 	regulator_unregister(chg->vbus_vreg->rdev);
 
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 6eb7009..b1070e8 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -18,10 +18,11 @@
 #include <linux/regulator/driver.h>
 #include <linux/qpnp/qpnp-revid.h>
 #include <linux/irq.h>
+#include <linux/pmic-voter.h>
 #include "smb-lib.h"
 #include "smb-reg.h"
+#include "battery.h"
 #include "storm-watch.h"
-#include <linux/pmic-voter.h>
 
 #define smblib_err(chg, fmt, ...)		\
 	pr_err("%s: %s: " fmt, chg->name,	\
@@ -548,9 +549,9 @@ static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
 
 	/* if PD is active, APSD is disabled so won't have a valid result */
 	if (chg->pd_active)
-		chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+		chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
 	else
-		chg->usb_psy_desc.type = apsd_result->pst;
+		chg->real_charger_type = apsd_result->pst;
 
 	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
 					apsd_result->name, chg->pd_active);
@@ -856,7 +857,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 
 	/* configure current */
 	if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
-		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
 		rc = set_sdp_current(chg, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
@@ -877,10 +878,10 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 		/* remove override if no voters - hw defaults is desired */
 		override = false;
 	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
-		if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)
+		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			/* For std cable with type = SDP never override */
 			override = false;
-		else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
+		else if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_CDP
 			&& icl_ua == 1500000)
 			/*
 			 * For std cable with type = CDP override only if
@@ -1712,6 +1713,11 @@ int smblib_get_prop_input_current_limited(struct smb_charger *chg,
 	u8 stat;
 	int rc;
 
+	if (chg->fake_input_current_limited >= 0) {
+		val->intval = chg->fake_input_current_limited;
+		return 0;
+	}
+
 	rc = smblib_read(chg, AICL_STATUS_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
@@ -1903,6 +1909,13 @@ int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
 	return rc;
 }
 
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	chg->fake_input_current_limited = val->intval;
+	return 0;
+}
+
 int smblib_rerun_aicl(struct smb_charger *chg)
 {
 	int rc, settled_icl_ua;
@@ -3319,7 +3332,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
 	int pulses;
 
 	power_supply_changed(chg->usb_main_psy);
-	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP) {
 		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
 		if (rc < 0) {
 			smblib_err(chg,
@@ -3347,7 +3360,7 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
 		}
 	}
 
-	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
 		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
 		if (rc < 0) {
 			smblib_err(chg,
@@ -4278,26 +4291,30 @@ static int smblib_create_votables(struct smb_charger *chg)
 	int rc = 0;
 
 	chg->fcc_votable = find_votable("FCC");
-	if (!chg->fcc_votable) {
-		rc = -EPROBE_DEFER;
+	if (chg->fcc_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find FCC votable rc=%d\n", rc);
 		return rc;
 	}
 
 	chg->fv_votable = find_votable("FV");
-	if (!chg->fv_votable) {
-		rc = -EPROBE_DEFER;
+	if (chg->fv_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find FV votable rc=%d\n", rc);
 		return rc;
 	}
 
 	chg->usb_icl_votable = find_votable("USB_ICL");
 	if (!chg->usb_icl_votable) {
-		rc = -EPROBE_DEFER;
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find USB_ICL votable rc=%d\n", rc);
 		return rc;
 	}
 
 	chg->pl_disable_votable = find_votable("PL_DISABLE");
-	if (!chg->pl_disable_votable) {
-		rc = -EPROBE_DEFER;
+	if (chg->pl_disable_votable == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg, "Couldn't find PL_DISABLE votable rc=%d\n", rc);
 		return rc;
 	}
 	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
@@ -4476,11 +4493,17 @@ int smblib_init(struct smb_charger *chg)
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
 	chg->fake_capacity = -EINVAL;
+	chg->fake_input_current_limited = -EINVAL;
 
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
-		chg->qnovo_fcc_ua = -EINVAL;
-		chg->qnovo_fv_uv = -EINVAL;
+		rc = qcom_batt_init();
+		if (rc < 0) {
+			smblib_err(chg, "qcom_batt_init failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+
 		rc = smblib_create_votables(chg);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4512,8 +4535,20 @@ int smblib_deinit(struct smb_charger *chg)
 {
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
+		cancel_work_sync(&chg->bms_update_work);
+		cancel_work_sync(&chg->rdstd_cc2_detach_work);
+		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+		cancel_delayed_work_sync(&chg->step_soc_req_work);
+		cancel_delayed_work_sync(&chg->clear_hdc_work);
+		cancel_work_sync(&chg->otg_oc_work);
+		cancel_work_sync(&chg->vconn_oc_work);
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		cancel_delayed_work_sync(&chg->icl_change_work);
+		cancel_delayed_work_sync(&chg->pl_enable_work);
+		cancel_work_sync(&chg->legacy_detection_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
+		qcom_batt_deinit();
 		break;
 	case PARALLEL_SLAVE:
 		break;
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index e4679f4..42b357e 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -62,6 +62,8 @@ enum print_reason {
 #define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
 #define LEGACY_UNKNOWN_VOTER		"LEGACY_UNKNOWN_VOTER"
 #define CC2_WA_VOTER			"CC2_WA_VOTER"
+#define QNOVO_VOTER			"QNOVO_VOTER"
+#define BATT_PROFILE_VOTER		"BATT_PROFILE_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -242,6 +244,8 @@ struct smb_charger {
 	struct power_supply		*bms_psy;
 	struct power_supply_desc	usb_psy_desc;
 	struct power_supply		*usb_main_psy;
+	struct power_supply		*usb_port_psy;
+	enum power_supply_type		real_charger_type;
 
 	/* notifiers */
 	struct notifier_block	nb;
@@ -314,6 +318,7 @@ struct smb_charger {
 	bool			typec_present;
 	u8			typec_status[5];
 	bool			typec_legacy_valid;
+	int			fake_input_current_limited;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -325,9 +330,11 @@ struct smb_charger {
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
 
+	/* battery profile */
+	int			batt_profile_fcc_ua;
+	int			batt_profile_fv_uv;
+
 	/* qnovo */
-	int			qnovo_fcc_ua;
-	int			qnovo_fv_uv;
 	int			usb_icl_delta_ua;
 	int			pulse_cnt;
 };
@@ -417,6 +424,8 @@ int smblib_set_prop_batt_capacity(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_system_temp_level(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_set_prop_input_current_limited(struct smb_charger *chg,
+				const union power_supply_propval *val);
 
 int smblib_get_prop_dc_present(struct smb_charger *chg,
 				union power_supply_propval *val);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 3f260a4..167666a 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1025,14 +1025,4 @@ enum {
 /* CHGR FREQ Peripheral registers */
 #define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
 
-/* SMB1355 specific registers */
-#define SMB1355_TEMP_COMP_STATUS_REG		(MISC_BASE + 0x07)
-#define SKIN_TEMP_RST_HOT_BIT			BIT(6)
-#define SKIN_TEMP_UB_HOT_BIT			BIT(5)
-#define SKIN_TEMP_LB_HOT_BIT			BIT(4)
-#define DIE_TEMP_TSD_HOT_BIT			BIT(3)
-#define DIE_TEMP_RST_HOT_BIT			BIT(2)
-#define DIE_TEMP_UB_HOT_BIT			BIT(1)
-#define DIE_TEMP_LB_HOT_BIT			BIT(0)
-
 #endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 8725590..83374bb 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -104,8 +104,6 @@ struct smb138x {
 	struct smb_dt_props	dt;
 	struct power_supply	*parallel_psy;
 	u32			wa_flags;
-	struct pmic_revid_data	*pmic_rev_id;
-	char			*name;
 };
 
 static int __debug_mask;
@@ -169,14 +167,6 @@ static int smb138x_parse_dt(struct smb138x *chip)
 	if (rc < 0)
 		chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
 
-	/* check that smb1355 is configured to run in mid-mid mode */
-	if (chip->pmic_rev_id->pmic_subtype == SMB1355_SUBTYPE
-		&& chip->dt.pl_mode != POWER_SUPPLY_PL_USBMID_USBMID) {
-		pr_err("Smb1355 can only run in MID-MID mode, saw = %d mode\n",
-				chip->dt.pl_mode);
-		return -EINVAL;
-	}
-
 	chip->dt.suspend_input = of_property_read_bool(node,
 				"qcom,suspend-input");
 
@@ -489,30 +479,6 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
  * PARALLEL PSY REGISTRATION *
  *****************************/
 
-static int smb1355_get_prop_connector_health(struct smb138x *chip)
-{
-	struct smb_charger *chg = &chip->chg;
-	u8 temp;
-	int rc;
-
-	rc = smblib_read(chg, SMB1355_TEMP_COMP_STATUS_REG, &temp);
-	if (rc < 0) {
-		pr_err("Couldn't read comp stat reg rc = %d\n", rc);
-		return POWER_SUPPLY_HEALTH_UNKNOWN;
-	}
-
-	if (temp & SKIN_TEMP_RST_HOT_BIT)
-		return POWER_SUPPLY_HEALTH_OVERHEAT;
-
-	if (temp & SKIN_TEMP_UB_HOT_BIT)
-		return POWER_SUPPLY_HEALTH_HOT;
-
-	if (temp & SKIN_TEMP_LB_HOT_BIT)
-		return POWER_SUPPLY_HEALTH_WARM;
-
-	return POWER_SUPPLY_HEALTH_COOL;
-}
-
 static int smb138x_get_prop_connector_health(struct smb138x *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -570,54 +536,18 @@ static enum power_supply_property smb138x_parallel_props[] = {
 	POWER_SUPPLY_PROP_PIN_ENABLED,
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
-	POWER_SUPPLY_PROP_VOLTAGE_MAX,
-	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-	POWER_SUPPLY_PROP_MODEL_NAME,
-	POWER_SUPPLY_PROP_PARALLEL_MODE,
-	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
-	POWER_SUPPLY_PROP_SET_SHIP_MODE,
-	POWER_SUPPLY_PROP_CHARGER_TEMP,
-	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
-	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
-};
-
-static enum power_supply_property smb1355_parallel_props[] = {
-	POWER_SUPPLY_PROP_CHARGE_TYPE,
-	POWER_SUPPLY_PROP_CHARGING_ENABLED,
-	POWER_SUPPLY_PROP_PIN_ENABLED,
-	POWER_SUPPLY_PROP_INPUT_SUSPEND,
-	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
-	POWER_SUPPLY_PROP_CHARGER_TEMP,
-	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
 };
 
-static int smb138x_get_parallel_charging(struct smb138x *chip, int *disabled)
-{
-	struct smb_charger *chg = &chip->chg;
-	int rc = 0;
-	u8 cfg2;
-
-	rc = smblib_read(chg, CHGR_CFG2_REG, &cfg2);
-	if (rc < 0) {
-		pr_err("Couldn't read en_cmg_reg rc=%d\n", rc);
-		return rc;
-	}
-
-	if (cfg2 & CHG_EN_SRC_BIT)
-		*disabled = 0;
-	else
-		*disabled = 1;
-
-	return 0;
-}
-
 static int smb138x_parallel_get_prop(struct power_supply *psy,
 				     enum power_supply_property prop,
 				     union power_supply_propval *val)
@@ -644,7 +574,7 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 			val->intval = !(temp & DISABLE_CHARGING_BIT);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
-		rc = smb138x_get_parallel_charging(chip, &val->intval);
+		rc = smblib_get_usb_suspend(chg, &val->intval);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
@@ -653,6 +583,14 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 		else
 			val->intval = 0;
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&val->intval);
+		else
+			val->intval = 0;
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
 		break;
@@ -660,46 +598,28 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 		rc = smblib_get_charge_param(chg, &chg->param.fcc,
 					     &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_slave_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_MODEL_NAME:
-		val->strval = chip->name;
+		val->strval = "smb138x";
 		break;
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
 		val->intval = chip->dt.pl_mode;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
-		if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
-			val->intval = smb138x_get_prop_connector_health(chip);
-		else
-			val->intval = smb1355_get_prop_connector_health(chip);
+		val->intval = smb138x_get_prop_connector_health(chip);
 		break;
 	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
 		/* Not in ship mode as long as device is active */
 		val->intval = 0;
 		break;
-	case POWER_SUPPLY_PROP_CHARGER_TEMP:
-		if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
-			rc = smb138x_get_prop_charger_temp(chip, val);
-		else
-			rc = smblib_get_prop_charger_temp(chg, val);
-		break;
-	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
-		rc = smblib_get_prop_charger_temp_max(chg, val);
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_NOW:
-		if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
-			rc = smblib_get_prop_slave_current_now(chg, val);
-		else
-			rc = -ENODATA;
-		break;
-	case POWER_SUPPLY_PROP_CURRENT_MAX:
-		if ((chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE)
-		  && ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
-		  || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)))
-			rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
-				&val->intval);
-		else
-			rc = -ENODATA;
-		break;
 	default:
 		pr_err("parallel power supply get prop %d not supported\n",
 			prop);
@@ -714,33 +634,28 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
 	return rc;
 }
 
-static int smb138x_set_parallel_charging(struct smb138x *chip, bool disable)
+static int smb138x_set_parallel_suspend(struct smb138x *chip, bool suspend)
 {
 	struct smb_charger *chg = &chip->chg;
 	int rc = 0;
 
 	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT,
-				 disable ? 0 : WDOG_TIMER_EN_BIT);
+				 suspend ? 0 : WDOG_TIMER_EN_BIT);
 	if (rc < 0) {
 		pr_err("Couldn't %s watchdog rc=%d\n",
-		       disable ? "disable" : "enable", rc);
-		disable = true;
+		       suspend ? "disable" : "enable", rc);
+		suspend = true;
 	}
 
-	/*
-	 * Configure charge enable for high polarity and
-	 * When disabling charging set it to cmd register control(cmd bit=0)
-	 * When enabling charging set it to pin control
-	 */
-	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
-			CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT,
-			disable ? 0 : CHG_EN_SRC_BIT);
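+	/* suspend/resume the parallel charger input via USBIN suspend */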
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
 	if (rc < 0) {
-		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		pr_err("Couldn't %s parallel charger rc=%d\n",
+		       suspend ? "suspend" : "resume", rc);
 		return rc;
 	}
 
-	return 0;
+	return rc;
 }
 
 static int smb138x_parallel_set_prop(struct power_supply *psy,
@@ -753,7 +668,7 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
 
 	switch (prop) {
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
-		rc = smb138x_set_parallel_charging(chip, (bool)val->intval);
+		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
@@ -788,7 +703,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
 	return 0;
 }
 
-static struct power_supply_desc parallel_psy_desc = {
+static const struct power_supply_desc parallel_psy_desc = {
 	.name			= "parallel",
 	.type			= POWER_SUPPLY_TYPE_PARALLEL,
 	.properties		= smb138x_parallel_props,
@@ -816,28 +731,6 @@ static int smb138x_init_parallel_psy(struct smb138x *chip)
 	return 0;
 }
 
-static int smb1355_init_parallel_psy(struct smb138x *chip)
-{
-	struct power_supply_config parallel_cfg = {};
-	struct smb_charger *chg = &chip->chg;
-
-	parallel_cfg.drv_data = chip;
-	parallel_cfg.of_node = chg->dev->of_node;
-
-	/* change to smb1355's property list */
-	parallel_psy_desc.properties = smb1355_parallel_props;
-	parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props);
-	chip->parallel_psy = devm_power_supply_register(chg->dev,
-						   &parallel_psy_desc,
-						   &parallel_cfg);
-	if (IS_ERR(chip->parallel_psy)) {
-		pr_err("Couldn't register parallel power supply\n");
-		return PTR_ERR(chip->parallel_psy);
-	}
-
-	return 0;
-}
-
 /******************************
  * VBUS REGULATOR REGISTRATION *
  ******************************/
@@ -971,25 +864,10 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
 		return rc;
 	}
 
-	/* disable the charging path when under s/w control */
-	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
-				 CHARGING_ENABLE_CMD_BIT, 0);
+	/* suspend parallel charging */
+	rc = smb138x_set_parallel_suspend(chip, true);
 	if (rc < 0) {
-		pr_err("Couldn't disable charging rc=%d\n", rc);
-		return rc;
-	}
-
-	/* disable parallel charging path */
-	rc = smb138x_set_parallel_charging(chip, true);
-	if (rc < 0) {
-		pr_err("Couldn't disable parallel path rc=%d\n", rc);
-		return rc;
-	}
-
-	/* unsuspend parallel charging */
-	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT, 0);
-	if (rc < 0) {
-		pr_err("Couldn't unsuspend parallel charging rc=%d\n", rc);
+		pr_err("Couldn't suspend parallel charging rc=%d\n", rc);
 		return rc;
 	}
 
@@ -1000,6 +878,24 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
 		return rc;
 	}
 
+	/* enable the charging path */
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	/* enable parallel current sensing */
 	rc = smblib_masked_write(chg, CFG_REG,
 				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
@@ -1154,6 +1050,7 @@ static int smb138x_init_hw(struct smb138x *chip)
 
 static int smb138x_setup_wa_flags(struct smb138x *chip)
 {
+	struct pmic_revid_data *pmic_rev_id;
 	struct device_node *revid_dev_node;
 
 	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
@@ -1163,8 +1060,8 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
 		return -EINVAL;
 	}
 
-	chip->pmic_rev_id = get_revid_data(revid_dev_node);
-	if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
 		/*
 		 * the revid peripheral must be registered, any failure
 		 * here only indicates that the rev-id module has not
@@ -1173,14 +1070,14 @@ static int smb138x_setup_wa_flags(struct smb138x *chip)
 		return -EPROBE_DEFER;
 	}
 
-	switch (chip->pmic_rev_id->pmic_subtype) {
+	switch (pmic_rev_id->pmic_subtype) {
 	case SMB1381_SUBTYPE:
-		if (chip->pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+		if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
 			chip->wa_flags |= OOB_COMP_WA_BIT;
 		break;
 	default:
 		pr_err("PMIC subtype %d not supported\n",
-				chip->pmic_rev_id->pmic_subtype);
+				pmic_rev_id->pmic_subtype);
 		return -EINVAL;
 	}
 
@@ -1478,7 +1375,6 @@ static int smb138x_master_probe(struct smb138x *chip)
 
 	chg->param = v1_params;
 
-	chip->name = "smb1381";
 	rc = smblib_init(chg);
 	if (rc < 0) {
 		pr_err("Couldn't initialize smblib rc=%d\n", rc);
@@ -1539,7 +1435,7 @@ static int smb138x_master_probe(struct smb138x *chip)
 	return rc;
 }
 
-static int smb1355_slave_probe(struct smb138x *chip)
+static int smb138x_slave_probe(struct smb138x *chip)
 {
 	struct smb_charger *chg = &chip->chg;
 	int rc = 0;
@@ -1552,55 +1448,6 @@ static int smb1355_slave_probe(struct smb138x *chip)
 		goto cleanup;
 	}
 
-	rc = smb138x_parse_dt(chip);
-	if (rc < 0) {
-		pr_err("Couldn't parse device tree rc=%d\n", rc);
-		goto cleanup;
-	}
-
-	rc = smb138x_init_slave_hw(chip);
-	if (rc < 0) {
-		pr_err("Couldn't initialize hardware rc=%d\n", rc);
-		goto cleanup;
-	}
-
-	rc = smb1355_init_parallel_psy(chip);
-	if (rc < 0) {
-		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
-		goto cleanup;
-	}
-
-	rc = smb138x_determine_initial_slave_status(chip);
-	if (rc < 0) {
-		pr_err("Couldn't determine initial status rc=%d\n", rc);
-		goto cleanup;
-	}
-
-	rc = smb138x_request_interrupts(chip);
-	if (rc < 0) {
-		pr_err("Couldn't request interrupts rc=%d\n", rc);
-		goto cleanup;
-	}
-
-	return 0;
-
-cleanup:
-	smblib_deinit(chg);
-	return rc;
-}
-
-static int smb1381_slave_probe(struct smb138x *chip)
-{
-	struct smb_charger *chg = &chip->chg;
-	int rc = 0;
-
-	chg->param = v1_params;
-
-	rc = smblib_init(chg);
-	if (rc < 0) {
-		pr_err("Couldn't initialize smblib rc=%d\n", rc);
-		goto cleanup;
-	}
 	chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
 	if (IS_ERR(chg->iio.temp_max_chan)) {
 		rc = PTR_ERR(chg->iio.temp_max_chan);
@@ -1668,71 +1515,25 @@ static int smb1381_slave_probe(struct smb138x *chip)
 		goto cleanup;
 	}
 
-	return 0;
+	return rc;
 
 cleanup:
 	smblib_deinit(chg);
+	if (chip->parallel_psy)
+		power_supply_unregister(chip->parallel_psy);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
 	return rc;
 }
 
-static int slave_probe(struct smb138x *chip)
-{
-	struct device_node *revid_dev_node;
-	int rc = 0;
-
-	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
-					"qcom,pmic-revid", 0);
-	if (!revid_dev_node) {
-		pr_err("Missing qcom,pmic-revid property\n");
-		return -EINVAL;
-	}
-
-	chip->pmic_rev_id = get_revid_data(revid_dev_node);
-	if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
-		/*
-		 * the revid peripheral must be registered, any failure
-		 * here only indicates that the rev-id module has not
-		 * probed yet.
-		 */
-		return -EPROBE_DEFER;
-	}
-
-	switch (chip->pmic_rev_id->pmic_subtype) {
-	case SMB1355_SUBTYPE:
-		chip->name = "smb1355";
-		rc = smb1355_slave_probe(chip);
-		break;
-	case SMB1381_SUBTYPE:
-		chip->name = "smb1381";
-		rc = smb1381_slave_probe(chip);
-		break;
-	default:
-		pr_err("Unsupported pmic subtype = 0x%02x\n",
-				chip->pmic_rev_id->pmic_subtype);
-		rc = -EINVAL;
-	}
-
-	if (rc < 0) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("Couldn't probe SMB138X rc=%d\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
 static const struct of_device_id match_table[] = {
 	{
-		.compatible	= "qcom,smb138x-charger",
-		.data		= (void *) PARALLEL_MASTER,
+		.compatible = "qcom,smb138x-charger",
+		.data = (void *) PARALLEL_MASTER
 	},
 	{
-		.compatible	= "qcom,smb138x-parallel-slave",
-		.data		= (void *) PARALLEL_SLAVE,
-	},
-	{
-		.compatible	= "qcom,smb1355-parallel-slave",
-		.data		= (void *) PARALLEL_SLAVE,
+		.compatible = "qcom,smb138x-parallel-slave",
+		.data = (void *) PARALLEL_SLAVE
 	},
 	{ },
 };
@@ -1779,7 +1580,7 @@ static int smb138x_probe(struct platform_device *pdev)
 		rc = smb138x_master_probe(chip);
 		break;
 	case PARALLEL_SLAVE:
-		rc = slave_probe(chip);
+		rc = smb138x_slave_probe(chip);
 		break;
 	default:
 		pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
@@ -1793,8 +1594,7 @@ static int smb138x_probe(struct platform_device *pdev)
 		goto cleanup;
 	}
 
-	pr_info("%s probed successfully mode=%d pl_mode = %d\n",
-		chip->name, chip->chg.mode, chip->dt.pl_mode);
+	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
 	return rc;
 
 cleanup:
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ad627fb..7ad650e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2980,7 +2980,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
 		goto out2;
 
 	if (rdev->supply && (rdev->desc->min_dropout_uV ||
-				!rdev->desc->ops->get_voltage)) {
+				!(rdev->desc->ops->get_voltage ||
+					rdev->desc->ops->get_voltage_sel))) {
 		int current_supply_uV;
 		int selector;
 
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index cf7c35d..deb0ce5 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -86,7 +86,7 @@ struct cprh_kbss_fuses {
  */
 #define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT	32
 #define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT	16
-#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	16
+#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	24
 
 /*
  * Constants which define the name of each fuse corner.
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf2..dfa8d50 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -520,7 +520,7 @@ static const struct regulator_desc rk818_reg[] = {
 		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(0), 400),
 	RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
-		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+		RK818_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(1), 400),
 	{
 		.name = "LDO_REG3",
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 2987ed2..4f5f86c 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -393,10 +393,15 @@ static void rpmh_regulator_handle_arc_enable(struct rpmh_aggr_vreg *aggr_vreg,
 	 * Mask the voltage level if "off" level is supported and the regulator
 	 * has not been enabled.
 	 */
-	if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF &&
-	    (!(req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) ||
-	     !req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE]))
-		req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+	if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF) {
+		if (req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) {
+			if (!req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE])
+				req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+		} else {
+			/* Invalidate voltage level if enable is invalid. */
+			req->valid &= ~BIT(RPMH_REGULATOR_REG_ARC_LEVEL);
+		}
+	}
 
 	/*
 	 * Mark the pseudo enable bit as invalid so that it is not accidentally
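
The rpmh-regulator rework above turns one compound condition into an explicit decision: with an "off" level present, a valid-but-zero enable vote still zeroes the level, while a missing enable vote now invalidates the level request instead of sending a zero level. A hedged sketch of that logic over a simplified request structure (bit positions and field names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define REG_ENABLE	0
	#define REG_LEVEL	1

	struct req { uint32_t valid; uint32_t reg[2]; };

	static void handle_arc_enable(int has_off_level, struct req *req)
	{
		if (!has_off_level)
			return;

		if (req->valid & (1u << REG_ENABLE)) {
			/* Enable vote present: zero the level when disabled. */
			if (!req->reg[REG_ENABLE])
				req->reg[REG_LEVEL] = 0;
		} else {
			/* No enable vote: drop the level vote entirely. */
			req->valid &= ~(1u << REG_LEVEL);
		}
	}

	int main(void)
	{
		struct req r = { .valid = 1u << REG_LEVEL, .reg = { 0, 5 } };

		handle_arc_enable(1, &r);
		printf("valid=0x%x level=%u\n", r.valid, r.reg[REG_LEVEL]);
		return 0;
	}
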
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d2c3d7c..5ca6d21 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -311,8 +311,7 @@ static int tps_65023_probe(struct i2c_client *client,
 
 	/* Enable setting output voltage by I2C */
 	regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
-					TPS65023_REG_CTRL2_CORE_ADJ,
-					TPS65023_REG_CTRL2_CORE_ADJ);
+			   TPS65023_REG_CTRL2_CORE_ADJ, 0);
 
 	return 0;
 }
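
The tps65023 hunk changes the masked update of CORE_ADJ from setting the bit to clearing it; with regmap_update_bits(map, reg, mask, val), only the bits in mask are rewritten and they take their value from val, so passing 0 clears them. A small plain-C illustration of that read-modify-write contract (the bit position is an assumption for the example, not the real register layout):

	#include <stdint.h>
	#include <stdio.h>

	/* Same contract as a masked register update: bits in 'mask' are
	 * replaced by the matching bits of 'val'; other bits are untouched. */
	static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | (val & mask);
	}

	int main(void)
	{
		uint32_t ctrl2 = 0xAD;		/* arbitrary current value    */
		uint32_t core_adj = 1u << 6;	/* illustrative bit position  */

		printf("set:   0x%02x\n", update_bits(ctrl2, core_adj, core_adj));
		printf("clear: 0x%02x\n", update_bits(ctrl2, core_adj, 0));
		return 0;
	}
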
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c4fe95a..904422f 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
 		cmd = list_first_entry_or_null(&vscsi->free_cmd,
 					       struct ibmvscsis_cmd, list);
 		if (cmd) {
+			cmd->flags &= ~(DELAY_SEND);
 			list_del(&cmd->list);
 			cmd->iue = iue;
 			cmd->type = UNSET_TYPE;
@@ -1748,45 +1749,79 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 {
 	u64 msg_hi = 0;
-	/* note do not attmempt to access the IU_data_ptr with this pointer
+	/* note do not attempt to access the IU_data_ptr with this pointer
 	 * it is not valid
 	 */
 	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
 	struct ibmvscsis_cmd *cmd, *nxt;
 	struct iu_entry *iue;
 	long rc = ADAPT_SUCCESS;
+	bool retry = false;
 
 	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
-		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-			iue = cmd->iue;
+		do {
+			retry = false;
+			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
+						 list) {
+				/*
+				 * Check to make sure abort cmd gets processed
+				 * prior to the abort tmr cmd
+				 */
+				if (cmd->flags & DELAY_SEND)
+					continue;
 
-			crq->valid = VALID_CMD_RESP_EL;
-			crq->format = cmd->rsp.format;
+				if (cmd->abort_cmd) {
+					retry = true;
+					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+				}
 
-			if (cmd->flags & CMD_FAST_FAIL)
-				crq->status = VIOSRP_ADAPTER_FAIL;
+				/*
+				 * In the CMD_T_ABORTED w/o CMD_T_TAS case, or
+				 * when LIO issued an ABORT_TASK and responded
+				 * with TMR_TASK_DOES_NOT_EXIST, don't send a
+				 * response, since one was already sent.
+				 */
+				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
+				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
+					list_del(&cmd->list);
+					ibmvscsis_free_cmd_resources(vscsi,
+								     cmd);
+				} else {
+					iue = cmd->iue;
 
-			crq->IU_length = cpu_to_be16(cmd->rsp.len);
+					crq->valid = VALID_CMD_RESP_EL;
+					crq->format = cmd->rsp.format;
 
-			rc = h_send_crq(vscsi->dma_dev->unit_address,
-					be64_to_cpu(msg_hi),
-					be64_to_cpu(cmd->rsp.tag));
+					if (cmd->flags & CMD_FAST_FAIL)
+						crq->status = VIOSRP_ADAPTER_FAIL;
 
-			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
-				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+					crq->IU_length = cpu_to_be16(cmd->rsp.len);
 
-			/* if all ok free up the command element resources */
-			if (rc == H_SUCCESS) {
-				/* some movement has occurred */
-				vscsi->rsp_q_timer.timer_pops = 0;
-				list_del(&cmd->list);
+					rc = h_send_crq(vscsi->dma_dev->unit_address,
+							be64_to_cpu(msg_hi),
+							be64_to_cpu(cmd->rsp.tag));
 
-				ibmvscsis_free_cmd_resources(vscsi, cmd);
-			} else {
-				srp_snd_msg_failed(vscsi, rc);
-				break;
+					pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+						 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+
+					/* if all ok free up the command
+					 * element resources
+					 */
+					if (rc == H_SUCCESS) {
+						/* some movement has occurred */
+						vscsi->rsp_q_timer.timer_pops = 0;
+						list_del(&cmd->list);
+
+						ibmvscsis_free_cmd_resources(vscsi,
+									     cmd);
+					} else {
+						srp_snd_msg_failed(vscsi, rc);
+						break;
+					}
+				}
 			}
-		}
+		} while (retry);
 
 		if (!rc) {
 			/*
@@ -2707,6 +2742,7 @@ static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
 
 	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
 	     i++, cmd++) {
+		cmd->abort_cmd = NULL;
 		cmd->adapter = vscsi;
 		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
 		list_add_tail(&cmd->list, &vscsi->free_cmd);
@@ -3578,9 +3614,20 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
 {
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
+	struct scsi_info *vscsi = cmd->adapter;
 	struct iu_entry *iue = cmd->iue;
 	int rc;
 
+	/*
+	 * If CLIENT_FAILED or RESPONSE_Q_DOWN is set, just return success,
+	 * since LIO can't do anything about it and we don't want to
+	 * attempt an srp_transfer_data.
+	 */
+	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
+		pr_err("write_pending failed, adapter flags: %d\n", vscsi->flags);
+		return 0;
+	}
+
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
 			       1, 1);
 	if (rc) {
@@ -3659,11 +3706,28 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
 	struct scsi_info *vscsi = cmd->adapter;
+	struct ibmvscsis_cmd *cmd_itr;
+	struct iu_entry *iue = cmd->iue;
+	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
 	uint len;
 
 	pr_debug("queue_tm_rsp %p, status %d\n",
 		 se_cmd, (int)se_cmd->se_tmr_req->response);
 
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
+	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
+		spin_lock_bh(&vscsi->intr_lock);
+		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
+			if (tag_to_abort == cmd_itr->se_cmd.tag) {
+				cmd_itr->abort_cmd = cmd;
+				cmd->flags |= DELAY_SEND;
+				break;
+			}
+		}
+		spin_unlock_bh(&vscsi->intr_lock);
+	}
+
 	srp_build_response(vscsi, cmd, &len);
 	cmd->rsp.format = SRP_FORMAT;
 	cmd->rsp.len = len;
@@ -3671,8 +3735,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 
 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
 {
-	/* TBD: What (if anything) should we do here? */
-	pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+	pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
+		 se_cmd, se_cmd->tag);
 }
 
 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
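
The send path above now holds back an abort TMR response (DELAY_SEND) until the response for the command being aborted has been queued, then rescans the list once the flag has been dropped. A simplified, self-contained sketch of that ordering over an array instead of the driver's list (names and flag values are stand-ins):

	#include <stdbool.h>
	#include <stdio.h>

	#define DELAY_SEND	0x2

	struct cmd {
		int id;
		unsigned int flags;
		struct cmd *abort_cmd;	/* waiting TMR response, if any */
		bool sent;
	};

	static void send_all(struct cmd *q, int n)
	{
		bool retry;

		do {
			retry = false;
			for (int i = 0; i < n; i++) {
				struct cmd *c = &q[i];

				if (c->sent || (c->flags & DELAY_SEND))
					continue;
				if (c->abort_cmd) {
					/* Aborted command answered; release the TMR. */
					c->abort_cmd->flags &= ~DELAY_SEND;
					retry = true;
				}
				printf("send response for cmd %d\n", c->id);
				c->sent = true;
			}
		} while (retry);
	}

	int main(void)
	{
		struct cmd q[2] = {
			{ .id = 2, .flags = DELAY_SEND },	/* abort TMR response */
			{ .id = 1 },				/* aborted command    */
		};

		q[1].abort_cmd = &q[0];
		send_all(q, 2);		/* cmd 1 first, then the TMR (cmd 2) */
		return 0;
	}
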
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 98b0ca7..f5683af 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -167,10 +167,12 @@ struct ibmvscsis_cmd {
 	struct iu_rsp rsp;
 	struct work_struct work;
 	struct scsi_info *adapter;
+	struct ibmvscsis_cmd *abort_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 	u64 init_time;
 #define CMD_FAST_FAIL	BIT(0)
+#define DELAY_SEND	BIT(1)
 	u32 flags;
 	char type;
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2f6cd95..6418c11 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1523,6 +1523,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	hba->ufs_stats.clk_hold.ts = ktime_get();
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1627,6 +1628,7 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	hba->ufs_stats.clk_rel.ts = ktime_get();
 
 	hrtimer_start(&hba->clk_gating.gate_hrtimer,
 			ms_to_ktime(hba->clk_gating.delay_ms),
@@ -2073,8 +2075,10 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
 
 	/* Exit from hibern8 */
 	if (ufshcd_is_link_hibern8(hba)) {
+		hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 		ufshcd_hold(hba, false);
 		ret = ufshcd_uic_hibern8_exit(hba);
+		hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 		ufshcd_release(hba, false);
 		if (!ret) {
 			spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2500,6 +2504,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	int ret;
 	unsigned long flags;
 
+	hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -2513,6 +2518,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
 	ufshcd_release_all(hba);
+	hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 
 	ufsdbg_error_inject_dispatcher(hba,
 		ERR_INJECT_UIC, 0, &ret);
@@ -2999,6 +3005,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
@@ -3013,6 +3020,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	if (err) {
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		err = SCSI_MLQUEUE_HOST_BUSY;
+		hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 		ufshcd_release(hba, true);
 		goto out;
 	}
@@ -4392,8 +4400,10 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
+	hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 	ufshcd_release_all(hba);
 out:
 	return ret;
@@ -5580,6 +5590,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 			__ufshcd_release(hba, false);
 			__ufshcd_hibern8_release(hba, false);
 			if (cmd->request) {
@@ -6101,6 +6112,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 	    ufshcd_is_auto_hibern8_supported(hba))) {
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 		ufshcd_hold(hba, false);
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		clks_enabled = true;
@@ -6245,8 +6257,10 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba->silence_err_logs = false;
 
-	if (clks_enabled)
+	if (clks_enabled) {
 		__ufshcd_release(hba, false);
+		hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+	}
 out:
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6482,7 +6496,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
+	hba->ufs_stats.last_intr_status = intr_status;
+	hba->ufs_stats.last_intr_ts = ktime_get();
 	/*
 	 * There could be max of hba->nutrs reqs in flight and in worst case
 	 * if the reqs get finished 1 by 1 after the interrupt status is
@@ -6561,6 +6576,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -6618,6 +6634,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
+	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 
 	ufshcd_release_all(hba);
 	return err;
@@ -9635,6 +9652,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 	int ret = 0;
 
 	/* let's not get into low power until clock scaling is completed */
+	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 	ufshcd_hold_all(hba);
 
 	ret = ufshcd_clock_scaling_prepare(hba);
@@ -9698,6 +9716,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 clk_scaling_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
 out:
+	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 	ufshcd_release_all(hba);
 	return ret;
 }
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6966aac..77ccc39 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -584,6 +584,22 @@ struct ufshcd_req_stat {
 };
 #endif
 
+enum ufshcd_ctx {
+	QUEUE_CMD,
+	ERR_HNDLR_WORK,
+	H8_EXIT_WORK,
+	UIC_CMD_SEND,
+	PWRCTL_CMD_SEND,
+	TM_CMD_SEND,
+	XFR_REQ_COMPL,
+	CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+	ktime_t ts;
+	enum ufshcd_ctx ctx;
+};
+
 /**
  * struct ufs_stats - keeps usage/err statistics
  * @enabled: enable tag stats for debugfs
@@ -612,6 +628,10 @@ struct ufs_stats {
 	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
 
 #endif
+	u32 last_intr_status;
+	ktime_t last_intr_ts;
+	struct ufshcd_clk_ctx clk_hold;
+	struct ufshcd_clk_ctx clk_rel;
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
 	struct ufs_uic_err_reg_hist pa_err;
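
The ufshcd changes thread a small (timestamp, context) record through every clock hold/release site so a later dump can tell which path last gated or ungated the clocks. A sketch of that bookkeeping pattern; the context values mirror the new ufshcd_ctx enum, while clock_gettime() stands in for ktime_get():

	#include <stdio.h>
	#include <time.h>

	enum ctx { QUEUE_CMD, ERR_HNDLR_WORK, H8_EXIT_WORK, TM_CMD_SEND };

	struct clk_ctx { struct timespec ts; enum ctx who; };

	struct stats { struct clk_ctx clk_hold, clk_rel; };

	static void record(struct clk_ctx *c, enum ctx who)
	{
		clock_gettime(CLOCK_MONOTONIC, &c->ts);	/* stand-in for ktime_get() */
		c->who = who;
	}

	int main(void)
	{
		struct stats st;

		record(&st.clk_hold, QUEUE_CMD);	/* before the hold call    */
		record(&st.clk_rel, TM_CMD_SEND);	/* before the release call */

		printf("last hold ctx=%d, last release ctx=%d\n",
		       st.clk_hold.who, st.clk_rel.who);
		return 0;
	}
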
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index f7f0269..a72cb17 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -223,6 +223,7 @@ static int dsp_domr_notify_cb(struct notifier_block *n, unsigned long code,
 		/* make sure autosuspend is not called until ADSP comes up*/
 		pm_runtime_get_noresume(dev->dev);
 		dev->state = MSM_CTRL_DOWN;
+		dev->qmi.deferred_resp = false;
 		msm_slim_sps_exit(dev, false);
 		ngd_dom_down(dev);
 		mutex_unlock(&dev->tx_lock);
@@ -2019,19 +2020,18 @@ static int ngd_slim_suspend(struct device *dev)
 	if (!pm_runtime_enabled(dev) ||
 		(!pm_runtime_suspended(dev) &&
 			cdev->state == MSM_CTRL_IDLE)) {
+		cdev->qmi.deferred_resp = true;
 		ret = ngd_slim_runtime_suspend(dev);
 		/*
 		 * If runtime-PM still thinks it's active, then make sure its
 		 * status is in sync with HW status.
-		 * Since this suspend calls QMI api, it results in holding a
-		 * wakelock. That results in failure of first suspend.
-		 * Subsequent suspend should not call low-power transition
-		 * again since the HW is already in suspended state.
 		 */
 		if (!ret) {
 			pm_runtime_disable(dev);
 			pm_runtime_set_suspended(dev);
 			pm_runtime_enable(dev);
+		} else {
+			cdev->qmi.deferred_resp = false;
 		}
 	}
 	if (ret == -EBUSY) {
@@ -2053,13 +2053,29 @@ static int ngd_slim_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	/*
+	 * If a deferred response was requested for the power-off and the
+	 * status query for it fails, mark the runtime-PM status as active
+	 * to stay consistent with the HW state.
+	 */
+	if (cdev->qmi.deferred_resp) {
+		ret = msm_slim_qmi_deferred_status_req(cdev);
+		if (ret) {
+			pm_runtime_disable(dev);
+			pm_runtime_set_active(dev);
+			pm_runtime_enable(dev);
+		}
+		cdev->qmi.deferred_resp = false;
+	}
 	/*
 	 * Rely on runtime-PM to call resume in case it is enabled.
 	 * Even if it's not enabled, rely on 1st client transaction to do
 	 * clock/power on
 	 */
 	SLIM_INFO(cdev, "system resume\n");
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index e7d3381..ef10e64 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -1224,12 +1224,16 @@ void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
 #define SLIMBUS_QMI_POWER_RESP_V01 0x0021
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ 0x0023
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP 0x0023
 
-#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ_MSG_MAX_MSG_LEN 0
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN 7
 
 enum slimbus_mode_enum_type_v01 {
 	/* To force a 32 bit signed enum. Do not change or use*/
@@ -1247,6 +1251,13 @@ enum slimbus_pm_enum_type_v01 {
 	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
+enum slimbus_resp_enum_type_v01 {
+	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+	SLIMBUS_RESP_DEFERRED_V01 = 2,
+	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
 struct slimbus_select_inst_req_msg_v01 {
 	/* Mandatory */
 	/* Hardware Instance Selection */
@@ -1269,6 +1280,12 @@ struct slimbus_power_req_msg_v01 {
 	/* Mandatory */
 	/* Power Request Operation */
 	enum slimbus_pm_enum_type_v01 pm_req;
+
+	/* Optional */
+	/* Deferred response type for the power operation */
+	/* resp_type_valid must be set to 1 when resp_type is passed */
+	uint8_t resp_type_valid;
+	enum slimbus_resp_enum_type_v01 resp_type;
 };
 
 struct slimbus_power_resp_msg_v01 {
@@ -1283,6 +1300,9 @@ struct slimbus_chkfrm_resp_msg {
 	struct qmi_response_type_v01 resp;
 };
 
+struct slimbus_deferred_status_resp {
+	struct qmi_response_type_v01 resp;
+};
 
 static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
 	{
@@ -1359,6 +1379,24 @@ static struct elem_info slimbus_power_req_msg_v01_ei[] = {
 		.ei_array  = NULL,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum slimbus_resp_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type),
+	},
+	{
 		.data_type = QMI_EOTI,
 		.elem_len  = 0,
 		.elem_size = 0,
@@ -1411,6 +1449,22 @@ static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
 	},
 };
 
+static struct elem_info slimbus_deferred_status_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct slimbus_deferred_status_resp,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+	},
+};
 static void msm_slim_qmi_recv_msg(struct kthread_work *work)
 {
 	int rc;
@@ -1488,32 +1542,56 @@ static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
 	return 0;
 }
 
+static void slim_qmi_resp_cb(struct qmi_handle *handle, unsigned int msg_id,
+			     void *msg, void *resp_cb_data, int stat)
+{
+	struct slimbus_power_resp_msg_v01 *resp = msg;
+	struct msm_slim_ctrl *dev = resp_cb_data;
+
+	if (msg_id != SLIMBUS_QMI_POWER_RESP_V01)
+		SLIM_WARN(dev, "incorrect msg id in qmi-resp CB:0x%x", msg_id);
+	else if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+		SLIM_ERR(dev, "%s: QMI power failed 0x%x (%s)\n", __func__,
+			 resp->resp.result, get_qmi_error(&resp->resp));
+
+	complete(&dev->qmi.defer_comp);
+}
+
 static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
 				struct slimbus_power_req_msg_v01 *req)
 {
-	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
-	struct msg_desc req_desc, resp_desc;
+	struct slimbus_power_resp_msg_v01 *resp =
+		(struct slimbus_power_resp_msg_v01 *)&dev->qmi.resp;
+	struct msg_desc req_desc;
+	struct msg_desc *resp_desc = &dev->qmi.resp_desc;
 	int rc;
 
 	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
 	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
 	req_desc.ei_array = slimbus_power_req_msg_v01_ei;
 
-	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
-	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
-	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+	resp_desc->msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+	resp_desc->max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+	resp_desc->ei_array = slimbus_power_resp_msg_v01_ei;
 
-	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
-			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
-	if (rc < 0) {
+	if (dev->qmi.deferred_resp)
+		rc = qmi_send_req_nowait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), slim_qmi_resp_cb, dev);
+	else
+		rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0)
 		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+
+	if (rc < 0 || dev->qmi.deferred_resp)
 		return rc;
-	}
 
 	/* Check the response */
-	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
 		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
-				resp.resp.result, get_qmi_error(&resp.resp));
+				resp->resp.result, get_qmi_error(&resp->resp));
 		return -EREMOTEIO;
 	}
 
@@ -1527,6 +1605,7 @@ int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
 	struct slimbus_select_inst_req_msg_v01 req;
 
 	kthread_init_worker(&dev->qmi.kworker);
+	init_completion(&dev->qmi.defer_comp);
 
 	dev->qmi.task = kthread_run(kthread_worker_fn,
 			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
@@ -1604,6 +1683,13 @@ int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
 	else
 		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
 
+	if (dev->qmi.deferred_resp) {
+		req.resp_type = SLIMBUS_RESP_DEFERRED_V01;
+		req.resp_type_valid = 1;
+	} else {
+		req.resp_type_valid = 0;
+	}
+
 	return msm_slim_qmi_send_power_request(dev, &req);
 }
 
@@ -1635,3 +1721,46 @@ int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
 	}
 	return 0;
 }
+
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev)
+{
+	struct slimbus_deferred_status_resp resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_REQ;
+	req_desc.max_msg_len = 0;
+	req_desc.ei_array = NULL;
+
+	resp_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_RESP;
+	resp_desc.max_msg_len =
+		SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_deferred_status_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+			__func__, resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	/* wait for the deferred response */
+	rc = wait_for_completion_timeout(&dev->qmi.defer_comp, HZ);
+	if (rc == 0) {
+		SLIM_WARN(dev, "slimbus power deferred response not rcvd\n");
+		return -ETIMEDOUT;
+	}
+	/* Check what response we got in callback */
+	if (dev->qmi.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_WARN(dev, "QMI power req failed in CB\n");
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
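
The deferred-response path added above sends the power-off request without blocking during system suspend; resume later issues a deferred-status request and waits on a completion that the asynchronous QMI callback fills in. A condensed sketch of that handshake, with a plain flag standing in for the kernel completion and QMI transport (all names illustrative, not the driver's API):

	#include <stdbool.h>
	#include <stdio.h>

	struct qmi_state {
		bool deferred_resp;	/* power-off sent without waiting     */
		bool resp_arrived;	/* set by the async response callback */
		int  resp_result;
	};

	static void power_off(struct qmi_state *q, bool system_suspend)
	{
		q->deferred_resp = system_suspend;
		if (q->deferred_resp)
			printf("send power-off, response deferred\n");
		else
			printf("send power-off, wait for response\n");
	}

	static void response_cb(struct qmi_state *q, int result)
	{
		q->resp_result = result;
		q->resp_arrived = true;	/* complete(&defer_comp) in the driver */
	}

	static int resume_check(struct qmi_state *q)
	{
		if (!q->deferred_resp)
			return 0;
		q->deferred_resp = false;
		if (!q->resp_arrived)
			return -1;	/* would wait_for_completion_timeout() */
		return q->resp_result;
	}

	int main(void)
	{
		struct qmi_state q = { 0 };

		power_off(&q, true);
		response_cb(&q, 0);
		printf("resume status: %d\n", resume_check(&q));
		return 0;
	}
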
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 65b9fae..ee0f625 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -228,6 +228,10 @@ struct msm_slim_qmi {
 	struct kthread_worker		kworker;
 	struct completion		qmi_comp;
 	struct notifier_block		nb;
+	bool				deferred_resp;
+	struct qmi_response_type_v01	resp;
+	struct msg_desc			resp_desc;
+	struct completion		defer_comp;
 };
 
 enum msm_slim_dom {
@@ -437,4 +441,5 @@ void msm_slim_qmi_exit(struct msm_slim_ctrl *dev);
 int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
 int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
 int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev);
 #endif
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0bdcc99..ec85506 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -616,6 +616,15 @@
 	  This enables bare minimum support of power management at platform level.
 	  i.e WFI
 
+config MSM_QBT1000
+	bool "QBT1000 Ultrasonic Fingerprint Sensor"
+	help
+	  This driver provides services for configuring the fingerprint
+	  sensor hardware and for communicating with the trusted app which
+	  uses it. It enables clocks and provides commands for loading
+	  trusted apps, unloading them and marshalling buffers to the
+	  trusted fingerprint app.
+
 config APSS_CORE_EA
 	depends on CPU_FREQ && PM_OPP
 	bool "Qualcomm Technology Inc specific power aware driver"
@@ -667,7 +676,7 @@
 
 config QTI_RPM_STATS_LOG
 	bool "Qualcomm Technologies RPM Stats Driver"
-	depends on DEBUG_FS
+	depends on SYSFS
 	default n
 	help
 	  This option enables a driver which reads RPM messages from a shared
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9d175cd..4c59ca6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,4 @@
+KASAN_SANITIZE_scm.o := n
 obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
@@ -65,6 +66,7 @@
        obj-y += ramdump.o
 endif
 obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_MSM_QBT1000) += qbt1000.o
 obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
 obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
 obj-$(CONFIG_MSM_IDLE_STATS)	+= lpm-stats.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 4d2f54d..42f146d 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -74,6 +74,12 @@
 #define DCC_RD_MOD_WR_DESCRIPTOR	(BIT(31))
 #define DCC_LINK_DESCRIPTOR		(BIT(31) | BIT(30))
 
+#define DCC_READ_IND			0x00
+#define DCC_WRITE_IND			(BIT(28))
+
+#define DCC_AHB_IND			0x00
+#define DCC_APB_IND			BIT(29)
+
 #define DCC_MAX_LINK_LIST		5
 #define DCC_INVALID_LINK_LIST		0xFF
 
@@ -92,6 +98,13 @@ enum dcc_data_sink {
 	DCC_DATA_SINK_ATB
 };
 
+enum dcc_descriptor_type {
+	DCC_ADDR_TYPE,
+	DCC_LOOP_TYPE,
+	DCC_READ_WRITE_TYPE,
+	DCC_WRITE_TYPE
+};
+
 static const char * const str_dcc_data_sink[] = {
 	[DCC_DATA_SINK_SRAM]		= "sram",
 	[DCC_DATA_SINK_ATB]		= "atb",
@@ -103,15 +116,16 @@ struct rpm_trig_req {
 };
 
 struct dcc_config_entry {
-	uint32_t		base;
-	uint32_t		offset;
-	uint32_t		len;
-	uint32_t		index;
-	uint32_t		loop_cnt;
-	uint32_t		rd_mod_wr;
-	uint32_t		mask;
-	bool			rd_wr_entry;
-	struct list_head	list;
+	uint32_t			base;
+	uint32_t			offset;
+	uint32_t			len;
+	uint32_t			index;
+	uint32_t			loop_cnt;
+	uint32_t			write_val;
+	uint32_t			mask;
+	bool				apb_bus;
+	enum dcc_descriptor_type	desc_type;
+	struct list_head		list;
 };
 
 struct dcc_drvdata {
@@ -140,6 +154,7 @@ struct dcc_drvdata {
 	void			*sram_buf;
 	struct msm_dump_data	sram_data;
 	uint8_t			curr_list;
+	uint8_t			cti_trig;
 };
 
 static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -189,7 +204,7 @@ static int dcc_sw_trigger(struct dcc_drvdata *drvdata)
 	mutex_lock(&drvdata->mutex);
 
 	if (!dcc_ready(drvdata)) {
-		dev_err(drvdata->dev, "DCC is not ready!\n");
+		dev_err(drvdata->dev, "DCC is not ready\n");
 		ret = -EBUSY;
 		goto err;
 	}
@@ -224,7 +239,7 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 	uint32_t loop_off = 0;
 	uint32_t link;
 	uint32_t pos, total_len = 0, loop_len = 0;
-	uint32_t loop, loop_cnt;
+	uint32_t loop, loop_cnt = 0;
 	bool loop_start = false;
 	struct dcc_config_entry *entry;
 
@@ -233,7 +248,9 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 	link = 0;
 
 	list_for_each_entry(entry, &drvdata->cfg_head[curr_list], list) {
-		if (entry->rd_wr_entry) {
+		switch (entry->desc_type) {
+		case DCC_READ_WRITE_TYPE:
+		{
 			if (link) {
 				/* write new offset = 1 to continue
 				 * processing the list
@@ -255,12 +272,14 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 			dcc_sram_writel(drvdata, entry->mask, sram_offset);
 				sram_offset += 4;
 
-			dcc_sram_writel(drvdata, entry->rd_mod_wr, sram_offset);
+			dcc_sram_writel(drvdata, entry->write_val, sram_offset);
 				sram_offset += 4;
-			continue;
+			addr = 0;
+			break;
 		}
 
-		if (entry->loop_cnt) {
+		case DCC_LOOP_TYPE:
+		{
 			/* Check if we need to write link of prev entry */
 			if (link) {
 				dcc_sram_writel(drvdata, link, sram_offset);
@@ -292,73 +311,130 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 			prev_off = 0;
 			prev_addr = addr;
 
-			continue;
+			break;
 		}
 
-		/* Address type */
-		addr = (entry->base >> 4) & BM(0, 27);
-		addr |= DCC_ADDR_DESCRIPTOR;
-		off = entry->offset/4;
-		total_len += entry->len * 4;
-
-		if (!prev_addr || prev_addr != addr || prev_off > off) {
-			/* Check if we need to write link of prev entry */
+		case DCC_WRITE_TYPE:
+		{
 			if (link) {
+				/* write new offset = 1 to continue
+				 * processing the list
+				 */
+				link |= ((0x1 << 8) & BM(8, 14));
 				dcc_sram_writel(drvdata, link, sram_offset);
 				sram_offset += 4;
+				/* Reset link and prev_off */
+				addr = 0x00;
+				prev_off = 0;
+				prev_addr = addr;
 			}
-			dev_err(drvdata->dev,
-				"DCC: sram address.%d\n", sram_offset);
 
-			/* Write address */
+			off = entry->offset/4;
+			/* write new offset-length pair to correct position */
+			link |= ((off & BM(0, 7)) | BIT(15) |
+				 ((entry->len << 8) & BM(8, 14)));
+			link |= DCC_LINK_DESCRIPTOR;
+
+			/* Address type */
+			addr = (entry->base >> 4) & BM(0, 27);
+			if (entry->apb_bus)
+				addr |= DCC_ADDR_DESCRIPTOR | DCC_WRITE_IND
+					| DCC_APB_IND;
+			else
+				addr |= DCC_ADDR_DESCRIPTOR | DCC_WRITE_IND
+					| DCC_AHB_IND;
+
 			dcc_sram_writel(drvdata, addr, sram_offset);
-			sram_offset += 4;
+				sram_offset += 4;
 
-			/* Reset link and prev_off */
-			link = 0;
-			prev_off = 0;
-		}
-
-		if ((off - prev_off) > 0xFF || entry->len > MAX_DCC_LEN) {
-			dev_err(drvdata->dev,
-				"DCC: Progamming error! Base: 0x%x, offset 0x%x.\n",
-				entry->base, entry->offset);
-			ret = -EINVAL;
-			goto err;
-		}
-
-		if (link) {
-			/*
-			 * link already has one offset-length so new
-			 * offset-length needs to be placed at bits [29:15]
-			 */
-			pos = 15;
-
-			/* Clear bits [31:16] */
-			link &= BM(0, 14);
-		} else {
-			/*
-			 * link is empty, so new offset-length needs to be
-			 * placed at bits [15:0]
-			 */
-			pos = 0;
-			link = 1 << 15;
-		}
-
-		/* write new offset-length pair to correct position */
-		link |= (((off-prev_off) & BM(0, 7)) |
-			 ((entry->len << 8) & BM(8, 14))) << pos;
-
-		link |= DCC_LINK_DESCRIPTOR;
-
-		if (pos) {
 			dcc_sram_writel(drvdata, link, sram_offset);
-			sram_offset += 4;
+				sram_offset += 4;
+
+			dcc_sram_writel(drvdata, entry->write_val, sram_offset);
+				sram_offset += 4;
+			addr = 0x00;
 			link = 0;
+			break;
 		}
 
-		prev_off  = off;
-		prev_addr = addr;
+		default:
+		{
+			/* Address type */
+			addr = (entry->base >> 4) & BM(0, 27);
+			if (entry->apb_bus)
+				addr |= DCC_ADDR_DESCRIPTOR | DCC_READ_IND
+					| DCC_APB_IND;
+			else
+				addr |= DCC_ADDR_DESCRIPTOR | DCC_READ_IND
+					| DCC_AHB_IND;
+
+			off = entry->offset/4;
+			total_len += entry->len * 4;
+
+			if (!prev_addr || prev_addr != addr || prev_off > off) {
+				/* Check if we need to write prev link entry */
+				if (link) {
+					dcc_sram_writel(drvdata,
+							link, sram_offset);
+					sram_offset += 4;
+				}
+				dev_dbg(drvdata->dev,
+					"DCC: sram address 0x%x\n",
+					sram_offset);
+
+				/* Write address */
+				dcc_sram_writel(drvdata, addr, sram_offset);
+				sram_offset += 4;
+
+				/* Reset link and prev_off */
+				link = 0;
+				prev_off = 0;
+			}
+
+			if ((off - prev_off) > 0xFF ||
+			    entry->len > MAX_DCC_LEN) {
+				dev_err(drvdata->dev,
+					"DCC: Programming error, base 0x%x, offset 0x%x\n",
+					entry->base, entry->offset);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			if (link) {
+				/*
+				 * link already has one offset-length so new
+				 * offset-length needs to be placed at
+				 * bits [29:15]
+				 */
+				pos = 15;
+
+				/* Clear bits [31:16] */
+				link &= BM(0, 14);
+			} else {
+				/*
+				 * link is empty, so new offset-length needs
+				 * to be placed at bits [15:0]
+				 */
+				pos = 0;
+				link = 1 << 15;
+			}
+
+			/* write new offset-length pair to correct position */
+			link |= (((off-prev_off) & BM(0, 7)) |
+				 ((entry->len << 8) & BM(8, 14))) << pos;
+
+			link |= DCC_LINK_DESCRIPTOR;
+
+			if (pos) {
+				dcc_sram_writel(drvdata, link, sram_offset);
+				sram_offset += 4;
+				link = 0;
+			}
+
+			prev_off  = off;
+			prev_addr = addr;
+			}
+		}
 	}
 
 	if (link) {
@@ -368,7 +444,7 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 
 	if (loop_start) {
 		dev_err(drvdata->dev,
-			"DCC: Progamming error! Loop unterminated.\n");
+			"DCC: Progamming error: Loop unterminated\n");
 		ret = -EINVAL;
 		goto err;
 	}
@@ -457,7 +533,7 @@ static void __dcc_first_crc(struct dcc_drvdata *drvdata)
 	 */
 	for (i = 0; i < 2; i++) {
 		if (!dcc_ready(drvdata))
-			dev_err(drvdata->dev, "DCC is not ready!\n");
+			dev_err(drvdata->dev, "DCC is not ready\n");
 
 		dcc_writel(drvdata, 1,
 			   DCC_LL_SW_TRIGGER(drvdata->curr_list));
@@ -476,13 +552,13 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
 		return -EINVAL;
 
 	if (drvdata->enable[curr_list]) {
-		dev_err(drvdata->dev, "DCC is already enabled!\n");
+		dev_err(drvdata->dev, "DCC is already enabled\n");
 		return -EINVAL;
 	}
 
 	lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(curr_list));
 	if (lock_reg & 0x1) {
-		dev_err(drvdata->dev, "DCC is already enabled!\n");
+		dev_err(drvdata->dev, "DCC is already enabled\n");
 		return -EINVAL;
 	}
 
@@ -525,8 +601,9 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 			dcc_writel(drvdata, 0, DCC_LL_TIMEOUT(list));
 		}
 
-		/* 4. Configure data sink and function type */
-		dcc_writel(drvdata, ((drvdata->data_sink << 4) |
+		/* 4. Configure trigger, data sink and function type */
+		dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig << 8) |
+			   (drvdata->data_sink << 4) |
 			   (drvdata->func_type[list])), DCC_LL_CFG(list));
 
 		/* 5. Clears interrupt status register */
@@ -565,7 +642,7 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
 	mutex_lock(&drvdata->mutex);
 
 	if (!dcc_ready(drvdata))
-		dev_err(drvdata->dev, "DCC is not ready! Disabling DCC...\n");
+		dev_err(drvdata->dev, "DCC is not ready Disabling DCC...\n");
 
 	for (curr_list = 0; curr_list < DCC_MAX_LINK_LIST; curr_list++) {
 		if (!drvdata->enable[curr_list])
@@ -600,7 +677,7 @@ static ssize_t dcc_curr_list(struct device *dev,
 	mutex_lock(&drvdata->mutex);
 	lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(val));
 	if (lock_reg & 0x1) {
-		dev_err(drvdata->dev, "DCC linked list is already configured!\n");
+		dev_err(drvdata->dev, "DCC linked list is already configured\n");
 		mutex_unlock(&drvdata->mutex);
 		return -EINVAL;
 	}
@@ -783,25 +860,36 @@ static ssize_t dcc_show_config(struct device *dev,
 	mutex_lock(&drvdata->mutex);
 	list_for_each_entry(entry,
 			    &drvdata->cfg_head[drvdata->curr_list], list) {
-		if (entry->rd_wr_entry)
+		switch (entry->desc_type) {
+		case DCC_READ_WRITE_TYPE:
 			len = snprintf(local_buf, 64,
 				       "Index: 0x%x, mask: 0x%x, val: 0x%x\n",
 				       entry->index, entry->mask,
-				       entry->rd_mod_wr);
-		else if (entry->loop_cnt)
+				       entry->write_val);
+			break;
+		case DCC_LOOP_TYPE:
 			len = snprintf(local_buf, 64, "Index: 0x%x, Loop: %d\n",
 				       entry->index, entry->loop_cnt);
-		else
-			len = snprintf(local_buf, 64,
-				       "Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x\n",
-				       entry->index, entry->base,
-				       entry->offset, entry->len);
-
-		if ((count + len) > PAGE_SIZE) {
-			dev_err(dev, "DCC: Couldn't write complete config!\n");
 			break;
+		case DCC_WRITE_TYPE:
+			len = snprintf(local_buf, 64,
+				       "Write Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x APB: %d\n",
+				       entry->index, entry->base,
+				       entry->offset, entry->len,
+				       entry->apb_bus);
+			break;
+		default:
+			len = snprintf(local_buf, 64,
+				       "Read Index: 0x%x, Base: 0x%x, Offset: 0x%x, len: 0x%x APB: %d\n",
+				       entry->index, entry->base,
+				       entry->offset, entry->len,
+				       entry->apb_bus);
 		}
 
+		if ((count + len) > PAGE_SIZE) {
+			dev_err(dev, "DCC: Couldn't write complete config\n");
+			break;
+		}
 		strlcat(buf, local_buf, PAGE_SIZE);
 		count += len;
 	}
@@ -812,7 +900,7 @@ static ssize_t dcc_show_config(struct device *dev,
 }
 
 static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr,
-			  unsigned int len)
+			  unsigned int len, int apb_bus)
 {
 	int ret;
 	struct dcc_config_entry *entry, *pentry;
@@ -821,7 +909,7 @@ static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr,
 	mutex_lock(&drvdata->mutex);
 
 	if (!len) {
-		dev_err(drvdata->dev, "DCC: Invalid length!\n");
+		dev_err(drvdata->dev, "DCC: Invalid length\n");
 		ret = -EINVAL;
 		goto err;
 	}
@@ -883,6 +971,8 @@ static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr,
 		entry->offset = offset;
 		entry->len = min_t(uint32_t, len, MAX_DCC_LEN);
 		entry->index = drvdata->nr_config[drvdata->curr_list]++;
+		entry->desc_type = DCC_ADDR_TYPE;
+		entry->apb_bus = apb_bus;
 		INIT_LIST_HEAD(&entry->list);
 		list_add_tail(&entry->list,
 			      &drvdata->cfg_head[drvdata->curr_list]);
@@ -902,24 +992,30 @@ static ssize_t dcc_store_config(struct device *dev,
 				struct device_attribute *attr,
 				const char *buf, size_t size)
 {
-	int ret, len;
+	int ret, len, apb_bus;
 	unsigned int base;
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 	int nval;
 
-	nval = sscanf(buf, "%x %i", &base, &len);
-	if (nval <= 0 || nval > 2)
+	nval = sscanf(buf, "%x %i %d", &base, &len, &apb_bus);
+	if (nval <= 0 || nval > 3)
 		return -EINVAL;
 
-	if (nval == 1)
-		len = 1;
-
 	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
 		dev_err(dev, "Select link list to program using curr_list\n");
 		return -EINVAL;
 	}
 
-	ret = dcc_config_add(drvdata, base, len);
+	if (nval == 1) {
+		len = 1;
+		apb_bus = 0;
+	} else if (nval == 2) {
+		apb_bus = 0;
+	} else {
+		apb_bus = 1;
+	}
+
+	ret = dcc_config_add(drvdata, base, len, apb_bus);
 	if (ret)
 		return ret;
 
@@ -1087,7 +1183,7 @@ static ssize_t dcc_rd_mod_wr(struct device *dev,
 	}
 
 	if (list_empty(&drvdata->cfg_head[drvdata->curr_list])) {
-		dev_err(drvdata->dev, "DCC: No read address programmed!\n");
+		dev_err(drvdata->dev, "DCC: No read address programmed\n");
 		ret = -EPERM;
 		goto err;
 	}
@@ -1098,9 +1194,9 @@ static ssize_t dcc_rd_mod_wr(struct device *dev,
 		goto err;
 	}
 
-	entry->rd_wr_entry = true;
+	entry->desc_type = DCC_READ_WRITE_TYPE;
 	entry->mask = mask;
-	entry->rd_mod_wr = val;
+	entry->write_val = val;
 	entry->index = drvdata->nr_config[drvdata->curr_list]++;
 	INIT_LIST_HEAD(&entry->list);
 	list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
@@ -1110,6 +1206,91 @@ static ssize_t dcc_rd_mod_wr(struct device *dev,
 }
 static DEVICE_ATTR(rd_mod_wr, 0200, NULL, dcc_rd_mod_wr);
 
+static ssize_t dcc_write(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret = size;
+	int nval;
+	unsigned int addr, write_val;
+	int apb_bus;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+	struct dcc_config_entry *entry;
+
+	mutex_lock(&drvdata->mutex);
+
+	nval = sscanf(buf, "%x %x %d", &addr, &write_val, &apb_bus);
+
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (nval <= 1 || nval > 3) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (nval == 3)
+		entry->apb_bus = true;
+
+	entry->desc_type = DCC_WRITE_TYPE;
+	entry->base = addr & BM(4, 31);
+	entry->offset = addr - entry->base;
+	entry->write_val = write_val;
+	entry->index = drvdata->nr_config[drvdata->curr_list]++;
+	entry->len = 1;
+	INIT_LIST_HEAD(&entry->list);
+	list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(config_write, 0200, NULL, dcc_write);
+
+static ssize_t dcc_show_cti_trig(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", drvdata->cti_trig);
+}
+
+static ssize_t dcc_store_cti_trig(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret = size;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->enable[drvdata->curr_list]) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (val)
+		drvdata->cti_trig = 1;
+	else
+		drvdata->cti_trig = 0;
+out:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(cti_trig, 0644,
+		   dcc_show_cti_trig, dcc_store_cti_trig);
+
 static const struct device_attribute *dcc_attrs[] = {
 	&dev_attr_func_type,
 	&dev_attr_data_sink,
@@ -1123,6 +1304,8 @@ static const struct device_attribute *dcc_attrs[] = {
 	&dev_attr_loop,
 	&dev_attr_rd_mod_wr,
 	&dev_attr_curr_list,
+	&dev_attr_config_write,
+	&dev_attr_cti_trig,
 	NULL,
 };
 
@@ -1134,7 +1317,7 @@ static int dcc_create_files(struct device *dev,
 	for (i = 0; attrs[i] != NULL; i++) {
 		ret = device_create_file(dev, attrs[i]);
 		if (ret) {
-			dev_err(dev, "DCC: Couldn't create sysfs attribute: %s!\n",
+			dev_err(dev, "DCC: Couldn't create sysfs attribute: %s\n",
 				attrs[i]->attr.name);
 			break;
 		}
@@ -1173,7 +1356,7 @@ static ssize_t dcc_sram_read(struct file *file, char __user *data,
 
 	if (copy_to_user(data, buf, len)) {
 		dev_err(drvdata->dev,
-			"DCC: Couldn't copy all data to user!\n");
+			"DCC: Couldn't copy all data to user\n");
 		kfree(buf);
 		return -EFAULT;
 	}
@@ -1372,7 +1555,7 @@ static int dcc_probe(struct platform_device *pdev)
 			}
 
 		if (i == ARRAY_SIZE(str_dcc_data_sink)) {
-			dev_err(dev, "Unknown sink type for DCC! Using '%s' as data sink\n",
+			dev_err(dev, "Unknown sink type for DCC Using '%s' as data sink\n",
 				str_dcc_data_sink[drvdata->data_sink]);
 		}
 	}
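
The reworked list programming above packs up to two (offset, length) pairs into a single 32-bit link descriptor: the first pair in bits [14:0], the second in bits [29:15], with bits [31:30] marking the word as a link entry. A compilable sketch of that packing; BM() is re-declared locally with the usual bits-[msb:lsb] meaning, which is an assumption about the driver's helper:

	#include <stdint.h>
	#include <stdio.h>

	/* Bitmask covering bits lsb..msb inclusive. */
	#define BM(lsb, msb) \
		((0xffffffffu >> (31u - (msb))) & ~((1u << (lsb)) - 1u))

	#define DCC_LINK_DESCRIPTOR	((1u << 31) | (1u << 30))

	/* Pack one (offset, len) pair into an existing link word.  The first
	 * pair lands in bits [14:0]; a second call moves to bits [29:15]. */
	static uint32_t link_add_pair(uint32_t link, uint32_t off, uint32_t len)
	{
		unsigned int pos;

		if (link) {
			pos = 15;
			link &= BM(0, 14);	/* keep the first pair only */
		} else {
			pos = 0;
			link = 1u << 15;	/* mark "one pair present"  */
		}

		link |= ((off & BM(0, 7)) | ((len << 8) & BM(8, 14))) << pos;
		return link | DCC_LINK_DESCRIPTOR;
	}

	int main(void)
	{
		uint32_t link = link_add_pair(0, 0x10, 3);

		link = link_add_pair(link, 0x24, 1);
		printf("link = 0x%08x\n", link);
		return 0;
	}
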
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 0c562ec..5156bc1 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/random.h>
+#include <linux/hw_random.h>
 #include <linux/io.h>
 
 #include <soc/qcom/scm.h>
@@ -57,7 +58,7 @@ void __init init_random_pool(void)
 	if (!ret) {
 		dmac_inv_range(random_buffer, random_buffer +
 						RANDOM_BUFFER_SIZE);
-		add_device_randomness(random_buffer, SZ_512);
+		add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3);
 	}
 }
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 69e0ebc..e3b5826 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -691,6 +691,8 @@ static int icnss_qmi_pin_connect_result_ind(void *msg, unsigned int msg_len)
 		goto out;
 	}
 
+	memset(&ind_msg, 0, sizeof(ind_msg));
+
 	ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
 	ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
 	ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
@@ -3969,6 +3971,9 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
 	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
+	kfree(priv->diag_reg_read_buf);
+	priv->diag_reg_read_buf = NULL;
+
 	reg_buf = kzalloc(data_len, GFP_KERNEL);
 	if (!reg_buf)
 		return -ENOMEM;
@@ -4002,12 +4007,13 @@ static const struct file_operations icnss_regread_fops = {
 	.llseek         = seq_lseek,
 };
 
+#ifdef CONFIG_ICNSS_DEBUG
 static int icnss_debugfs_create(struct icnss_priv *priv)
 {
 	int ret = 0;
 	struct dentry *root_dentry;
 
-	root_dentry = debugfs_create_dir("icnss", 0);
+	root_dentry = debugfs_create_dir("icnss", NULL);
 
 	if (IS_ERR(root_dentry)) {
 		ret = PTR_ERR(root_dentry);
@@ -4017,19 +4023,40 @@ static int icnss_debugfs_create(struct icnss_priv *priv)
 
 	priv->root_dentry = root_dentry;
 
-	debugfs_create_file("fw_debug", 0644, root_dentry, priv,
+	debugfs_create_file("fw_debug", 0600, root_dentry, priv,
 			    &icnss_fw_debug_fops);
 
-	debugfs_create_file("stats", 0644, root_dentry, priv,
+	debugfs_create_file("stats", 0600, root_dentry, priv,
 			    &icnss_stats_fops);
 	debugfs_create_file("reg_read", 0600, root_dentry, priv,
 			    &icnss_regread_fops);
-	debugfs_create_file("reg_write", 0644, root_dentry, priv,
+	debugfs_create_file("reg_write", 0600, root_dentry, priv,
 			    &icnss_regwrite_fops);
 
 out:
 	return ret;
 }
+#else
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		icnss_pr_err("Unable to create debugfs %d\n", ret);
+		return ret;
+	}
+
+	priv->root_dentry = root_dentry;
+
+	debugfs_create_file("stats", 0600, root_dentry, priv,
+			    &icnss_stats_fops);
+	return 0;
+}
+#endif
 
 static void icnss_debugfs_destroy(struct icnss_priv *priv)
 {
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index b9ce417..5ed66bf 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,9 @@
 #include <linux/of_address.h>
 #include <soc/qcom/memory_dump.h>
 #include <soc/qcom/scm.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #define MSM_DUMP_TABLE_VERSION		MSM_DUMP_MAKE_VERSION(2, 0)
 
@@ -195,3 +198,84 @@ static int __init init_debug_lar_unlock(void)
 }
 early_initcall(init_debug_lar_unlock);
 #endif
+
+static int mem_dump_probe(struct platform_device *pdev)
+{
+	struct device_node *child_node;
+	const struct device_node *node = pdev->dev.of_node;
+	static dma_addr_t dump_addr;
+	static void *dump_vaddr;
+	struct msm_dump_data *dump_data;
+	struct msm_dump_entry dump_entry;
+	int ret;
+	u32 size, id;
+
+	for_each_available_child_of_node(node, child_node) {
+		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find size for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find id for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
+						&dump_addr, GFP_KERNEL);
+
+		if (!dump_vaddr) {
+			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
+			continue;
+		}
+
+		memset(dump_vaddr, 0x0, size);
+
+		dump_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct msm_dump_data), GFP_KERNEL);
+		if (!dump_data) {
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			continue;
+		}
+
+		dump_data->addr = dump_addr;
+		dump_data->len = size;
+		dump_entry.id = id;
+		dump_entry.addr = virt_to_phys(dump_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+		if (ret) {
+			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
+				id);
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			devm_kfree(&pdev->dev, dump_data);
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id mem_dump_match_table[] = {
+	{.compatible = "qcom,mem-dump",},
+	{}
+};
+
+static struct platform_driver mem_dump_driver = {
+	.probe = mem_dump_probe,
+	.driver = {
+		.name = "msm_mem_dump",
+		.owner = THIS_MODULE,
+		.of_match_table = mem_dump_match_table,
+	},
+};
+
+static int __init mem_dump_init(void)
+{
+	return platform_driver_register(&mem_dump_driver);
+}
+
+pure_initcall(mem_dump_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index bf5a526..9d0adbb 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -24,8 +24,10 @@
 #define NUM_LNODES	3
 #define MAX_STR_CL	50
 
-#define MSM_BUS_MAS_ALC	144
-#define MSM_BUS_RSC_APPS 8000
+#define MSM_BUS_MAS_ALC			144
+#define MSM_BUS_RSC_APPS		8000
+#define MSM_BUS_RSC_DISP		8001
+#define BCM_TCS_CMD_ACV_APPS		0x8
 
 struct bus_search_type {
 	struct list_head link;
@@ -127,16 +129,14 @@ static void bcm_add_bus_req(struct device *dev)
 		goto exit_bcm_add_bus_req;
 	}
 
-	if (cur_dev->node_info->bcm_req_idx != -1)
-		goto exit_bcm_add_bus_req;
-
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_add_bus_req;
 
 	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		if (cur_dev->node_info->bcm_req_idx[i] != -1)
+			continue;
 		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 		max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
-
 		if (!bcm_dev->num_lnodes) {
 			bcm_dev->lnode_list = devm_kzalloc(dev,
 				sizeof(struct link_node) * max_num_lnodes,
@@ -183,7 +183,7 @@ static void bcm_add_bus_req(struct device *dev)
 
 		lnode->in_use = 1;
 		lnode->bus_dev_id = cur_dev->node_info->id;
-		cur_dev->node_info->bcm_req_idx = lnode_idx;
+		cur_dev->node_info->bcm_req_idx[i] = lnode_idx;
 		memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
 		memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
 	}
@@ -483,11 +483,35 @@ static int getpath(struct device *src_dev, int dest, const char *cl_name)
 	return first_hop;
 }
 
+static void bcm_update_acv_req(struct msm_bus_node_device_type *cur_rsc,
+				uint64_t max_ab, uint64_t max_ib,
+				uint64_t *vec_a, uint64_t *vec_b,
+				uint32_t *acv, int ctx)
+{
+	uint32_t acv_bmsk = 0;
+	/*
+	 * Base ACV voting on current RSC until mapping is set up in commanddb
+	 * that allows us to vote ACV based on master.
+	 */
+
+	if (cur_rsc->node_info->id == MSM_BUS_RSC_APPS)
+		acv_bmsk = BCM_TCS_CMD_ACV_APPS;
+
+	if (max_ab == 0 && max_ib == 0)
+		*acv = *acv & ~acv_bmsk;
+	else
+		*acv = *acv | acv_bmsk;
+	*vec_a = 0;
+	*vec_b = *acv;
+}
+
 static void bcm_update_bus_req(struct device *dev, int ctx)
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+
+	int i, j;
 	uint64_t max_ib = 0;
 	uint64_t max_ab = 0;
 	int lnode_idx = 0;
@@ -507,7 +531,7 @@ static void bcm_update_bus_req(struct device *dev, int ctx)
 		if (!bcm_dev)
 			goto exit_bcm_update_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -519,19 +543,19 @@ static void bcm_update_bus_req(struct device *dev, int ctx)
 				cur_dev->node_info->agg_params.buswidth *
 				cur_dev->node_info->agg_params.num_aggports);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_ib = max(max_ib,
-				max(bcm_dev->lnode_list[i].lnode_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].lnode_ib[DUAL_CTX]));
+				max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
 				max_ab = max(max_ab,
-				bcm_dev->lnode_list[i].lnode_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].lnode_ab[DUAL_CTX]);
+				bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
 			} else {
 				max_ib = max(max_ib,
-					bcm_dev->lnode_list[i].lnode_ib[ctx]);
+					bcm_dev->lnode_list[j].lnode_ib[ctx]);
 				max_ab = max(max_ab,
-					bcm_dev->lnode_list[i].lnode_ab[ctx]);
+					bcm_dev->lnode_list[j].lnode_ab[ctx]);
 			}
 		}
 		bcm_dev->node_bw[ctx].max_ab = max_ab;
@@ -540,8 +564,18 @@ static void bcm_update_bus_req(struct device *dev, int ctx)
 		max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
 		max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
 
-		bcm_dev->node_vec[ctx].vec_a = max_ab;
-		bcm_dev->node_vec[ctx].vec_b = max_ib;
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_ab, max_ib,
+					&bcm_dev->node_vec[ctx].vec_a,
+					&bcm_dev->node_vec[ctx].vec_b,
+					&cur_rsc->rscdev->acv[ctx], ctx);
+
+		} else {
+			bcm_dev->node_vec[ctx].vec_a = max_ab;
+			bcm_dev->node_vec[ctx].vec_b = max_ib;
+		}
 	}
 exit_bcm_update_bus_req:
 	return;
@@ -551,7 +585,8 @@ static void bcm_query_bus_req(struct device *dev, int ctx)
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	int i, j;
 	uint64_t max_query_ib = 0;
 	uint64_t max_query_ab = 0;
 	int lnode_idx = 0;
@@ -571,7 +606,7 @@ static void bcm_query_bus_req(struct device *dev, int ctx)
 		if (!bcm_dev)
 			goto exit_bcm_query_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_query_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -583,25 +618,25 @@ static void bcm_query_bus_req(struct device *dev, int ctx)
 				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_query_ib = max(max_query_ib,
-				max(bcm_dev->lnode_list[i].
+				max(bcm_dev->lnode_list[j].
 					lnode_query_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 					lnode_query_ib[DUAL_CTX]));
 
 				max_query_ab = max(max_query_ab,
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[DUAL_CTX]);
 			} else {
 				max_query_ib = max(max_query_ib,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ib[ctx]);
 				max_query_ab = max(max_query_ab,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ab[ctx]);
 			}
 		}
@@ -611,6 +646,18 @@ static void bcm_query_bus_req(struct device *dev, int ctx)
 		max_query_ib = msm_bus_div64(max_query_ib,
 						bcm_dev->bcmdev->unit_size);
 
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_query_ab, max_query_ib,
+					&bcm_dev->node_vec[ctx].query_vec_a,
+					&bcm_dev->node_vec[ctx].query_vec_b,
+					&cur_rsc->rscdev->query_acv[ctx], ctx);
+		} else {
+			bcm_dev->node_vec[ctx].query_vec_a = max_query_ab;
+			bcm_dev->node_vec[ctx].query_vec_b = max_query_ib;
+		}
+
 		bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
 		bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
 	}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index c950367..458cf0d 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -418,8 +418,8 @@ static int tcs_cmd_query_list_gen(struct tcs_cmd *cmdlist_active)
 				commit = true;
 			}
 			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ib,
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ab,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_b,
 								commit);
 			k++;
 		}
@@ -433,26 +433,30 @@ static int bcm_clist_add(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->node_info->num_rsc_devs)
-		goto exit_bcm_clist_add;
+		if (!cur_bcm->node_info->num_rsc_devs)
+			goto exit_bcm_clist_add;
 
-	if (!cur_rsc)
-		cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+		if (!cur_rsc)
+			cur_rsc = to_msm_bus_node(cur_bcm->node_info->
+								rsc_devs[0]);
 
-	if (!cur_bcm->dirty) {
-		list_add_tail(&cur_bcm->link,
+		if (!cur_bcm->dirty) {
+			list_add_tail(&cur_bcm->link,
 					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
-		cur_bcm->dirty = true;
+			cur_bcm->dirty = true;
+		}
+		cur_bcm->updated = false;
 	}
-	cur_bcm->updated = false;
 
 exit_bcm_clist_add:
 	return ret;
@@ -462,17 +466,20 @@ static int bcm_query_list_add(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_query_list_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->query_dirty)
-		list_add_tail(&cur_bcm->query_link,
+		if (!cur_bcm->query_dirty)
+			list_add_tail(&cur_bcm->query_link,
 					&bcm_query_list_inorder[cur_vcd]);
+	}
 
 exit_bcm_query_list_add:
 	return ret;
@@ -481,20 +488,23 @@ static int bcm_query_list_add(struct msm_bus_node_device_type *cur_dev)
 static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_clean;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+		if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
 			init_time == false) {
-		cur_bcm->dirty = false;
-		list_del_init(&cur_bcm->link);
+			cur_bcm->dirty = false;
+			list_del_init(&cur_bcm->link);
+		}
 	}
 
 exit_bcm_clist_clean:
@@ -504,15 +514,18 @@ static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
 static int bcm_query_list_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	cur_bcm->query_dirty = false;
-	list_del_init(&cur_bcm->query_link);
+		cur_bcm->query_dirty = false;
+		list_del_init(&cur_bcm->query_link);
+	}
 
 exit_bcm_clist_add:
 	return ret;
@@ -1081,7 +1094,7 @@ static int msm_bus_init_clk(struct device *bus_dev,
 static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 				struct device *bus_dev)
 {
-	int ret = 0;
+	int ret = 0, i = 0;
 	struct msm_bus_node_info_type *node_info = NULL;
 	struct msm_bus_node_info_type *pdata_node_info = NULL;
 	struct msm_bus_node_device_type *bus_node = NULL;
@@ -1100,7 +1113,17 @@ static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 
 	node_info->name = pdata_node_info->name;
 	node_info->id =  pdata_node_info->id;
-	node_info->bcm_req_idx = -1;
+	node_info->bcm_req_idx = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_req_idx) {
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	for (i = 0; i < pdata_node_info->num_bcm_devs; i++)
+		node_info->bcm_req_idx[i] = -1;
+
 	node_info->bus_device_id = pdata_node_info->bus_device_id;
 	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
 	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index fad7afa..f7f17c3 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -71,12 +71,16 @@ struct nodebw {
 struct nodevector {
 	uint64_t vec_a;
 	uint64_t vec_b;
+	uint64_t query_vec_a;
+	uint64_t query_vec_b;
 };
 
 struct msm_bus_rsc_device_type {
 	struct rpmh_client *mbox;
 	struct list_head bcm_clist[VCD_MAX_CNT];
 	int req_state;
+	uint32_t acv[NUM_CTX];
+	uint32_t query_acv[NUM_CTX];
 };
 
 struct msm_bus_bcm_device_type {
@@ -157,7 +161,7 @@ struct msm_bus_node_info_type {
 	struct device **black_connections;
 	struct device **bcm_devs;
 	struct device **rsc_devs;
-	int bcm_req_idx;
+	int *bcm_req_idx;
 	unsigned int bus_device_id;
 	struct device *bus_device;
 	struct rule_update_path_info rule;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 11e1b4d..1f28712 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -464,6 +464,8 @@ static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
 	if (region == NULL) {
 		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
 					size);
+		priv->region_start = 0;
+		priv->region_end = 0;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/soc/qcom/qbt1000.c b/drivers/soc/qcom/qbt1000.c
new file mode 100644
index 0000000..67a5e05
--- /dev/null
+++ b/drivers/soc/qcom/qbt1000.c
@@ -0,0 +1,1207 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define DEBUG
+#define pr_fmt(fmt) "qbt1000:%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/input.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <uapi/linux/qbt1000.h>
+#include <soc/qcom/scm.h>
+#include "../../misc/qseecom_kernel.h"
+
+#define QBT1000_DEV "qbt1000"
+#define QBT1000_IN_DEV_NAME "qbt1000_key_input"
+#define QBT1000_IN_DEV_VERSION 0x0100
+#define MAX_FW_EVENTS 128
+#define FP_APP_CMD_RX_IPC 132
+#define FW_MAX_IPC_MSG_DATA_SIZE 0x500
+#define IPC_MSG_ID_CBGE_REQUIRED 29
+
+/*
+ * shared buffer size - init with max value,
+ * user space will provide new value upon tz app load
+ */
+static uint32_t g_app_buf_size = SZ_256K;
+static char const *const FP_APP_NAME = "fingerpr";
+
+struct finger_detect_gpio {
+	int gpio;
+	int active_low;
+	int irq;
+	struct work_struct work;
+	unsigned int key_code;
+	int power_key_enabled;
+	int last_gpio_state;
+	int event_reported;
+};
+
+struct fw_event_desc {
+	enum qbt1000_fw_event ev;
+};
+
+struct fw_ipc_info {
+	int gpio;
+	int irq;
+};
+
+struct qbt1000_drvdata {
+	struct class	*qbt1000_class;
+	struct cdev	qbt1000_cdev;
+	struct device	*dev;
+	char		*qbt1000_node;
+	struct clk	**clocks;
+	unsigned int	clock_count;
+	uint8_t		clock_state;
+	unsigned int	root_clk_idx;
+	unsigned int	frequency;
+	atomic_t	available;
+	struct mutex	mutex;
+	struct mutex	fw_events_mutex;
+	struct input_dev	*in_dev;
+	struct fw_ipc_info	fw_ipc;
+	struct finger_detect_gpio	fd_gpio;
+	DECLARE_KFIFO(fw_events, struct fw_event_desc, MAX_FW_EVENTS);
+	wait_queue_head_t read_wait_queue;
+	struct qseecom_handle *app_handle;
+	struct qseecom_handle *fp_app_handle;
+};
+
+/*
+ * struct fw_ipc_cmd -
+ *      used to store IPC commands to/from firmware
+ * @status - indicates whether sending/getting the IPC message was successful
+ * @numMsgs - number of IPC messages packed into msg_data
+ * @msg_data - buffer holding the IPC messages, each prefixed by a
+ *             struct fw_ipc_header giving its type and length
+ */
+struct fw_ipc_cmd {
+	uint32_t status;
+	uint32_t numMsgs;
+	uint8_t msg_data[FW_MAX_IPC_MSG_DATA_SIZE];
+};
+
+struct fw_ipc_header {
+	uint32_t msg_type;
+	uint32_t msg_len;
+	uint32_t resp_needed;
+};
+
+/*
+ * struct ipc_msg_type_to_fw_event -
+ *      entry in mapping between an IPC message type to a firmware event
+ * @msg_type - IPC message type, as reported by firmware
+ * @fw_event - corresponding firmware event code to report to driver client
+ */
+struct ipc_msg_type_to_fw_event {
+	uint32_t msg_type;
+	enum qbt1000_fw_event fw_event;
+};
+
+/* mapping between firmware IPC message types to HLOS firmware events */
+struct ipc_msg_type_to_fw_event g_msg_to_event[] = {
+		{IPC_MSG_ID_CBGE_REQUIRED, FW_EVENT_CBGE_REQUIRED}
+};
+
+/**
+ * get_cmd_rsp_buffers() - Function sets cmd & rsp buffer pointers and
+ *                         aligns buffer lengths
+ * @hdl:	index of qseecom_handle
+ * @cmd:	req buffer - set to qseecom_handle.sbuf
+ * @cmd_len:	ptr to req buffer len
+ * @rsp:	rsp buffer - set to qseecom_handle.sbuf + offset
+ * @rsp_len:	ptr to rsp buffer len
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int get_cmd_rsp_buffers(struct qseecom_handle *hdl,
+	void **cmd,
+	uint32_t *cmd_len,
+	void **rsp,
+	uint32_t *rsp_len)
+{
+	/* 64 bytes alignment for QSEECOM */
+	*cmd_len = ALIGN(*cmd_len, 64);
+	*rsp_len = ALIGN(*rsp_len, 64);
+
+	if (((uint64_t)*rsp_len + (uint64_t)*cmd_len)
+			> (uint64_t)g_app_buf_size) {
+		pr_err("buffer too small to hold cmd=%d and rsp=%d\n",
+			*cmd_len, *rsp_len);
+		return -ENOMEM;
+	}
+
+	*cmd = hdl->sbuf;
+	*rsp = hdl->sbuf + *cmd_len;
+	return 0;
+}
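
To make the buffer carving above concrete, a short worked sketch (illustrative lengths only; the shared buffer itself belongs to the loaded TZ app and is bounded by g_app_buf_size):

    uint32_t cmd_len = 100, rsp_len = 40;
    /*
     * ALIGN(100, 64) = 128 and ALIGN(40, 64) = 64, so the command occupies
     * sbuf[0..127], the response occupies sbuf[128..191], and the request
     * is rejected with -ENOMEM as soon as 128 + 64 exceeds g_app_buf_size.
     */
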
+
+/**
+ * send_tz_cmd() - Function sends a command to TZ
+ *
+ * @drvdata: pointer to driver data
+ * @app_handle: handle to tz app
+ * @is_user_space: 1 if the cmd buffer is in user space, 0
+ *          otherwise
+ * @cmd: command buffer to send
+ * @cmd_len: length of the command buffer
+ * @rsp: output, will be set to location of response buffer
+ * @rsp_len: max size of response
+ *
+ * Return: 0 on success.
+ */
+static int send_tz_cmd(struct qbt1000_drvdata *drvdata,
+	struct qseecom_handle *app_handle,
+	int is_user_space,
+	void *cmd, uint32_t cmd_len,
+	void **rsp, uint32_t rsp_len)
+{
+	int rc = 0;
+	void *aligned_cmd;
+	void *aligned_rsp;
+	uint32_t aligned_cmd_len;
+	uint32_t aligned_rsp_len;
+
+	/* init command and response buffers and align lengths */
+	aligned_cmd_len = cmd_len;
+	aligned_rsp_len = rsp_len;
+
+	rc = get_cmd_rsp_buffers(app_handle,
+		(void **)&aligned_cmd,
+		&aligned_cmd_len,
+		(void **)&aligned_rsp,
+		&aligned_rsp_len);
+
+	if (rc != 0)
+		goto end;
+
+	if (!aligned_cmd) {
+		dev_err(drvdata->dev, "%s: Null command buffer\n",
+			__func__);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (aligned_cmd - cmd + cmd_len > g_app_buf_size) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	if (is_user_space) {
+		rc = copy_from_user(aligned_cmd, (void __user *)cmd,
+				cmd_len);
+		if (rc != 0) {
+			pr_err("failure to copy user space buf %d\n", rc);
+			rc = -EFAULT;
+			goto end;
+		}
+	} else
+		memcpy(aligned_cmd, cmd, cmd_len);
+
+	/* send cmd to TZ */
+	rc = qseecom_send_command(app_handle,
+		aligned_cmd,
+		aligned_cmd_len,
+		aligned_rsp,
+		aligned_rsp_len);
+
+	if (rc != 0) {
+		pr_err("failure to send tz cmd %d\n", rc);
+		goto end;
+	}
+
+	*rsp = aligned_rsp;
+
+end:
+	return rc;
+}
+
+/**
+ * qbt1000_open() - Function called when user space opens device.
+ * Successful if driver not currently open.
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+
+	struct qbt1000_drvdata *drvdata = container_of(inode->i_cdev,
+						   struct qbt1000_drvdata,
+						   qbt1000_cdev);
+	file->private_data = drvdata;
+
+	pr_debug("qbt1000_open begin\n");
+	/* disallowing concurrent opens */
+	if (!atomic_dec_and_test(&drvdata->available)) {
+		atomic_inc(&drvdata->available);
+		rc = -EBUSY;
+	}
+
+	pr_debug("qbt1000_open end : %d\n", rc);
+	return rc;
+}
+
+/**
+ * qbt1000_release() - Function called when user space closes device.
+ *
+ * @inode:	ptr to inode object
+ * @file:	ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_release(struct inode *inode, struct file *file)
+{
+	struct qbt1000_drvdata *drvdata;
+
+	if (!file || !file->private_data) {
+		pr_err("qbt1000_release: NULL pointer passed\n");
+		return -EINVAL;
+	}
+	drvdata = file->private_data;
+	atomic_inc(&drvdata->available);
+	return 0;
+}
+
+/**
+ * qbt1000_ioctl() - Function called when user space calls ioctl.
+ * @file:	ptr to file object; private_data holds the driver data
+ * @cmd:	cmd identifier: QBT1000_LOAD_APP, QBT1000_UNLOAD_APP,
+ *              QBT1000_SEND_TZCMD, QBT1000_SET_FINGER_DETECT_KEY or
+ *              QBT1000_CONFIGURE_POWER_KEY
+ * @arg:	ptr to the relevant structure (e.g. qbt1000_app or
+ *              qbt1000_send_tz_cmd) depending on which cmd is passed
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static long qbt1000_ioctl(
+		struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int rc = 0;
+	void __user *priv_arg = (void __user *)arg;
+	struct qbt1000_drvdata *drvdata;
+
+	if (!file || !file->private_data) {
+		pr_err("qbt1000_ioctl: NULL pointer passed\n");
+		return -EINVAL;
+	}
+
+	drvdata = file->private_data;
+
+	mutex_lock(&drvdata->mutex);
+
+	pr_debug("qbt1000_ioctl %d\n", cmd);
+
+	switch (cmd) {
+	case QBT1000_LOAD_APP:
+	{
+		struct qbt1000_app app;
+		struct qseecom_handle *app_handle;
+
+		if (copy_from_user(&app, priv_arg,
+			sizeof(app)) != 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space-LOAD\n");
+			goto end;
+		}
+
+		if (!app.app_handle) {
+			dev_err(drvdata->dev, "%s: LOAD app_handle is null\n",
+				__func__);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (drvdata->app_handle) {
+			dev_err(drvdata->dev, "%s: LOAD app already loaded, unloading first\n",
+				__func__);
+			drvdata->fp_app_handle = 0;
+			rc = qseecom_shutdown_app(&drvdata->app_handle);
+			if (rc != 0) {
+				dev_err(drvdata->dev, "%s: LOAD current app failed to shutdown\n",
+					  __func__);
+				goto end;
+			}
+		}
+
+		pr_debug("app %s load before\n", app.name);
+
+		/* start the TZ app */
+		rc = qseecom_start_app(
+				&drvdata->app_handle, app.name, app.size);
+		if (rc == 0) {
+			g_app_buf_size = app.size;
+			rc = qseecom_set_bandwidth(drvdata->app_handle,
+				app.high_band_width == 1 ? true : false);
+			if (rc != 0) {
+				/* log error, allow to continue */
+				pr_err("App %s failed to set bw\n", app.name);
+			}
+		} else {
+			pr_err("app %s failed to load\n", app.name);
+			goto end;
+		}
+
+		/* copy a fake app handle to user */
+		app_handle = drvdata->app_handle ?
+				(struct qseecom_handle *)123456 : 0;
+		rc = copy_to_user((void __user *)app.app_handle, &app_handle,
+			sizeof(*app.app_handle));
+
+		if (rc != 0) {
+			dev_err(drvdata->dev,
+				"%s: Failed copy 2us LOAD rc:%d\n",
+				 __func__, rc);
+			rc = -ENOMEM;
+			goto end;
+		}
+
+		pr_debug("app %s load after\n", app.name);
+
+		if (!strcmp(app.name, FP_APP_NAME))
+			drvdata->fp_app_handle = drvdata->app_handle;
+
+		break;
+	}
+	case QBT1000_UNLOAD_APP:
+	{
+		struct qbt1000_app app;
+		struct qseecom_handle *app_handle = 0;
+
+		if (copy_from_user(&app, priv_arg,
+			sizeof(app)) != 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space-UNLOAD\n");
+			goto end;
+		}
+
+		if (!app.app_handle) {
+			dev_err(drvdata->dev, "%s: UNLOAD app_handle is null\n",
+				__func__);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		rc = copy_from_user(&app_handle, app.app_handle,
+			sizeof(app_handle));
+
+		if (rc != 0) {
+			dev_err(drvdata->dev,
+				"%s: Failed copy from user space-UNLOAD handle rc:%d\n",
+				 __func__, rc);
+			rc = -EFAULT;
+			goto end;
+		}
+
+		/* if the app hasn't been loaded already, return err */
+		if (!drvdata->app_handle) {
+			pr_err("app not loaded\n");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (drvdata->fp_app_handle == drvdata->app_handle)
+			drvdata->fp_app_handle = 0;
+
+		/* set bw & shutdown the TZ app */
+		qseecom_set_bandwidth(drvdata->app_handle,
+			app.high_band_width == 1 ? true : false);
+		rc = qseecom_shutdown_app(&drvdata->app_handle);
+		if (rc != 0) {
+			pr_err("app failed to shutdown\n");
+			goto end;
+		}
+
+		/* copy the app handle (should be null) to user */
+		rc = copy_to_user((void __user *)app.app_handle, &app_handle,
+			sizeof(*app.app_handle));
+
+		if (rc != 0) {
+			dev_err(drvdata->dev,
+				"%s: Failed copy 2us UNLOAD rc:%d\n",
+				 __func__, rc);
+			rc = -ENOMEM;
+			goto end;
+		}
+
+		break;
+	}
+	case QBT1000_SEND_TZCMD:
+	{
+		struct qbt1000_send_tz_cmd tzcmd;
+		void *rsp_buf;
+
+		if (copy_from_user(&tzcmd, priv_arg,
+			sizeof(tzcmd))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+
+		if (tzcmd.req_buf_len > g_app_buf_size ||
+			tzcmd.rsp_buf_len > g_app_buf_size) {
+			rc = -ENOMEM;
+			pr_err("invalid cmd buf len, req=%d, rsp=%d\n",
+				tzcmd.req_buf_len, tzcmd.rsp_buf_len);
+			goto end;
+		}
+
+		/* if the app hasn't been loaded already, return err */
+		if (!drvdata->app_handle) {
+			pr_err("app not loaded\n");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		rc = send_tz_cmd(drvdata,
+			drvdata->app_handle, 1,
+			tzcmd.req_buf, tzcmd.req_buf_len,
+			&rsp_buf, tzcmd.rsp_buf_len);
+
+		if (rc < 0) {
+			pr_err("failure sending command to tz\n");
+			goto end;
+		}
+
+		/* copy rsp buf back to user space buffer */
+		rc = copy_to_user((void __user *)tzcmd.rsp_buf,
+			 rsp_buf, tzcmd.rsp_buf_len);
+		if (rc != 0) {
+			pr_err("failed copy 2us rc:%d bytes %d:\n",
+				rc, tzcmd.rsp_buf_len);
+			rc = -EFAULT;
+			goto end;
+		}
+
+		break;
+	}
+	case QBT1000_SET_FINGER_DETECT_KEY:
+	{
+		struct qbt1000_set_finger_detect_key set_fd_key;
+
+		if (copy_from_user(&set_fd_key, priv_arg,
+			sizeof(set_fd_key))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+
+		drvdata->fd_gpio.key_code = set_fd_key.key_code;
+
+		break;
+	}
+	case QBT1000_CONFIGURE_POWER_KEY:
+	{
+		struct qbt1000_configure_power_key power_key;
+
+		if (copy_from_user(&power_key, priv_arg,
+			sizeof(power_key))
+				!= 0) {
+			rc = -EFAULT;
+			pr_err("failed copy from user space %d\n", rc);
+			goto end;
+		}
+
+		drvdata->fd_gpio.power_key_enabled = power_key.enable;
+
+		break;
+	}
+	default:
+		pr_err("invalid cmd %d\n", cmd);
+		rc = -ENOIOCTLCMD;
+		goto end;
+	}
+
+end:
+	mutex_unlock(&drvdata->mutex);
+	return rc;
+}
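
A minimal user-space sketch of the QBT1000_LOAD_APP / QBT1000_SEND_TZCMD path handled above. This is an assumption-laden illustration rather than a reference client: it presumes the uapi structs in <linux/qbt1000.h> expose the fields the handler reads (app_handle, a fixed-size name array, size, high_band_width, and req_buf/req_buf_len/rsp_buf/rsp_buf_len), that the device node is /dev/qbt1000, and it glosses over exact pointer types:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/qbt1000.h>

    static int qbt_load_and_send(void)
    {
            void *handle = NULL;            /* cookie written back by the driver */
            struct qbt1000_app app = { 0 };
            struct qbt1000_send_tz_cmd tzcmd = { 0 };
            unsigned char req[64] = { 0 }, rsp[64] = { 0 };
            int rc, fd = open("/dev/qbt1000", O_RDWR);

            if (fd < 0)
                    return -1;

            app.app_handle = &handle;       /* receives the fake handle */
            strncpy(app.name, "fingerpr", sizeof(app.name) - 1);
            app.size = 256 * 1024;          /* shared buffer size for the TZ app */
            app.high_band_width = 0;
            rc = ioctl(fd, QBT1000_LOAD_APP, &app);
            if (rc < 0)
                    goto out;

            tzcmd.req_buf = req;            /* opaque TZ command payload */
            tzcmd.req_buf_len = sizeof(req);
            tzcmd.rsp_buf = rsp;
            tzcmd.rsp_buf_len = sizeof(rsp);
            rc = ioctl(fd, QBT1000_SEND_TZCMD, &tzcmd);
    out:
            close(fd);
            return rc;
    }
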
+
+static int get_events_fifo_len_locked(struct qbt1000_drvdata *drvdata)
+{
+	int len;
+
+	mutex_lock(&drvdata->fw_events_mutex);
+	len = kfifo_len(&drvdata->fw_events);
+	mutex_unlock(&drvdata->fw_events_mutex);
+
+	return len;
+}
+
+static ssize_t qbt1000_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct fw_event_desc fw_event;
+	struct qbt1000_drvdata *drvdata = filp->private_data;
+
+	if (cnt < sizeof(fw_event.ev))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->fw_events_mutex);
+
+	while (kfifo_len(&drvdata->fw_events) == 0) {
+		mutex_unlock(&drvdata->fw_events_mutex);
+
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		pr_debug("fw_events fifo: empty, waiting\n");
+
+		if (wait_event_interruptible(drvdata->read_wait_queue,
+			  (get_events_fifo_len_locked(drvdata) > 0)))
+			return -ERESTARTSYS;
+
+		mutex_lock(&drvdata->fw_events_mutex);
+	}
+
+	if (!kfifo_get(&drvdata->fw_events, &fw_event)) {
+		pr_debug("fw_events fifo: unexpectedly empty\n");
+
+		mutex_unlock(&drvdata->fw_events_mutex);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&drvdata->fw_events_mutex);
+
+	pr_debug("fw_event: %d\n", (int)fw_event.ev);
+	return copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev));
+}
+
+static unsigned int qbt1000_poll(struct file *filp,
+	struct poll_table_struct *wait)
+{
+	struct qbt1000_drvdata *drvdata = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &drvdata->read_wait_queue, wait);
+
+	if (kfifo_len(&drvdata->fw_events) > 0)
+		mask |= (POLLIN | POLLRDNORM);
+
+	return mask;
+}
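
A matching sketch of how a client might block on firmware events through the read()/poll() interface above (same hypothetical /dev/qbt1000 fd; note that qbt1000_read() returns the result of copy_to_user(), i.e. 0 on success rather than a byte count):

    #include <poll.h>
    #include <unistd.h>

    /* Returns the next firmware event code, or -1 on failure (sketch only). */
    static int qbt_wait_for_fw_event(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            int ev;         /* holds an enum qbt1000_fw_event value */

            if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
                    return -1;

            if (read(fd, &ev, sizeof(ev)) != 0)     /* 0 == fully copied */
                    return -1;

            return ev;
    }
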
+
+static const struct file_operations qbt1000_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qbt1000_ioctl,
+	.open = qbt1000_open,
+	.release = qbt1000_release,
+	.read = qbt1000_read,
+	.poll = qbt1000_poll
+};
+
+static int qbt1000_dev_register(struct qbt1000_drvdata *drvdata)
+{
+	dev_t dev_no;
+	int ret = 0;
+	size_t node_size;
+	char *node_name = QBT1000_DEV;
+	struct device *dev = drvdata->dev;
+	struct device *device;
+
+	node_size = strlen(node_name) + 1;
+
+	drvdata->qbt1000_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+	if (!drvdata->qbt1000_node) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	strlcpy(drvdata->qbt1000_node, node_name, node_size);
+
+	ret = alloc_chrdev_region(&dev_no, 0, 1, drvdata->qbt1000_node);
+	if (ret) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		goto err_alloc;
+	}
+
+	cdev_init(&drvdata->qbt1000_cdev, &qbt1000_fops);
+
+	drvdata->qbt1000_cdev.owner = THIS_MODULE;
+	ret = cdev_add(&drvdata->qbt1000_cdev, dev_no, 1);
+	if (ret) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto err_cdev_add;
+	}
+
+	drvdata->qbt1000_class = class_create(THIS_MODULE,
+					   drvdata->qbt1000_node);
+	if (IS_ERR(drvdata->qbt1000_class)) {
+		ret = PTR_ERR(drvdata->qbt1000_class);
+		pr_err("class_create failed %d\n", ret);
+		goto err_class_create;
+	}
+
+	device = device_create(drvdata->qbt1000_class, NULL,
+			       drvdata->qbt1000_cdev.dev, drvdata,
+			       drvdata->qbt1000_node);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		pr_err("device_create failed %d\n", ret);
+		goto err_dev_create;
+	}
+
+	return 0;
+err_dev_create:
+	class_destroy(drvdata->qbt1000_class);
+err_class_create:
+	cdev_del(&drvdata->qbt1000_cdev);
+err_cdev_add:
+	unregister_chrdev_region(drvdata->qbt1000_cdev.dev, 1);
+err_alloc:
+	return ret;
+}
+
+/**
+ * qbt1000_create_input_device() - Function allocates an input
+ * device, configures it for key events and registers it
+ *
+ * @drvdata:	ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_create_input_device(struct qbt1000_drvdata *drvdata)
+{
+	int rc = 0;
+
+	drvdata->in_dev = input_allocate_device();
+	if (drvdata->in_dev == NULL) {
+		dev_err(drvdata->dev, "%s: input_allocate_device() failed\n",
+			__func__);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	drvdata->in_dev->name = QBT1000_IN_DEV_NAME;
+	drvdata->in_dev->phys = NULL;
+	drvdata->in_dev->id.bustype = BUS_HOST;
+	drvdata->in_dev->id.vendor  = 0x0001;
+	drvdata->in_dev->id.product = 0x0001;
+	drvdata->in_dev->id.version = QBT1000_IN_DEV_VERSION;
+
+	drvdata->in_dev->evbit[0] = BIT_MASK(EV_KEY) |  BIT_MASK(EV_ABS);
+	drvdata->in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+	drvdata->in_dev->keybit[BIT_WORD(KEY_HOMEPAGE)] |=
+		BIT_MASK(KEY_HOMEPAGE);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_CAMERA)] |=
+		BIT_MASK(KEY_CAMERA);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_VOLUMEDOWN)] |=
+		BIT_MASK(KEY_VOLUMEDOWN);
+	drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
+		BIT_MASK(KEY_POWER);
+
+	input_set_abs_params(drvdata->in_dev, ABS_X,
+			     0,
+			     1000,
+			     0, 0);
+	input_set_abs_params(drvdata->in_dev, ABS_Y,
+			     0,
+			     1000,
+			     0, 0);
+
+	rc = input_register_device(drvdata->in_dev);
+	if (rc) {
+		dev_err(drvdata->dev, "%s: input_reg_dev() failed %d\n",
+			__func__, rc);
+		goto end;
+	}
+
+end:
+	if (rc)
+		input_free_device(drvdata->in_dev);
+	return rc;
+}
+
+static void purge_finger_events(struct qbt1000_drvdata *drvdata)
+{
+	int i, fifo_len;
+	struct fw_event_desc fw_event;
+
+	fifo_len = kfifo_len(&drvdata->fw_events);
+
+	for (i = 0; i < fifo_len; i++) {
+		if (!kfifo_get(&drvdata->fw_events, &fw_event))
+			pr_err("fw events fifo: could not remove oldest item\n");
+		else if (fw_event.ev != FW_EVENT_FINGER_DOWN
+					&& fw_event.ev != FW_EVENT_FINGER_UP)
+			kfifo_put(&drvdata->fw_events, fw_event);
+	}
+}
+
+static void qbt1000_gpio_report_event(struct qbt1000_drvdata *drvdata)
+{
+	int state;
+	struct fw_event_desc fw_event;
+
+	state = (__gpio_get_value(drvdata->fd_gpio.gpio) ? 1 : 0)
+		^ drvdata->fd_gpio.active_low;
+
+	if (drvdata->fd_gpio.event_reported
+		  && state == drvdata->fd_gpio.last_gpio_state)
+		return;
+
+	pr_debug("gpio %d: report state %d\n", drvdata->fd_gpio.gpio, state);
+
+	drvdata->fd_gpio.event_reported = 1;
+	drvdata->fd_gpio.last_gpio_state = state;
+
+	if (drvdata->fd_gpio.key_code) {
+		input_event(drvdata->in_dev, EV_KEY,
+			drvdata->fd_gpio.key_code, !!state);
+		input_sync(drvdata->in_dev);
+	}
+
+	if (state && drvdata->fd_gpio.power_key_enabled) {
+		input_event(drvdata->in_dev, EV_KEY, KEY_POWER, 1);
+		input_sync(drvdata->in_dev);
+		input_event(drvdata->in_dev, EV_KEY, KEY_POWER, 0);
+		input_sync(drvdata->in_dev);
+	}
+
+	fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+
+	mutex_lock(&drvdata->fw_events_mutex);
+
+	if (kfifo_is_full(&drvdata->fw_events)) {
+		struct fw_event_desc dummy_fw_event;
+
+		pr_warn("fw events fifo: full, dropping oldest item\n");
+		if (!kfifo_get(&drvdata->fw_events, &dummy_fw_event))
+			pr_err("fw events fifo: could not remove oldest item\n");
+	}
+
+	purge_finger_events(drvdata);
+
+	if (!kfifo_put(&drvdata->fw_events, fw_event))
+		pr_err("fw events fifo: error adding item\n");
+
+	mutex_unlock(&drvdata->fw_events_mutex);
+	wake_up_interruptible(&drvdata->read_wait_queue);
+}
+
+static void qbt1000_gpio_work_func(struct work_struct *work)
+{
+	struct qbt1000_drvdata *drvdata =
+		container_of(work, struct qbt1000_drvdata, fd_gpio.work);
+
+	qbt1000_gpio_report_event(drvdata);
+
+	pm_relax(drvdata->dev);
+}
+
+static irqreturn_t qbt1000_gpio_isr(int irq, void *dev_id)
+{
+	struct qbt1000_drvdata *drvdata = dev_id;
+
+	if (irq != drvdata->fd_gpio.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fd_gpio.irq);
+		return IRQ_HANDLED;
+	}
+
+	pm_stay_awake(drvdata->dev);
+	schedule_work(&drvdata->fd_gpio.work);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * qbt1000_ipc_irq_handler() - function processes IPC
+ * interrupts on its own thread
+ * @irq:	the interrupt that occurred
+ * @dev_id: pointer to the qbt1000_drvdata
+ *
+ * Return: IRQ_HANDLED when complete
+ */
+static irqreturn_t qbt1000_ipc_irq_handler(int irq, void *dev_id)
+{
+	uint8_t *msg_buffer;
+	struct fw_ipc_cmd *rx_cmd;
+	struct fw_ipc_header *header;
+	int i, j;
+	uint32_t rxipc = FP_APP_CMD_RX_IPC;
+	struct qbt1000_drvdata *drvdata = (struct qbt1000_drvdata *)dev_id;
+	int rc = 0;
+	uint32_t retry_count = 10;
+
+	pm_stay_awake(drvdata->dev);
+
+	mutex_lock(&drvdata->mutex);
+
+	if (irq != drvdata->fw_ipc.irq) {
+		pr_warn("invalid irq %d (expected %d)\n",
+			irq, drvdata->fw_ipc.irq);
+		goto end;
+	}
+
+	pr_debug("firmware interrupt received (irq %d)\n", irq);
+
+	if (!drvdata->fp_app_handle)
+		goto end;
+
+	while (retry_count > 0) {
+		/*
+		 * send the TZ command to fetch the message from firmware
+		 * TZ will process the message if it can
+		 */
+		rc = send_tz_cmd(drvdata, drvdata->fp_app_handle, 0,
+				&rxipc, sizeof(rxipc),
+				(void *)&rx_cmd, sizeof(*rx_cmd));
+		if (rc < 0) {
+			msleep(50); // sleep for 50ms before retry
+			retry_count -= 1;
+			continue;
+		} else {
+			pr_err("retry_count %d\n", retry_count);
+			break;
+		}
+	}
+
+	if (rc < 0) {
+		pr_err("failure sending tz cmd %d\n", rxipc);
+		goto end;
+	}
+
+	if (rx_cmd->status != 0) {
+		pr_err("tz command failed to complete\n");
+		goto end;
+	}
+
+	msg_buffer = rx_cmd->msg_data;
+
+	for (j = 0; j < rx_cmd->numMsgs; j++) {
+		header = (struct fw_ipc_header *) msg_buffer;
+		/*
+		 * given the IPC message type, search for a corresponding
+		 * event for the driver client. If found, add to the events
+		 * FIFO
+		 */
+		for (i = 0; i < ARRAY_SIZE(g_msg_to_event); i++) {
+			if (g_msg_to_event[i].msg_type == header->msg_type) {
+				enum qbt1000_fw_event ev =
+						g_msg_to_event[i].fw_event;
+				struct fw_event_desc fw_ev_desc;
+
+				mutex_lock(&drvdata->fw_events_mutex);
+				pr_debug("fw events: add %d\n", (int) ev);
+				fw_ev_desc.ev = ev;
+
+				if (!kfifo_put(&drvdata->fw_events, fw_ev_desc))
+					pr_err("fw events: fifo full, drop event %d\n",
+						(int) ev);
+
+				mutex_unlock(&drvdata->fw_events_mutex);
+				break;
+			}
+		}
+		msg_buffer += sizeof(*header) + header->msg_len;
+	}
+	wake_up_interruptible(&drvdata->read_wait_queue);
+end:
+	mutex_unlock(&drvdata->mutex);
+	pm_relax(drvdata->dev);
+	return IRQ_HANDLED;
+}
+
+static int setup_fd_gpio_irq(struct platform_device *pdev,
+	struct qbt1000_drvdata *drvdata)
+{
+	int rc = 0;
+	int irq;
+	const char *desc = "qbt_finger_detect";
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fd_gpio.gpio,
+		GPIOF_IN, desc);
+
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+	irq = gpio_to_irq(drvdata->fd_gpio.gpio);
+	if (irq < 0) {
+		rc = irq;
+		pr_err("unable to get irq number for gpio %d, error %d\n",
+			drvdata->fd_gpio.gpio, rc);
+		goto end;
+	}
+
+	drvdata->fd_gpio.irq = irq;
+	INIT_WORK(&drvdata->fd_gpio.work, qbt1000_gpio_work_func);
+
+	rc = devm_request_any_context_irq(&pdev->dev, drvdata->fd_gpio.irq,
+		qbt1000_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+		desc, drvdata);
+
+	if (rc < 0) {
+		pr_err("unable to claim irq %d; error %d\n",
+			drvdata->fd_gpio.irq, rc);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+static int setup_ipc_irq(struct platform_device *pdev,
+	struct qbt1000_drvdata *drvdata)
+{
+	int rc = 0;
+	const char *desc = "qbt_ipc";
+
+	drvdata->fw_ipc.irq = gpio_to_irq(drvdata->fw_ipc.gpio);
+	pr_debug("\nirq %d gpio %d\n",
+			drvdata->fw_ipc.irq, drvdata->fw_ipc.gpio);
+	if (drvdata->fw_ipc.irq < 0) {
+		rc = drvdata->fw_ipc.irq;
+		pr_err("no irq for gpio %d, error=%d\n",
+		  drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fw_ipc.gpio,
+			GPIOF_IN, desc);
+
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_request_threaded_irq(&pdev->dev,
+		drvdata->fw_ipc.irq,
+		NULL,
+		qbt1000_ipc_irq_handler,
+		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		desc,
+		drvdata);
+
+	if (rc < 0) {
+		pr_err("failed to register for ipc irq %d, rc = %d\n",
+			drvdata->fw_ipc.irq, rc);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * qbt1000_read_device_tree() - Function reads device tree
+ * properties into driver data
+ * @pdev:	ptr to platform device object
+ * @drvdata:	ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_read_device_tree(struct platform_device *pdev,
+	struct qbt1000_drvdata *drvdata)
+{
+	int rc = 0;
+	uint32_t rate;
+	int gpio;
+	enum of_gpio_flags flags;
+
+	/* read clock frequency */
+	if (of_property_read_u32(pdev->dev.of_node,
+		"clock-frequency", &rate) == 0) {
+		pr_debug("clk frequency %d\n", rate);
+		drvdata->frequency = rate;
+	}
+
+	/* read IPC gpio */
+	drvdata->fw_ipc.gpio = of_get_named_gpio(pdev->dev.of_node,
+		"qcom,ipc-gpio", 0);
+	if (drvdata->fw_ipc.gpio < 0) {
+		rc = drvdata->fw_ipc.gpio;
+		pr_err("ipc gpio not found, error=%d\n", rc);
+		goto end;
+	}
+
+	/*
+	 * TODO: revisit once the finger detect GPIO is added in the DTSI -
+	 * read the finger detect GPIO configuration
+	 */
+
+	gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+				"qcom,finger-detect-gpio", 0, &flags);
+	if (gpio < 0) {
+		pr_err("failed to get gpio flags\n");
+		rc = gpio;
+		goto end;
+	}
+
+	drvdata->fd_gpio.gpio = gpio;
+	drvdata->fd_gpio.active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+end:
+	return rc;
+}
+
+/**
+ * qbt1000_probe() - Function loads hardware config from device tree
+ * @pdev:	ptr to platform device object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt1000_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qbt1000_drvdata *drvdata;
+	int rc = 0;
+
+	pr_debug("qbt1000_probe begin\n");
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	rc = qbt1000_read_device_tree(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	atomic_set(&drvdata->available, 1);
+
+	mutex_init(&drvdata->mutex);
+	mutex_init(&drvdata->fw_events_mutex);
+
+	rc = qbt1000_dev_register(drvdata);
+	if (rc < 0)
+		goto end;
+
+	INIT_KFIFO(drvdata->fw_events);
+	init_waitqueue_head(&drvdata->read_wait_queue);
+
+	rc = qbt1000_create_input_device(drvdata);
+	if (rc < 0)
+		goto end;
+
+	rc = setup_fd_gpio_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	rc = setup_ipc_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc < 0)
+		goto end;
+
+end:
+	pr_debug("qbt1000_probe end : %d\n", rc);
+	return rc;
+}
+
+static int qbt1000_remove(struct platform_device *pdev)
+{
+	struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	input_unregister_device(drvdata->in_dev);
+
+	mutex_destroy(&drvdata->mutex);
+	mutex_destroy(&drvdata->fw_events_mutex);
+
+	device_destroy(drvdata->qbt1000_class, drvdata->qbt1000_cdev.dev);
+	class_destroy(drvdata->qbt1000_class);
+	cdev_del(&drvdata->qbt1000_cdev);
+	unregister_chrdev_region(drvdata->qbt1000_cdev.dev, 1);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	return 0;
+}
+
+static int qbt1000_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int rc = 0;
+	struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	/*
+	 * Return an error code if the driver is currently making a TZ call.
+	 * Note: the driver must keep its clocks on while a TZ call is in
+	 * progress, so the clock state is checked here to decide whether
+	 * suspend can be allowed.
+	 */
+	if (!mutex_trylock(&drvdata->mutex))
+		return -EBUSY;
+
+	if (drvdata->clock_state)
+		rc = -EBUSY;
+	else {
+		enable_irq_wake(drvdata->fd_gpio.irq);
+		enable_irq_wake(drvdata->fw_ipc.irq);
+	}
+
+	mutex_unlock(&drvdata->mutex);
+
+	return rc;
+}
+
+static int qbt1000_resume(struct platform_device *pdev)
+{
+	struct qbt1000_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	disable_irq_wake(drvdata->fd_gpio.irq);
+	disable_irq_wake(drvdata->fw_ipc.irq);
+
+	return 0;
+}
+
+static const struct of_device_id qbt1000_match[] = {
+	{ .compatible = "qcom,qbt1000" },
+	{}
+};
+
+static struct platform_driver qbt1000_plat_driver = {
+	.probe = qbt1000_probe,
+	.remove = qbt1000_remove,
+	.suspend = qbt1000_suspend,
+	.resume = qbt1000_resume,
+	.driver = {
+		.name = "qbt1000",
+		.owner = THIS_MODULE,
+		.of_match_table = qbt1000_match,
+	},
+};
+
+module_platform_driver(qbt1000_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QBT1000 driver");
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 82718c8..57f38d3 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -266,7 +266,6 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd)
 			pd->total_domains = resp->total_domains;
 			if (!resp->total_domains) {
 				pr_err("No matching domains found\n");
-				rc = -EIO;
 				goto out;
 			}
 
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 62e2384..221ae0c 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -373,13 +373,6 @@ static void root_service_service_arrive(struct work_struct *work)
 	mutex_unlock(&qmi_client_release_lock);
 	pr_info("Connection established between QMI handle and %d service\n",
 							data->instance_id);
-	/* Register for indication messages about service */
-	rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
-							(void *)data);
-	if (rc < 0)
-		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
-							data->instance_id, rc);
-
 	mutex_lock(&notif_add_lock);
 	mutex_lock(&service_list_lock);
 	list_for_each_entry(service_notif, &service_list, list) {
@@ -402,6 +395,12 @@ static void root_service_service_arrive(struct work_struct *work)
 	}
 	mutex_unlock(&service_list_lock);
 	mutex_unlock(&notif_add_lock);
+	/* Register for indication messages about service */
+	rc = qmi_register_ind_cb(data->clnt_handle,
+		root_service_service_ind_cb, (void *)data);
+	if (rc < 0)
+		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
 }
 
 static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 119ede3..c252040 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -36,6 +36,7 @@
 #include <soc/qcom/boot_stats.h>
 
 #define BUILD_ID_LENGTH 32
+#define CHIP_ID_LENGTH 32
 #define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
 #define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
 #define SMEM_IMAGE_VERSION_SIZE 4096
@@ -199,6 +200,20 @@ struct socinfo_v0_12 {
 struct socinfo_v0_13 {
 	struct socinfo_v0_12 v0_12;
 	uint32_t nproduct_id;
+	char chip_name[CHIP_ID_LENGTH];
+};
+
+struct socinfo_v0_14 {
+	struct socinfo_v0_13 v0_13;
+	uint32_t num_clusters;
+	uint32_t ncluster_array_offset;
+	uint32_t num_defective_parts;
+	uint32_t ndefective_parts_array_offset;
+};
+
+struct socinfo_v0_15 {
+	struct socinfo_v0_14 v0_14;
+	uint32_t nmodem_supported;
 };
 
 static union {
@@ -215,10 +230,12 @@ static union {
 	struct socinfo_v0_11 v0_11;
 	struct socinfo_v0_12 v0_12;
 	struct socinfo_v0_13 v0_13;
+	struct socinfo_v0_14 v0_14;
+	struct socinfo_v0_15 v0_15;
 } *socinfo;
 
 /* max socinfo format version supported */
-#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 13)
+#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 15)
 
 static struct msm_soc_info cpu_of_id[] = {
 
@@ -547,12 +564,12 @@ static struct msm_soc_info cpu_of_id[] = {
 	/* sdm845 ID */
 	[321] = {MSM_CPU_SDM845, "SDM845"},
 
-	/* Bat ID */
-	[328] = {MSM_CPU_SDM830, "SDM830"},
-
 	/* sdxpoorwills ID */
 	[334] = {SDX_CPU_SDXPOORWILLS, "SDXPOORWILLS"},
 
+	/* SDM670 ID */
+	[336] = {MSM_CPU_SDM670, "SDM670"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -705,6 +722,14 @@ static uint32_t socinfo_get_raw_device_number(void)
 		: 0;
 }
 
+static char *socinfo_get_chip_name(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 13) ?
+			socinfo->v0_13.chip_name : "N/A")
+		: "N/A";
+}
+
 static uint32_t socinfo_get_nproduct_id(void)
 {
 	return socinfo ?
@@ -713,6 +738,46 @@ static uint32_t socinfo_get_nproduct_id(void)
 		: 0;
 }
 
+static uint32_t socinfo_get_num_clusters(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_clusters : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ncluster_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ncluster_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_num_defective_parts(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_defective_parts : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ndefective_parts_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ndefective_parts_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_nmodem_supported(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 15) ?
+			socinfo->v0_15.nmodem_supported : 0)
+		: 0;
+}
+
 enum pmic_model socinfo_get_pmic_model(void)
 {
 	return socinfo ?
@@ -890,6 +955,15 @@ msm_get_raw_device_number(struct device *dev,
 }
 
 static ssize_t
+msm_get_chip_name(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			socinfo_get_chip_name());
+}
+
+static ssize_t
 msm_get_nproduct_id(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -899,6 +973,51 @@ msm_get_nproduct_id(struct device *dev,
 }
 
 static ssize_t
+msm_get_num_clusters(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_clusters());
+}
+
+static ssize_t
+msm_get_ncluster_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ncluster_array_offset());
+}
+
+static ssize_t
+msm_get_num_defective_parts(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_defective_parts());
+}
+
+static ssize_t
+msm_get_ndefective_parts_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ndefective_parts_array_offset());
+}
+
+static ssize_t
+msm_get_nmodem_supported(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_nmodem_supported());
+}
+
+static ssize_t
 msm_get_pmic_model(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -1146,10 +1265,34 @@ static struct device_attribute msm_soc_attr_raw_device_number =
 	__ATTR(raw_device_number, S_IRUGO,
 			msm_get_raw_device_number, NULL);
 
+static struct device_attribute msm_soc_attr_chip_name =
+	__ATTR(chip_name, 0444,
+			msm_get_chip_name, NULL);
+
 static struct device_attribute msm_soc_attr_nproduct_id =
 	__ATTR(nproduct_id, 0444,
 			msm_get_nproduct_id, NULL);
 
+static struct device_attribute msm_soc_attr_num_clusters =
+	__ATTR(num_clusters, 0444,
+			msm_get_num_clusters, NULL);
+
+static struct device_attribute msm_soc_attr_ncluster_array_offset =
+	__ATTR(ncluster_array_offset, 0444,
+			msm_get_ncluster_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_num_defective_parts =
+	__ATTR(num_defective_parts, 0444,
+			msm_get_num_defective_parts, NULL);
+
+static struct device_attribute msm_soc_attr_ndefective_parts_array_offset =
+	__ATTR(ndefective_parts_array_offset, 0444,
+			msm_get_ndefective_parts_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_nmodem_supported =
+	__ATTR(nmodem_supported, 0444,
+			msm_get_nmodem_supported, NULL);
+
 static struct device_attribute msm_soc_attr_pmic_model =
 	__ATTR(pmic_model, S_IRUGO,
 			msm_get_pmic_model, NULL);
@@ -1255,9 +1398,9 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 321;
 		strlcpy(dummy_socinfo.build_id, "sdm845 - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_sdm830()) {
-		dummy_socinfo.id = 328;
-		strlcpy(dummy_socinfo.build_id, "sdm830 - ",
+	} else if (early_machine_is_sdm670()) {
+		dummy_socinfo.id = 336;
+		strlcpy(dummy_socinfo.build_id, "sdm670 - ",
 			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdxpoorwills()) {
 		dummy_socinfo.id = 334;
@@ -1280,9 +1423,23 @@ static void __init populate_soc_sysfs_files(struct device *msm_soc_device)
 	device_create_file(msm_soc_device, &images);
 
 	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 15):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_nmodem_supported);
+	case SOCINFO_VERSION(0, 14):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_clusters);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_ncluster_array_offset);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_defective_parts);
+		device_create_file(msm_soc_device,
+				&msm_soc_attr_ndefective_parts_array_offset);
 	case SOCINFO_VERSION(0, 13):
 		 device_create_file(msm_soc_device,
 					&msm_soc_attr_nproduct_id);
+		 device_create_file(msm_soc_device,
+					&msm_soc_attr_chip_name);
 	case SOCINFO_VERSION(0, 12):
 		device_create_file(msm_soc_device,
 					&msm_soc_attr_chip_family);
@@ -1522,6 +1679,53 @@ static void socinfo_print(void)
 			socinfo->v0_13.nproduct_id);
 		break;
 
+	case SOCINFO_VERSION(0, 14):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset);
+		break;
+
+	case SOCINFO_VERSION(0, 15):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x nmodem_supported=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset,
+			socinfo->v0_15.nmodem_supported);
+		break;
+
 	default:
 		pr_err("Unknown format found: v%u.%u\n", f_maj, f_min);
 		break;
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index e7c2bb2..21f3580 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -481,17 +481,21 @@ static void send_sysmon_notif(struct subsys_device *dev)
 	mutex_unlock(&subsys_list_lock);
 }
 
-static void for_each_subsys_device(struct subsys_device **list,
+static int for_each_subsys_device(struct subsys_device **list,
 		unsigned int count, void *data,
-		void (*fn)(struct subsys_device *, void *))
+		int (*fn)(struct subsys_device *, void *))
 {
+	int ret;
 	while (count--) {
 		struct subsys_device *dev = *list++;
 
 		if (!dev)
 			continue;
-		fn(dev, data);
+		ret = fn(dev, data);
+		if (ret)
+			return ret;
 	}
+	return 0;
 }
 
 static void notify_each_subsys_device(struct subsys_device **list,
@@ -593,21 +597,31 @@ static int wait_for_err_ready(struct subsys_device *subsys)
 	return 0;
 }
 
-static void subsystem_shutdown(struct subsys_device *dev, void *data)
+static int subsystem_shutdown(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
+	int ret;
 
 	pr_info("[%s:%d]: Shutting down %s\n",
 			current->comm, current->pid, name);
-	if (dev->desc->shutdown(dev->desc, true) < 0)
-		panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
-			current->comm, current->pid, name);
+	ret = dev->desc->shutdown(dev->desc, true);
+	if (ret < 0) {
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Shutdown failure on %s\n", name);
+			return ret;
+		}
+	}
 	dev->crash_count++;
 	subsys_set_state(dev, SUBSYS_OFFLINE);
 	disable_all_irqs(dev);
+
+	return 0;
 }
 
-static void subsystem_ramdump(struct subsys_device *dev, void *data)
+static int subsystem_ramdump(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 
@@ -616,15 +630,17 @@ static void subsystem_ramdump(struct subsys_device *dev, void *data)
 			pr_warn("%s[%s:%d]: Ramdump failed.\n",
 				name, current->comm, current->pid);
 	dev->do_ramdump_on_put = false;
+	return 0;
 }
 
-static void subsystem_free_memory(struct subsys_device *dev, void *data)
+static int subsystem_free_memory(struct subsys_device *dev, void *data)
 {
 	if (dev->desc->free_memory)
 		dev->desc->free_memory(dev->desc);
+	return 0;
 }
 
-static void subsystem_powerup(struct subsys_device *dev, void *data)
+static int subsystem_powerup(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 	int ret;
@@ -632,11 +648,17 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
 	pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
 	init_completion(&dev->err_ready);
 
-	if (dev->desc->powerup(dev->desc) < 0) {
+	ret = dev->desc->powerup(dev->desc);
+	if (ret < 0) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Powerup error: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("[%s:%d]: Powerup error: %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Powerup failure on %s\n", name);
+			return ret;
+		}
 	}
 	enable_all_irqs(dev);
 
@@ -644,11 +666,16 @@ static void subsystem_powerup(struct subsys_device *dev, void *data)
 	if (ret) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Timed out waiting for error ready: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure)
+			panic("[%s:%d]: Timed out waiting for error ready: %s!",
+				current->comm, current->pid, name);
+		else
+			return ret;
 	}
 	subsys_set_state(dev, SUBSYS_ONLINE);
 	subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
+
+	return 0;
 }
 
 static int __find_subsys(struct device *dev, void *data)
@@ -910,6 +937,7 @@ static void subsystem_restart_wq_func(struct work_struct *work)
 	struct subsys_tracking *track;
 	unsigned int count;
 	unsigned long flags;
+	int ret;
 
 	/*
 	 * It's OK to not take the registration lock at this point.
@@ -957,7 +985,9 @@ static void subsystem_restart_wq_func(struct work_struct *work)
 	pr_debug("[%s:%d]: Starting restart sequence for %s\n",
 			current->comm, current->pid, desc->name);
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
 
 	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
@@ -973,12 +1003,19 @@ static void subsystem_restart_wq_func(struct work_struct *work)
 	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
 
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
 
 	pr_info("[%s:%d]: Restart sequence for %s completed.\n",
 			current->comm, current->pid, desc->name);
 
+err:
+	/* Reset subsys count */
+	if (ret)
+		dev->count = 0;
+
 	mutex_unlock(&soc_order_reg_lock);
 	mutex_unlock(&track->lock);
 
@@ -1470,6 +1507,9 @@ static int subsys_parse_devicetree(struct subsys_desc *desc)
 			desc->generic_irq = ret;
 	}
 
+	desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ignore-ssr-failure");
+
 	order = ssr_parse_restart_orders(desc);
 	if (IS_ERR(order)) {
 		pr_err("Could not initialize SSR restart order, err = %ld\n",
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 7a784aa..8bf5659 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -136,6 +136,8 @@ static int msm_watchdog_suspend(struct device *dev)
 		return 0;
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	if (wdog_dd->wakeup_irq_enable) {
+		/* Make sure register write is complete before proceeding */
+		mb();
 		wdog_dd->last_pet = sched_clock();
 		return 0;
 	}
@@ -151,8 +153,15 @@ static int msm_watchdog_resume(struct device *dev)
 {
 	struct msm_watchdog_data *wdog_dd =
 			(struct msm_watchdog_data *)dev_get_drvdata(dev);
-	if (!enable || wdog_dd->wakeup_irq_enable)
+	if (!enable)
 		return 0;
+	if (wdog_dd->wakeup_irq_enable) {
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+		/* Make sure register write is complete before proceeding */
+		mb();
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
 	__raw_writel(1, wdog_dd->base + WDT0_EN);
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	/* Make sure watchdog is reset before setting enable */
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 8cc77c1..08eb00a 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -73,11 +73,11 @@
 #define SPI_CS_DEASSERT		(9)
 #define SPI_SCK_ONLY		(10)
 /* M_CMD params for SPI */
-#define SPI_PRE_CMD_DELAY	(0)
-#define TIMESTAMP_BEFORE	(1)
-#define FRAGMENTATION		(2)
-#define TIMESTAMP_AFTER		(3)
-#define POST_CMD_DELAY		(4)
+#define SPI_PRE_CMD_DELAY	BIT(0)
+#define TIMESTAMP_BEFORE	BIT(1)
+#define FRAGMENTATION		BIT(2)
+#define TIMESTAMP_AFTER		BIT(3)
+#define POST_CMD_DELAY		BIT(4)
 
 #define SPI_CORE2X_VOTE		(10000)
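
The switch to BIT() above matters because these values are OR'd together as M_CMD parameter flags, and the old index-style defines collapsed when combined. A value-only illustration (no Qualcomm API assumed):

    /*
     * Previously FRAGMENTATION = 2 and TIMESTAMP_AFTER = 3, so
     * FRAGMENTATION | TIMESTAMP_AFTER == 3, indistinguishable from
     * TIMESTAMP_AFTER alone, and SPI_PRE_CMD_DELAY (0) could never be
     * set at all. As bit masks the combination is unambiguous:
     */
    u32 m_param = FRAGMENTATION | TIMESTAMP_AFTER;  /* BIT(2) | BIT(3) = 0x0c */
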
 
@@ -172,15 +172,13 @@ static int spi_geni_prepare_message(struct spi_master *spi_mas,
 	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
 	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
 	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
-	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
-	u32 demux_output_inv =
-			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	u32 demux_sel = 0;
+	u32 demux_output_inv = 0;
 	int ret = 0;
 
 	loopback_cfg &= ~LOOPBACK_MSK;
 	cpol &= ~CPOL;
 	cpha &= ~CPHA;
-	demux_output_inv &= ~BIT(spi_slv->chip_select);
 
 	if (mode & SPI_LOOP)
 		loopback_cfg |= LOOPBACK_ENABLE;
@@ -194,7 +192,7 @@ static int spi_geni_prepare_message(struct spi_master *spi_mas,
 	if (spi_slv->mode & SPI_CS_HIGH)
 		demux_output_inv |= BIT(spi_slv->chip_select);
 
-	demux_sel |= BIT(spi_slv->chip_select);
+	demux_sel = spi_slv->chip_select;
 	mas->cur_speed_hz = spi_slv->max_speed_hz;
 	mas->cur_word_len = spi_slv->bits_per_word;
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index a5bfeab..9cc85ee 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -584,6 +584,11 @@ static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show)
 			id = ffs(status) - 1;
 			status &= ~BIT(id);
 			apid = id + i * 32;
+			if (apid < pa->min_apid || apid > pa->max_apid) {
+				WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+					apid);
+				continue;
+			}
 			enable = readl_relaxed(pa->intr +
 					pa->ver_ops->acc_enable(apid));
 			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 68f0217..9846c51 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -96,6 +96,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
 	int other_file = global_node_page_state(NR_FILE_PAGES) -
 				global_node_page_state(NR_SHMEM) -
+				global_node_page_state(NR_UNEVICTABLE) -
 				total_swapcache_pages();
 
 	if (lowmem_adj_size < array_size)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 70390de..eb0a095 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data)
 		s = &dev->subdevices[i];
 		spriv = s->private;
 
-		if (now > spriv->next_time_min) {
+		if (time_after_eq(now, spriv->next_time_min)) {
 			struct jr3_pci_poll_delay sub_delay;
 
 			sub_delay = jr3_pci_poll_subdevice(s);
@@ -727,11 +727,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
 		s->insn_read	= jr3_pci_ai_insn_read;
 
 		spriv = jr3_pci_alloc_spriv(dev, s);
-		if (spriv) {
-			/* Channel specific range and maxdata */
-			s->range_table_list	= spriv->range_table_list;
-			s->maxdata_list		= spriv->maxdata_list;
-		}
+		if (!spriv)
+			return -ENOMEM;
+
+		/* Channel specific range and maxdata */
+		s->range_table_list	= spriv->range_table_list;
+		s->maxdata_list		= spriv->maxdata_list;
 	}
 
 	/* Reset DSP card */
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 4009691..f03e43b 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void)
 
 static void __exit gdm_usb_mux_exit(void)
 {
-	unregister_lte_tty_driver();
-
 	usb_deregister(&gdm_mux_driver);
+	unregister_lte_tty_driver();
 }
 
 module_init(gdm_usb_mux_init);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 8d6bca6..591f274 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
 
 	switch (variable) {
 	case HW_VAR_BSSID:
-		rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
-		rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+		/* BSSIDR is only 2-byte aligned, so write a u16 first, then a u32 */
+		rtl92e_writew(dev, BSSIDR, *(u16 *)val);
+		rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
 		break;
 
 	case HW_VAR_MEDIA_STATUS:
@@ -626,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
-	curCR = rtl92e_readl(dev, EPROM_CMD);
+	curCR = rtl92e_readw(dev, EPROM_CMD);
 	RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
 		 curCR);
 	priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct net_device *dev)
 	rtl92e_config_rate(dev, &rate_config);
 	priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
 	 priv->basic_rate = rate_config &= 0x15f;
-	rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
-	rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+	rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
+	rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
 
 	if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
 		rtl92e_writew(dev, ATIMWND, 2);
@@ -1184,8 +1185,7 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
 			  struct cb_desc *cb_desc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
-	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
-			 PCI_DMA_TODEVICE);
+	dma_addr_t mapping;
 	struct tx_fwinfo_8190pci *pTxFwInfo;
 
 	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1196,8 +1196,6 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
 	pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
 						  pTxFwInfo->TxRate, cb_desc);
 
-	if (pci_dma_mapping_error(priv->pdev, mapping))
-		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
 	if (cb_desc->bAMPDUEnable) {
 		pTxFwInfo->AllowAggregation = 1;
 		pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1232,6 +1230,14 @@ void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
 	}
 
 	memset((u8 *)pdesc, 0, 12);
+
+	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(priv->pdev, mapping)) {
+		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
+		return;
+	}
+
 	pdesc->LINIP = 0;
 	pdesc->CmdInit = 1;
 	pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e..4615a6f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
 	pTsCommonInfo->TClasNum = TCLAS_Num;
 }
 
-static bool IsACValid(unsigned int tid)
-{
-	return tid < 7;
-}
-
 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 	   u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
 {
@@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 	if (ieee->current_network.qos_data.supported == 0) {
 		UP = 0;
 	} else {
-		if (!IsACValid(TID)) {
-			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
-				    __func__, TID);
-			return false;
-		}
-
 		switch (TID) {
 		case 0:
 		case 3:
@@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 		case 7:
 			UP = 7;
 			break;
+		default:
+			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
+				    __func__, TID);
+			return false;
 		}
 	}
 
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index e9b6b21..f759aa8 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
 		u16 index, u16 length, u8 *buffer)
 {
 	int status = 0;
+	u8 *usb_buffer;
 
 	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
 		return STATUS_FAILURE;
 
 	mutex_lock(&priv->usb_lock);
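+	/* usb_control_msg() needs a DMA-able (heap) buffer, not the caller's */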
 
+	usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
+	if (!usb_buffer) {
+		mutex_unlock(&priv->usb_lock);
+		return -ENOMEM;
+	}
+
 	status = usb_control_msg(priv->usb,
-		usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
-			index, buffer, length, USB_CTL_WAIT);
+				 usb_sndctrlpipe(priv->usb, 0),
+				 request, 0x40, value,
+				 index, usb_buffer, length, USB_CTL_WAIT);
+
+	kfree(usb_buffer);
 
 	mutex_unlock(&priv->usb_lock);
 
@@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
 		u16 index, u16 length, u8 *buffer)
 {
 	int status;
+	u8 *usb_buffer;
 
 	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
 		return STATUS_FAILURE;
 
 	mutex_lock(&priv->usb_lock);
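+	/* read into a heap bounce buffer; copy back only on a full-length read */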
 
+	usb_buffer = kmalloc(length, GFP_KERNEL);
+	if (!usb_buffer) {
+		mutex_unlock(&priv->usb_lock);
+		return -ENOMEM;
+	}
+
 	status = usb_control_msg(priv->usb,
-		usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
-			index, buffer, length, USB_CTL_WAIT);
+				 usb_rcvctrlpipe(priv->usb, 0),
+				 request, 0xc0, value,
+				 index, usb_buffer, length, USB_CTL_WAIT);
+
+	if (status == length)
+		memcpy(buffer, usb_buffer, length);
+
+	kfree(usb_buffer);
 
 	mutex_unlock(&priv->usb_lock);
 
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b7d747e..40e50f2 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4671,6 +4671,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
 			continue;
 		}
 		atomic_set(&sess->session_reinstatement, 1);
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
 		spin_unlock(&sess->conn_lock);
 
 		list_move_tail(&se_sess->sess_list, &free_list);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e980e2d..7e70fe8 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1530,6 +1530,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
 		return;
 	}
 	atomic_set(&sess->session_reinstatement, 1);
+	atomic_set(&sess->session_fall_back_to_erl0, 1);
 	spin_unlock(&sess->conn_lock);
 
 	iscsit_stop_time2retain_timer(sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 15f79a2..96c55bc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -204,6 +204,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 			    initiatorname_param->value) &&
 		   (sess_p->sess_ops->SessionType == sessiontype))) {
 			atomic_set(&sess_p->session_reinstatement, 1);
+			atomic_set(&sess_p->session_fall_back_to_erl0, 1);
 			spin_unlock(&sess_p->conn_lock);
 			iscsit_inc_session_usage_count(sess_p);
 			iscsit_stop_time2retain_timer(sess_p);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d545993..29f807b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -594,8 +594,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	if (ret < 0)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-	if (ret)
-		target_complete_cmd(cmd, SAM_STAT_GOOD);
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a53fb23..b3b1461 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -506,8 +506,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 	 * been failed with a non-zero SCSI status.
 	 */
 	if (cmd->scsi_status) {
-		pr_err("compare_and_write_callback: non zero scsi_status:"
+		pr_debug("compare_and_write_callback: non zero scsi_status:"
 			" 0x%02x\n", cmd->scsi_status);
+		*post_ret = 1;
+		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		goto out;
 	}
 
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 7da9211..355d013 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -620,6 +620,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	cpufreq_device->cpufreq_state = state;
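+	/* record the state up front; the isolation paths below return early */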
 	/* If state is the last, isolate the CPU */
 	if (state == cpufreq_device->max_level)
 		return sched_isolate_cpu(cpu);
@@ -627,7 +628,6 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 		sched_unisolate_cpu(cpu);
 
 	clip_freq = cpufreq_device->freq_table[state];
-	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
 	/* Check if the device has a platform mitigation function that
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 342160e..04320d8 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1973,8 +1973,6 @@ static int qpnp_adc_tm_probe(struct platform_device *pdev)
 			chip->sensor[sen_idx].thermal_node = true;
 			snprintf(name, sizeof(name), "%s",
 				chip->adc->adc_channels[sen_idx].name);
-			chip->sensor[sen_idx].meas_interval =
-				QPNP_ADC_TM_MEAS_INTERVAL;
 			chip->sensor[sen_idx].low_thr =
 						QPNP_ADC_TM_M0_LOW_THR;
 			chip->sensor[sen_idx].high_thr =
@@ -2027,7 +2025,7 @@ static int qpnp_adc_tm_probe(struct platform_device *pdev)
 
 	rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
 			qpnp_adc_tm_rc_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+		IRQF_TRIGGER_HIGH, "qpnp_adc_tm_interrupt", chip);
 	if (rc)
 		dev_err(&pdev->dev, "failed to request adc irq\n");
 	else
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index a23fa5e..2b90738 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig)
 static void pty_flush_buffer(struct tty_struct *tty)
 {
 	struct tty_struct *to = tty->link;
-	struct tty_ldisc *ld;
 
 	if (!to)
 		return;
 
-	ld = tty_ldisc_ref(to);
-	tty_buffer_flush(to, ld);
-	if (ld)
-		tty_ldisc_deref(ld);
-
+	tty_buffer_flush(to, NULL);
 	if (to->packet) {
 		spin_lock_irq(&tty->ctrl_lock);
 		tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 8108da8..2e12c3f 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -145,6 +145,7 @@ struct msm_geni_serial_port {
 	void *ipc_log_pwr;
 	void *ipc_log_misc;
 	unsigned int cur_baud;
+	int ioctl_count;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -161,6 +162,8 @@ static int handle_rx_hs(struct uart_port *uport,
 static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
 static int msm_geni_serial_power_on(struct uart_port *uport);
 static void msm_geni_serial_power_off(struct uart_port *uport);
+static int msm_geni_serial_poll_bit(struct uart_port *uport,
+				int offset, int bit_field, bool set);
 
 static atomic_t uart_line_id = ATOMIC_INIT(0);
 
@@ -218,22 +221,22 @@ static void dump_ipc(void *ipc_ctx, char *prefix, char *string,
 					(unsigned int)addr, size, buf);
 }
 
-static void check_tx_active(struct uart_port *uport)
+static bool check_tx_active(struct uart_port *uport)
 {
-	u32 geni_status = geni_read_reg_nolog(uport->membase,
-					SE_GENI_STATUS);
-
-	while ((geni_status & M_GENI_CMD_ACTIVE)) {
-		cpu_relax();
-		geni_status = geni_read_reg_nolog(uport->membase,
-					SE_GENI_STATUS);
-	}
+	/*
+	 * Poll for the TX bit in GENI_STATUS to clear. If it clears (poll
+	 * condition met), TX is not active, so return false; otherwise
+	 * return true. Hence the negation of the poll result.
+	 */
+	return !msm_geni_serial_poll_bit(uport, SE_GENI_STATUS,
+					M_GENI_CMD_ACTIVE, false);
 }
 
 static int vote_clock_on(struct uart_port *uport)
 {
 	int ret = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	int usage_count = atomic_read(&uport->dev->power.usage_count);
 
 	if (!pm_runtime_enabled(uport->dev)) {
 		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
@@ -245,8 +248,10 @@ static int vote_clock_on(struct uart_port *uport)
 		dev_err(uport->dev, "Failed to vote clock on\n");
 		return ret;
 	}
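+	/* count userspace clock votes so vote_clock_off() can spot an imbalance */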
+	port->ioctl_count++;
 	__pm_relax(&port->geni_wake);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s rpm %d ioctl %d\n",
+				__func__, usage_count, port->ioctl_count);
 	return 0;
 }
 
@@ -254,16 +259,29 @@ static int vote_clock_off(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	int ret = 0;
+	int usage_count = atomic_read(&uport->dev->power.usage_count);
 
 	if (!pm_runtime_enabled(uport->dev)) {
 		dev_err(uport->dev, "RPM not available.Can't enable clocks\n");
 		ret = -EPERM;
 		return ret;
 	}
-	/* Block till any on going Tx goes out.*/
-	check_tx_active(uport);
+	/* Check for ongoing Tx. Don't block on this for now. */
+	if (check_tx_active(uport))
+		dev_warn(uport->dev, "%s: Vote off called during active Tx",
+								__func__);
+	if (!port->ioctl_count) {
+		dev_warn(uport->dev, "%s:Imbalanced vote off ioctl %d\n",
+						 __func__, usage_count);
+		IPC_LOG_MSG(port->ipc_log_pwr,
+				"%s:Imbalanced vote_off from userspace rpm%d",
+				__func__, usage_count);
+		return 0;
+	}
+	port->ioctl_count--;
 	msm_geni_serial_power_off(uport);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s\n", __func__);
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s rpm %d ioctl %d\n",
+				__func__, usage_count, port->ioctl_count);
 	return 0;
 };
 
@@ -398,20 +416,20 @@ static int msm_geni_serial_poll_bit(struct uart_port *uport,
 	bool cond = false;
 	unsigned int baud = 115200;
 	unsigned int fifo_bits = DEF_FIFO_DEPTH_WORDS * DEF_FIFO_WIDTH_BITS;
-	unsigned long total_iter = 0;
+	unsigned long total_iter = 1000;
 
 
-	if (uport->private_data) {
+	if (uport->private_data && !uart_console(uport)) {
 		port = GET_DEV_PORT(uport);
 		baud = (port->cur_baud ? port->cur_baud : 115200);
 		fifo_bits = port->tx_fifo_depth * port->tx_fifo_width;
+		/*
+		 * Total polling iterations based on FIFO worth of bytes to be
+		 * sent at the current baud. Add a little fluff to the wait.
+		 */
+		total_iter = ((fifo_bits * USEC_PER_SEC) / baud);
+		total_iter += 50;
 	}
-	/*
-	 * Total polling iterations based on FIFO worth of bytes to be
-	 * sent at current baud .Add a little fluff to the wait.
-	 */
-	total_iter = ((fifo_bits * USEC_PER_SEC) / baud);
-	total_iter += 50;
 
 	while (iter < total_iter) {
 		reg = geni_read_reg_nolog(uport->membase, offset);
@@ -449,17 +467,11 @@ static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport)
 	done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 						M_CMD_DONE_EN, true);
 	if (!done) {
-		geni_write_reg_nolog(M_GENI_CMD_CANCEL, uport->membase,
-						SE_GENI_S_CMD_CTRL_REG);
-		irq_clear |= M_CMD_CANCEL_EN;
-		if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
-						M_CMD_CANCEL_EN, true)) {
-			geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase,
-						SE_GENI_M_CMD_CTRL_REG);
-			irq_clear |= M_CMD_ABORT_EN;
-			msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+		geni_write_reg_nolog(M_GENI_CMD_ABORT, uport->membase,
+					SE_GENI_M_CMD_CTRL_REG);
+		irq_clear |= M_CMD_ABORT_EN;
+		msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 							M_CMD_ABORT_EN, true);
-		}
 	}
 	geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
 }
@@ -678,7 +690,6 @@ static void msm_geni_serial_start_tx(struct uart_port *uport)
 	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/* Geni command setup/irq enables should complete before returning.*/
 	mb();
-	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
 }
 
 static void msm_geni_serial_stop_tx(struct uart_port *uport)
@@ -1009,6 +1020,7 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
 	disable_irq(uport->irq);
 	free_irq(uport->irq, msm_port);
 	if (uart_console(uport)) {
+		console_stop(uport->cons);
 		se_geni_resources_off(&msm_port->serial_rsc);
 	} else {
 		if (msm_port->wakeup_irq > 0) {
@@ -1032,23 +1044,33 @@ static int msm_geni_serial_port_setup(struct uart_port *uport)
 	if (!uart_console(uport)) {
 		/* For now only assume FIFO mode. */
 		msm_port->xfer_mode = FIFO_MODE;
-		ret = geni_se_init(uport->membase,
-					msm_port->rx_wm, msm_port->rx_rfr);
-		if (ret) {
-			dev_err(uport->dev, "%s: Fail\n", __func__);
-			goto exit_portsetup;
-		}
-
-		ret = geni_se_select_mode(uport->membase, msm_port->xfer_mode);
-		if (ret)
-			goto exit_portsetup;
-
 		se_get_packing_config(8, 4, false, &cfg0, &cfg1);
 		geni_write_reg_nolog(cfg0, uport->membase,
 						SE_GENI_TX_PACKING_CFG0);
 		geni_write_reg_nolog(cfg1, uport->membase,
 						SE_GENI_TX_PACKING_CFG1);
+	} else {
+		/*
+		 * Make an unconditional cancel on the main sequencer to reset
+		 * it; otherwise we could end up in data-loss scenarios.
+		 */
+		msm_port->xfer_mode = FIFO_MODE;
+		msm_geni_serial_poll_cancel_tx(uport);
+		se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+		geni_write_reg_nolog(cfg0, uport->membase,
+						SE_GENI_TX_PACKING_CFG0);
+		geni_write_reg_nolog(cfg1, uport->membase,
+						SE_GENI_TX_PACKING_CFG1);
 	}
+	ret = geni_se_init(uport->membase, msm_port->rx_wm, msm_port->rx_rfr);
+	if (ret) {
+		dev_err(uport->dev, "%s: Fail\n", __func__);
+		goto exit_portsetup;
+	}
+
+	ret = geni_se_select_mode(uport->membase, msm_port->xfer_mode);
+	if (ret)
+		goto exit_portsetup;
 
 	msm_port->port_setup = true;
 	/*
@@ -1118,12 +1140,10 @@ static int msm_geni_serial_startup(struct uart_port *uport)
 	if (unlikely(get_se_proto(uport->membase) != UART)) {
 		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
 				 __func__, get_se_proto(uport->membase));
-		if (unlikely(get_se_proto(uport->membase) != UART)) {
-			ret = -ENXIO;
-			disable_irq(uport->irq);
-			free_irq(uport->irq, msm_port);
-			goto exit_startup;
-		}
+		ret = -ENXIO;
+		disable_irq(uport->irq);
+		free_irq(uport->irq, msm_port);
+		goto exit_startup;
 	}
 
 	if (!msm_port->port_setup) {
@@ -1358,7 +1378,6 @@ static int __init msm_geni_console_setup(struct console *co, char *options)
 	int parity = 'n';
 	int flow = 'n';
 	int ret = 0;
-	unsigned long cfg0, cfg1;
 
 	if (unlikely(co->index >= GENI_UART_NR_PORTS  || co->index < 0))
 		return -ENXIO;
@@ -1386,14 +1405,6 @@ static int __init msm_geni_console_setup(struct console *co, char *options)
 	if (!dev_port->port_setup)
 		msm_geni_serial_port_setup(uport);
 
-	/*
-	 * Make an unconditional cancel on the main sequencer to reset
-	 * it else we could end up in data loss scenarios.
-	 */
-	msm_geni_serial_poll_cancel_tx(uport);
-	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
-	geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
-	geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
 
@@ -1438,9 +1449,6 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 		goto exit_geni_serial_earlyconsetup;
 	}
 
-	geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
-					(DEF_FIFO_DEPTH_WORDS - 2));
-	geni_se_select_mode(uport->membase, FIFO_MODE);
 	/*
 	 * Ignore Flow control.
 	 * Disable Tx Parity.
@@ -1471,7 +1479,11 @@ msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
 	 * it else we could end up in data loss scenarios.
 	 */
 	msm_geni_serial_poll_cancel_tx(uport);
+	msm_geni_serial_abort_rx(uport);
 	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
+	geni_se_init(uport->membase, (DEF_FIFO_DEPTH_WORDS >> 1),
+					(DEF_FIFO_DEPTH_WORDS - 2));
+	geni_se_select_mode(uport->membase, FIFO_MODE);
 	geni_write_reg_nolog(cfg0, uport->membase, SE_GENI_TX_PACKING_CFG0);
 	geni_write_reg_nolog(cfg1, uport->membase, SE_GENI_TX_PACKING_CFG1);
 	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
@@ -1802,8 +1814,7 @@ static int msm_geni_serial_runtime_suspend(struct device *dev)
 	}
 	if (port->wakeup_irq > 0)
 		enable_irq(port->wakeup_irq);
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
-				atomic_read(&dev->power.usage_count));
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__);
 exit_runtime_suspend:
 	return ret;
 }
@@ -1821,8 +1832,7 @@ static int msm_geni_serial_runtime_resume(struct device *dev)
 		dev_err(dev, "%s: Error ret %d\n", __func__, ret);
 		goto exit_runtime_resume;
 	}
-	IPC_LOG_MSG(port->ipc_log_pwr, "%s: Current usage count %d\n", __func__,
-				atomic_read(&dev->power.usage_count));
+	IPC_LOG_MSG(port->ipc_log_pwr, "%s:\n", __func__);
 exit_runtime_resume:
 	return ret;
 }
@@ -1836,7 +1846,6 @@ static int msm_geni_serial_sys_suspend_noirq(struct device *dev)
 	if (uart_console(uport)) {
 		uart_suspend_port((struct uart_driver *)uport->private_data,
 					uport);
-		se_geni_resources_off(&port->serial_rsc);
 	} else {
 		if (!pm_runtime_status_suspended(dev)) {
 			dev_info(dev, "%s: Is still active\n", __func__);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index a2a5299..44e5b5b 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1712,7 +1712,8 @@ static int serial_omap_probe(struct platform_device *pdev)
 	return 0;
 
 err_add_port:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	pm_qos_remove_request(&up->pm_qos_request);
 	device_init_wakeup(up->dev, false);
@@ -1725,9 +1726,13 @@ static int serial_omap_remove(struct platform_device *dev)
 {
 	struct uart_omap_port *up = platform_get_drvdata(dev);
 
+	pm_runtime_get_sync(up->dev);
+
+	uart_remove_one_port(&serial_omap_reg, &up->port);
+
+	pm_runtime_dont_use_autosuspend(up->dev);
 	pm_runtime_put_sync(up->dev);
 	pm_runtime_disable(up->dev);
-	uart_remove_one_port(&serial_omap_reg, &up->port);
 	pm_qos_remove_request(&up->pm_qos_request);
 	device_init_wakeup(&dev->dev, false);
 
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 3e2ef4f..d65f92b 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -906,14 +906,13 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
 		return -ENOMEM;
 	}
 
-	dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
+	dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
 				dma->rx_size, DMA_FROM_DEVICE);
 
 	spin_lock_irqsave(&p->port.lock, flags);
 
 	/* TX buffer */
-	dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
-				p->port.state->xmit.buf,
+	dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
 
 	spin_unlock_irqrestore(&p->port.lock, flags);
@@ -927,7 +926,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
 
 	if (dma->rx_chan) {
 		dmaengine_terminate_all(dma->rx_chan);
-		dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
+		dma_unmap_single(p->port.dev, dma->rx_addr,
 				dma->rx_size, DMA_FROM_DEVICE);
 		kfree(dma->rx_buf);
 		dma_release_channel(dma->rx_chan);
@@ -936,7 +935,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
 
 	if (dma->tx_chan) {
 		dmaengine_terminate_all(dma->tx_chan);
-		dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
+		dma_unmap_single(p->port.dev, dma->tx_addr,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
 		dma_release_channel(dma->tx_chan);
 		dma->tx_chan = NULL;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index c5ff13f..a876d47 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -311,6 +311,12 @@ static void acm_ctrl_irq(struct urb *urb)
 		break;
 
 	case USB_CDC_NOTIFY_SERIAL_STATE:
+		if (le16_to_cpu(dr->wLength) != 2) {
+			dev_dbg(&acm->control->dev,
+				"%s - malformed serial state\n", __func__);
+			break;
+		}
+
 		newctrl = get_unaligned_le16(data);
 
 		if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
@@ -347,11 +353,10 @@ static void acm_ctrl_irq(struct urb *urb)
 
 	default:
 		dev_dbg(&acm->control->dev,
-			"%s - unknown notification %d received: index %d "
-			"len %d data0 %d data1 %d\n",
+			"%s - unknown notification %d received: index %d len %d\n",
 			__func__,
-			dr->bNotificationType, dr->wIndex,
-			dr->wLength, data[0], data[1]);
+			dr->bNotificationType, dr->wIndex, dr->wLength);
+
 		break;
 	}
 exit:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 0a63695..0b845e5 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
 #define WDM_SUSPENDING		8
 #define WDM_RESETTING		9
 #define WDM_OVERFLOW		10
-#define WDM_DRAIN_ON_OPEN	11
 
 #define WDM_MAX			16
 
@@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb)
 				"nonzero urb status received: -ESHUTDOWN\n");
 			goto skip_error;
 		case -EPIPE:
-			dev_dbg(&desc->intf->dev,
+			dev_err(&desc->intf->dev,
 				"nonzero urb status received: -EPIPE\n");
 			break;
 		default:
@@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb)
 			desc->reslength = length;
 		}
 	}
-
-	/*
-	 * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
-	 * If desc->resp_count is unset, then the urb was submitted
-	 * without a prior notification.  If the device returned any
-	 * data, then this implies that it had messages queued without
-	 * notifying us.  Continue reading until that queue is flushed.
-	 */
-	if (!desc->resp_count) {
-		if (!length) {
-			/* do not propagate the expected -EPIPE */
-			desc->rerr = 0;
-			goto unlock;
-		}
-		dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
-		set_bit(WDM_RESPONDING, &desc->flags);
-		usb_submit_urb(desc->response, GFP_ATOMIC);
-	}
-
 skip_error:
 	set_bit(WDM_READ, &desc->flags);
 	wake_up(&desc->wait);
@@ -243,7 +223,6 @@ static void wdm_in_callback(struct urb *urb)
 		service_outstanding_interrupt(desc);
 	}
 
-unlock:
 	spin_unlock(&desc->iuspin);
 }
 
@@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file)
 			dev_err(&desc->intf->dev,
 				"Error submitting int urb - %d\n", rv);
 			rv = usb_translate_errors(rv);
-		} else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
-			/*
-			 * Some devices keep pending messages queued
-			 * without resending notifications.  We must
-			 * flush the message queue before we can
-			 * assume a one-to-one relationship between
-			 * notifications and messages in the queue
-			 */
-			dev_dbg(&desc->intf->dev, "draining queued data\n");
-			set_bit(WDM_RESPONDING, &desc->flags);
-			rv = usb_submit_urb(desc->response, GFP_KERNEL);
 		}
 	} else {
 		rv = 0;
@@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work)
 /* --- hotplug --- */
 
 static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
-		u16 bufsize, int (*manage_power)(struct usb_interface *, int),
-		bool drain_on_open)
+		u16 bufsize, int (*manage_power)(struct usb_interface *, int))
 {
 	int rv = -ENOMEM;
 	struct wdm_device *desc;
@@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
 
 	desc->manage_power = manage_power;
 
-	/*
-	 * "drain_on_open" enables a hack to work around a firmware
-	 * issue observed on network functions, in particular MBIM
-	 * functions.
-	 *
-	 * Quoting section 7 of the CDC-WMC r1.1 specification:
-	 *
-	 *  "The firmware shall interpret GetEncapsulatedResponse as a
-	 *   request to read response bytes. The firmware shall send
-	 *   the next wLength bytes from the response. The firmware
-	 *   shall allow the host to retrieve data using any number of
-	 *   GetEncapsulatedResponse requests. The firmware shall
-	 *   return a zero- length reply if there are no data bytes
-	 *   available.
-	 *
-	 *   The firmware shall send ResponseAvailable notifications
-	 *   periodically, using any appropriate algorithm, to inform
-	 *   the host that there is data available in the reply
-	 *   buffer. The firmware is allowed to send ResponseAvailable
-	 *   notifications even if there is no data available, but
-	 *   this will obviously reduce overall performance."
-	 *
-	 * These requirements, although they make equally sense, are
-	 * often not implemented by network functions. Some firmwares
-	 * will queue data indefinitely, without ever resending a
-	 * notification. The result is that the driver and firmware
-	 * loses "syncronization" if the driver ever fails to respond
-	 * to a single notification, something which easily can happen
-	 * on release(). When this happens, the driver will appear to
-	 * never receive notifications for the most current data. Each
-	 * notification will only cause a single read, which returns
-	 * the oldest data in the firmware's queue.
-	 *
-	 * The "drain_on_open" hack resolves the situation by draining
-	 * data from the firmware until none is returned, without a
-	 * prior notification.
-	 *
-	 * This will inevitably race with the firmware, risking that
-	 * we read data from the device before handling the associated
-	 * notification. To make things worse, some of the devices
-	 * needing the hack do not implement the "return zero if no
-	 * data is available" requirement either. Instead they return
-	 * an error on the subsequent read in this case.  This means
-	 * that "winning" the race can cause an unexpected EIO to
-	 * userspace.
-	 *
-	 * "winning" the race is more likely on resume() than on
-	 * open(), and the unexpected error is more harmful in the
-	 * middle of an open session. The hack is therefore only
-	 * applied on open(), and not on resume() where it logically
-	 * would be equally necessary. So we define open() as the only
-	 * driver <-> device "syncronization point".  Should we happen
-	 * to lose a notification after open(), then syncronization
-	 * will be lost until release()
-	 *
-	 * The hack should not be enabled for CDC WDM devices
-	 * conforming to the CDC-WMC r1.1 specification.  This is
-	 * ensured by setting drain_on_open to false in wdm_probe().
-	 */
-	if (drain_on_open)
-		set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
-
 	spin_lock(&wdm_device_list_lock);
 	list_add(&desc->device_list, &wdm_device_list);
 	spin_unlock(&wdm_device_list_lock);
@@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
 		goto err;
 	ep = &iface->endpoint[0].desc;
 
-	rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false);
+	rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
 
 err:
 	return rv;
@@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
 {
 	int rv = -EINVAL;
 
-	rv = wdm_create(intf, ep, bufsize, manage_power, true);
+	rv = wdm_create(intf, ep, bufsize, manage_power);
 	if (rv < 0)
 		goto err;
 
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4016dae..840930b0 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -481,11 +481,11 @@ static void snoop_urb(struct usb_device *udev,
 
 	if (userurb) {		/* Async */
 		if (when == SUBMIT)
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"length %u\n",
 					userurb, ep, t, d, length);
 		else
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"actual_length %u status %d\n",
 					userurb, ep, t, d, length,
 					timeout_or_status);
@@ -1905,7 +1905,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -1922,7 +1922,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2053,7 +2053,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -2070,7 +2070,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2499,7 +2499,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
 #endif
 
 	case USBDEVFS_DISCARDURB:
-		snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p);
+		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
 		ret = proc_unlinkurb(ps, p);
 		break;
 
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 26a305f..ee33c0d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1328,6 +1328,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 		 */
 		if (udev->parent && !PMSG_IS_AUTO(msg))
 			status = 0;
+
+		/*
+		 * If the device is inaccessible, don't try to resume
+		 * suspended interfaces and just return the error.
+		 */
+		if (status && status != -EBUSY) {
+			int err;
+			u16 devstat;
+
+			err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
+					     &devstat);
+			if (err) {
+				dev_err(&udev->dev,
+					"Failed to suspend device, error %d\n",
+					status);
+				goto done;
+			}
+		}
 	}
 
 	/* If the suspend failed, resume interfaces that did get suspended */
@@ -1772,6 +1790,9 @@ static int autosuspend_check(struct usb_device *udev)
 	int			w, i;
 	struct usb_interface	*intf;
 
+	if (udev->state == USB_STATE_NOTATTACHED)
+		return -ENODEV;
+
 	/* Fail if autosuspend is disabled, or any interfaces are in use, or
 	 * any interface drivers require remote wakeup but it isn't available.
 	 */
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 822ced9..422ce7b 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -27,6 +27,7 @@
 #define MAX_USB_MINORS	256
 static const struct file_operations *usb_minors[MAX_USB_MINORS];
 static DECLARE_RWSEM(minor_rwsem);
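+/* serializes usb_class creation and teardown */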
+static DEFINE_MUTEX(init_usb_class_mutex);
 
 static int usb_open(struct inode *inode, struct file *file)
 {
@@ -109,8 +110,9 @@ static void release_usb_class(struct kref *kref)
 
 static void destroy_usb_class(void)
 {
-	if (usb_class)
-		kref_put(&usb_class->kref, release_usb_class);
+	mutex_lock(&init_usb_class_mutex);
+	kref_put(&usb_class->kref, release_usb_class);
+	mutex_unlock(&init_usb_class_mutex);
 }
 
 int usb_major_init(void)
@@ -171,7 +173,10 @@ int usb_register_dev(struct usb_interface *intf,
 	if (intf->minor >= 0)
 		return -EADDRINUSE;
 
+	mutex_lock(&init_usb_class_mutex);
 	retval = init_usb_class();
+	mutex_unlock(&init_usb_class_mutex);
+
 	if (retval)
 		return retval;
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ff45ebf..32f99da 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1723,7 +1723,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
 		if (retval == 0)
 			retval = -EINPROGRESS;
 		else if (retval != -EIDRM && retval != -EBUSY)
-			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+			dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
 					urb, retval);
 		usb_put_dev(udev);
 	}
@@ -1890,7 +1890,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
 		/* kick hcd */
 		unlink1(hcd, urb, -ESHUTDOWN);
 		dev_dbg (hcd->self.controller,
-			"shutdown urb %p ep%d%s%s\n",
+			"shutdown urb %pK ep%d%s%s\n",
 			urb, usb_endpoint_num(&ep->desc),
 			is_in ? "in" : "out",
 			({	char *s;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index edb7a9a..fcbaa61 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -365,7 +365,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
 }
 
 /* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data)
+static int get_hub_descriptor(struct usb_device *hdev,
+		struct usb_hub_descriptor *desc)
 {
 	int i, ret, size;
 	unsigned dtype;
@@ -381,10 +382,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data)
 	for (i = 0; i < 3; i++) {
 		ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
-			dtype << 8, 0, data, size,
+			dtype << 8, 0, desc, size,
 			USB_CTRL_GET_TIMEOUT);
-		if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
+		if (hub_is_superspeed(hdev)) {
+			if (ret == size)
+				return ret;
+		} else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
+			/* Make sure we have the DeviceRemovable field. */
+			size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
+			if (ret < size)
+				return -EMSGSIZE;
 			return ret;
+		}
 	}
 	return -EINVAL;
 }
@@ -1075,6 +1084,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 		portstatus = portchange = 0;
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
+		if (status)
+			goto abort;
+
 		if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
 			dev_dbg(&port_dev->dev, "status %04x change %04x\n",
 					portstatus, portchange);
@@ -1207,7 +1219,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
 	/* Scan all ports that need attention */
 	kick_hub_wq(hub);
-
+ abort:
 	if (type == HUB_INIT2 || type == HUB_INIT3) {
 		/* Allow autosuspend if it was suppressed */
  disconnected:
@@ -1319,7 +1331,7 @@ static int hub_configure(struct usb_hub *hub,
 	}
 	mutex_init(&hub->status_mutex);
 
-	hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
+	hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
 	if (!hub->descriptor) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1327,7 +1339,7 @@ static int hub_configure(struct usb_hub *hub,
 
 	/* Request the entire hub descriptor.
 	 * hub->descriptor can handle USB_MAXCHILDREN ports,
-	 * but the hub can/will return fewer bytes here.
+	 * but a (non-SS) hub can/will return fewer bytes here.
 	 */
 	ret = get_hub_descriptor(hdev, hub->descriptor);
 	if (ret < 0) {
@@ -2093,6 +2105,12 @@ void usb_disconnect(struct usb_device **pdev)
 	dev_info(&udev->dev, "USB disconnect, device number %d\n",
 			udev->devnum);
 
+	/*
+	 * Ensure that the pm runtime code knows that the USB device
+	 * is in the process of being disconnected.
+	 */
+	pm_runtime_barrier(&udev->dev);
+
 	usb_lock_device(udev);
 
 	hub_disconnect_children(udev);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a903969..5133ab9 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -333,7 +333,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 	if (!urb || !urb->complete)
 		return -EINVAL;
 	if (urb->hcpriv) {
-		WARN_ONCE(1, "URB %p submitted while active\n", urb);
+		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
 		return -EBUSY;
 	}
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index a159011..38614fa 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3743,7 +3743,8 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
 		}
 	}
 
-	power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
+	power_supply_get_property(mdwc->usb_psy,
+			POWER_SUPPLY_PROP_REAL_TYPE, &pval);
 	if (pval.intval != POWER_SUPPLY_TYPE_USB)
 		return 0;
 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 62574bf..026ff6c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -990,11 +990,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 	}
 
 	/* always enable Continue on Short Packet */
-	trb->ctrl |= DWC3_TRB_CTRL_CSP;
+	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+		if (req->request.short_not_ok)
+			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+	}
 
 	if ((!req->request.no_interrupt && !chain) ||
 			(dwc3_calc_trbs_left(dep) == 0))
-		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
 
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -3419,13 +3424,13 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
 	trace_dwc3_event(event->raw);
 	/* skip event processing in absence of vbus */
 	if (!dwc->vbus_active) {
-		dev_err(dwc->dev, "SKIP EVT:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT", event->raw);
 		return;
 	}
 
 	/* If run/stop is cleared don't process any more events */
 	if (!dwc->pullups_connected) {
-		dev_err(dwc->dev, "SKIP_EVT_PULLUP:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT_PULLUP", event->raw);
 		return;
 	}
 
@@ -3549,6 +3554,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc)
 
 	evt = dwc->ev_buf;
 
+	/*
+	 * With a PCIe legacy interrupt, testing shows the top-half irq handler
+	 * can be called again after HW interrupt deassertion. Make sure the
+	 * bottom-half event handler has completed before caching a new event,
+	 * to prevent losing events.
+	 */
+	if (evt->flags & DWC3_EVENT_PENDING)
+		return IRQ_HANDLED;
+
 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
 	count &= DWC3_GEVNTCOUNT_MASK;
 	if (!count)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index da284fe..b040fdd 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -562,6 +562,7 @@
 choice
 	tristate "USB Gadget Drivers"
 	default USB_ETH
+	optional
 	help
 	  A Linux "Gadget Driver" talks to the USB Peripheral Controller
 	  driver through the abstract "gadget" API.  Some other operating
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index af1bca6..ea17164 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -591,14 +591,10 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	ssize_t r = count;
 	unsigned xfer;
 	int ret = 0;
-	size_t len;
+	size_t len = 0;
 
 	DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
 
-	len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
-	if (len > MTP_BULK_BUFFER_SIZE)
-		return -EINVAL;
-
 	/* we will block until we're online */
 	DBG(cdev, "mtp_read: waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
@@ -613,6 +609,14 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		return -EINVAL;
 
 	spin_lock_irq(&dev->lock);
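+	/* only align the read size once ep_out has been configured (desc set) */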
+	if (dev->ep_out->desc) {
+		len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
+		if (len > MTP_BULK_BUFFER_SIZE) {
+			spin_unlock_irq(&dev->lock);
+			return -EINVAL;
+		}
+	}
+
 	if (dev->state == STATE_CANCELED) {
 		/* report cancelation to userspace */
 		dev->state = STATE_READY;
@@ -968,6 +972,10 @@ static void receive_file_work(struct work_struct *data)
 				break;
 			}
 
+			if (read_req->status) {
+				r = read_req->status;
+				break;
+			}
 			/* Check if we aligned the size due to MTU constraint */
 			if (count < read_req->length)
 				read_req->actual = (read_req->actual > count ?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac..f6c7a27 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -231,7 +231,8 @@ static int ohci_urb_enqueue (
 
 		/* Start up the I/O watchdog timer, if it's not running */
 		if (!timer_pending(&ohci->io_watchdog) &&
-				list_empty(&ohci->eds_in_use)) {
+				list_empty(&ohci->eds_in_use) &&
+				!(ohci->flags & OHCI_QUIRK_QEMU)) {
 			ohci->prev_frame_no = ohci_frame_no(ohci);
 			mod_timer(&ohci->io_watchdog,
 					jiffies + IO_WATCHDOG_DELAY);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bb15096..a84aebe 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -164,6 +164,15 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
 	return 0;
 }
 
+static int ohci_quirk_qemu(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci->flags |= OHCI_QUIRK_QEMU;
+	ohci_dbg(ohci, "enabled qemu quirk\n");
+	return 0;
+}
+
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
 	{
@@ -214,6 +223,13 @@ static const struct pci_device_id ohci_pci_quirks[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
 		.driver_data = (unsigned long)ohci_quirk_amd700,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_APPLE,
+		.device		= 0x003f,
+		.subvendor	= PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+		.subdevice	= PCI_SUBDEVICE_ID_QEMU,
+		.driver_data	= (unsigned long)ohci_quirk_qemu,
+	},
 
 	/* FIXME for some of the early AMD 760 southbridges, OHCI
 	 * won't work at all.  blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 37f1725..a51b189 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -418,6 +418,7 @@ struct ohci_hcd {
 #define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
 #define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
 #define	OHCI_QUIRK_GLOBAL_SUSPEND	0x800		/* must suspend ports */
+#define	OHCI_QUIRK_QEMU		0x1000			/* relax timing expectations */
 
 	// there are also chip quirks/bugs in init logic
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b59efd2..d680eb3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 	}
 
 	if (max_packet) {
-		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		seg->bounce_buf = kzalloc(max_packet, flags);
 		if (!seg->bounce_buf) {
 			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
 			kfree(seg);
@@ -1494,6 +1494,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 */
 	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
 	interval = xhci_get_endpoint_interval(udev, ep);
+
+	/* Periodic endpoint bInterval limit quirk */
+	if (usb_endpoint_xfer_int(&ep->desc) ||
+	    usb_endpoint_xfer_isoc(&ep->desc)) {
+		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
+		    udev->speed >= USB_SPEED_HIGH &&
+		    interval >= 7) {
+			interval = 6;
+		}
+	}
+
 	mult = xhci_get_endpoint_mult(udev, ep);
 	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
 	max_burst = xhci_get_endpoint_max_burst(udev, ep);
@@ -1715,7 +1726,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
 				flags);
 		if (!buf)
 			goto fail_sp5;
@@ -2752,7 +2763,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
 		xhci->cmd_ring->cycle_state;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Setting command ring address to 0x%x", val);
+			"// Setting command ring address to 0x%016llx", val_64);
 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
 	xhci_dbg_cmd_ptrs(xhci);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 954abfd..e7d6752 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
 #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+#define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
 		xhci->quirks |= XHCI_MISSING_CAS;
 
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -199,6 +202,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 			pdev->device == 0x1042)
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 
+	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"QUIRK: Resetting on resume");
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index a0bc61f..6cb5ab3 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -210,7 +210,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENODEV;
+		return irq;
 
 	/*
 	 * sysdev must point to a device that is known to the system firmware
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 918f659..86d578e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1667,6 +1667,7 @@ struct xhci_hcd {
 #define XHCI_MISSING_CAS	(1 << 24)
 /* For controller with a broken Port Disable implementation */
 #define XHCI_BROKEN_PORT_PED	(1 << 25)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a..efecb87 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -194,7 +194,7 @@ static int chaoskey_probe(struct usb_interface *interface,
 
 	dev->in_ep = in_ep;
 
-	if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+	if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
 		dev->reads_started = 1;
 
 	dev->size = size;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 37c63cb..0ef29d2 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
 			info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
 
 			/* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
-			info.speed = le16_to_cpu(dev->udev->speed);
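+			/* udev->speed is a plain enum, not a little-endian field */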
+			info.speed = dev->udev->speed;
 			info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
 			info.report_size = dev->report_size;
 
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c8fbe7b..c2e2b2e 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file)
 	int subminor;
 	int retval = 0;
 	struct usb_interface *interface;
-	struct tower_reset_reply reset_reply;
+	struct tower_reset_reply *reset_reply;
 	int result;
 
+	reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+	if (!reset_reply) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	nonseekable_open(inode, file);
 	subminor = iminor(inode);
 
@@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file)
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &reset_reply,
-				  sizeof(reset_reply),
+				  reset_reply,
+				  sizeof(*reset_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@ static int tower_open (struct inode *inode, struct file *file)
 	mutex_unlock(&dev->lock);
 
 exit:
+	kfree(reset_reply);
 	return retval;
 }
 
@@ -808,7 +816,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 	struct lego_usb_tower *dev = NULL;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor* endpoint;
-	struct tower_get_version_reply get_version_reply;
+	struct tower_get_version_reply *get_version_reply = NULL;
 	int i;
 	int retval = -ENOMEM;
 	int result;
@@ -886,6 +894,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 	dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
 	dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
 
+	get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+	if (!get_version_reply) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
 	/* get the firmware version and log it */
 	result = usb_control_msg (udev,
 				  usb_rcvctrlpipe(udev, 0),
@@ -893,18 +908,19 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &get_version_reply,
-				  sizeof(get_version_reply),
+				  get_version_reply,
+				  sizeof(*get_version_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(idev, "LEGO USB Tower get version control request failed\n");
 		retval = result;
 		goto error;
 	}
-	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
-		 "build %d\n", get_version_reply.major,
-		 get_version_reply.minor,
-		 le16_to_cpu(get_version_reply.build_no));
+	dev_info(&interface->dev,
+		 "LEGO USB Tower firmware version is %d.%d build %d\n",
+		 get_version_reply->major,
+		 get_version_reply->minor,
+		 le16_to_cpu(get_version_reply->build_no));
 
 	/* we can register the device now, as it is ready */
 	usb_set_intfdata (interface, dev);
@@ -925,9 +941,11 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 		 USB_MAJOR, dev->minor);
 
 exit:
+	kfree(get_version_reply);
 	return retval;
 
 error:
+	kfree(get_version_reply);
 	tower_delete(dev);
 	return retval;
 }
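
The legousbtower changes above (like the ene_ub6250 bounce-buffer rework later in this series) enforce one rule: buffers handed to USB transfer functions may be DMA-mapped by the host controller, so they must live on the heap, never on the stack. A minimal sketch of that pattern, with a made-up vendor request code:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	#define EXAMPLE_GET_STATUS	0x42	/* placeholder vendor request */

	static int example_vendor_read(struct usb_device *udev, void *out, u16 len)
	{
		u8 *buf;
		int ret;

		/* Heap memory is DMA-able; an on-stack array is not guaranteed to be. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      EXAMPLE_GET_STATUS,
				      USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
				      0, 0, buf, len, 1000);
		if (ret > 0)
			memcpy(out, buf, ret);	/* ret <= len on success */

		kfree(buf);
		return ret;
	}
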
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5c8210d..d94927e 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -159,6 +159,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
 			case USB_ENDPOINT_XFER_INT:
 				if (dev->info->intr)
 					goto try_intr;
+				continue;
 			case USB_ENDPOINT_XFER_ISOC:
 				if (dev->info->iso)
 					goto try_iso;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8064514..99beda9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget)
 	int ret;
 	struct usb_hcd *hcd = musb->hcd;
 
-	MUSB_HST_MODE(musb);
-	musb->xceiv->otg->default_a = 1;
-	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-
+	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+		MUSB_HST_MODE(musb);
+		musb->xceiv->otg->default_a = 1;
+		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+	}
 	otg_set_host(musb->xceiv->otg, &hcd->self);
 	hcd->self.otg_port = 1;
 	musb->xceiv->otg->host = &hcd->self;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e6959cc..4047426 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -220,6 +220,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 	u32				dma_remaining;
 	int				src_burst, dst_burst;
 	u16				csr;
+	u32				psize;
 	int				ch;
 	s8				dmareq;
 	s8				sync_dev;
@@ -391,15 +392,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
 
 	if (chdat->tx) {
 		/* Send transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~0x7ff;
+		psize |= chdat->transfer_packet_sz;
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
 	} else {
 		/* Receive transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz << 16);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~(0x7ff << 16);
+		psize |= (chdat->transfer_packet_sz << 16);
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 3ee2938..d951abb 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -2374,7 +2374,7 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
 	pd->vbus_present = val.intval;
 
 	ret = power_supply_get_property(pd->usb_psy,
-			POWER_SUPPLY_PROP_TYPE, &val);
+			POWER_SUPPLY_PROP_REAL_TYPE, &val);
 	if (ret) {
 		usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n", ret);
 		return ret;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d8d13ee..1939496 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
 	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
 	{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
 					USB_CLASS_VENDOR_SPEC,
 					USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+	{ USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
 	{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
 	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1507,9 +1508,9 @@ static int set_serial_info(struct tty_struct *tty,
 					(new_serial.flags & ASYNC_FLAGS));
 	priv->custom_divisor = new_serial.custom_divisor;
 
+check_and_exit:
 	write_latency_timer(port);
 
-check_and_exit:
 	if ((old_priv.flags & ASYNC_SPD_MASK) !=
 	     (priv->flags & ASYNC_SPD_MASK)) {
 		if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 48ee04c..4fcf1ce 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -873,9 +873,17 @@
 #define	FIC_VID			0x1457
 #define	FIC_NEO1973_DEBUG_PID	0x5118
 
+/*
+ * Actel / Microsemi
+ */
+#define ACTEL_VID				0x1514
+#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID	0x2008
+
 /* Olimex */
 #define OLIMEX_VID			0x15BA
 #define OLIMEX_ARM_USB_OCD_PID		0x0003
+#define OLIMEX_ARM_USB_TINY_PID	0x0004
+#define OLIMEX_ARM_USB_TINY_H_PID	0x002a
 #define OLIMEX_ARM_USB_OCD_H_PID	0x002b
 
 /*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index f1a8fdc..e98532f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2349,8 +2349,11 @@ static void change_port_settings(struct tty_struct *tty,
 	if (!baud) {
 		/* pick a default, any default... */
 		baud = 9600;
-	} else
+	} else {
+		/* Avoid a zero divisor. */
+		baud = min(baud, 461550);
 		tty_encode_baud_rate(tty, baud, baud);
+	}
 
 	edge_port->baud_rate = baud;
 	config->wBaudRate = (__u16)((461550L + baud/2) / baud);
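
For context on the clamp above: wBaudRate comes from the integer division (461550 + baud/2) / baud on the last line, which truncates to zero once baud exceeds 923100 -- for example, a requested 1,000,000 baud gives (461550 + 500000) / 1000000 = 0 -- and wBaudRate is subsequently used as a divisor itself, hence the divide-by-zero this change guards against. Capping baud at 461550 keeps the computed value at 1 or more.
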
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index edbc81f..70f346f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
 		return -ENOMEM;
 
 	divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
-	put_unaligned_le32(cpu_to_le32(divisor), buf);
+	put_unaligned_le32(divisor, buf);
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				MCT_U232_SET_BAUD_RATE_REQUEST,
 				MCT_U232_SET_REQUEST_TYPE,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af67a0d..3bf61ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+#define TELIT_PRODUCT_ME910			0x1100
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
@@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = {
 	.reserved = BIT(5) | BIT(6),
 };
 
+static const struct option_blacklist_info telit_me910_blacklist = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(1) | BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = {
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 38b3f0d..fd509ed6c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
 	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
+	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 02bdaa9..4340b49 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@ struct ms_lib_ctrl {
 #define SD_BLOCK_LEN  9
 
 struct ene_ub6250_info {
+
+	/* I/O bounce buffer */
+	u8		*bbuf;
+
 	/* for 6250 code */
 	struct SD_STATUS	SD_Status;
 	struct MS_STATUS	MS_Status;
@@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag);
 
 static void ene_ub6250_info_destructor(void *extra)
 {
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
+
 	if (!extra)
 		return;
+	kfree(info->bbuf);
 }
 
 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
 		u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 	u32 bn = PhyBlockAddr * 0x20 + PageNum;
 
 	result = ene_load_bincode(us, MS_RW_PATTERN);
@@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
 	bcb->CDB[2]     = (unsigned char)(PhyBlockAddr>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
 	ExtraDat->status0  = 0x10;  /* Not yet,fireware support */
 
 	ExtraDat->status1  = 0x00;  /* Not yet,fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
 				u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
 	bcb->CDB[2]     = (unsigned char)(PhyBlock>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
 	ExtraDat->intr     = 0x80;  /* Not yet, waiting for fireware support */
 	ExtraDat->status0  = 0x10;  /* Not yet, waiting for fireware support */
 	ExtraDat->status1  = 0x00;  /* Not yet, waiting for fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1558,9 +1567,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
 	u16 PhyBlock, newblk, i;
 	u16 LogStart, LogEnde;
 	struct ms_lib_type_extdat extdat;
-	u8 buf[0x200];
 	u32 count = 0, index = 0;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
 		ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1574,14 +1583,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
 			}
 
 			if (count == PhyBlock) {
-				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
+				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
+						bbuf);
 				count += 0x80;
 			}
 			index = (PhyBlock % 0x80) * 4;
 
-			extdat.ovrflg = buf[index];
-			extdat.mngflg = buf[index+1];
-			extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
+			extdat.ovrflg = bbuf[index];
+			extdat.mngflg = bbuf[index+1];
+			extdat.logadr = memstick_logaddr(bbuf[index+2],
+					bbuf[index+3]);
 
 			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
 				ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2064,9 +2075,9 @@ static int ene_ms_init(struct us_data *us)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
-	u8 buf[0x200];
 	u16 MSP_BlockSize, MSP_UserAreaBlocks;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	printk(KERN_INFO "transport --- ENE_MSInit\n");
 
@@ -2085,13 +2096,13 @@ static int ene_ms_init(struct us_data *us)
 	bcb->CDB[0]     = 0xF1;
 	bcb->CDB[1]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		printk(KERN_ERR "Execution MS Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	/* the same part to test ENE */
-	info->MS_Status = *(struct MS_STATUS *)&buf[0];
+	info->MS_Status = *(struct MS_STATUS *) bbuf;
 
 	if (info->MS_Status.Insert && info->MS_Status.Ready) {
 		printk(KERN_INFO "Insert     = %x\n", info->MS_Status.Insert);
@@ -2100,15 +2111,15 @@ static int ene_ms_init(struct us_data *us)
 		printk(KERN_INFO "IsMSPHG    = %x\n", info->MS_Status.IsMSPHG);
 		printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
 		if (info->MS_Status.IsMSPro) {
-			MSP_BlockSize      = (buf[6] << 8) | buf[7];
-			MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
+			MSP_BlockSize      = (bbuf[6] << 8) | bbuf[7];
+			MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
 			info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
 		} else {
 			ms_card_init(us); /* Card is MS (to ms.c)*/
 		}
 		usb_stor_dbg(us, "MS Init Code OK !!\n");
 	} else {
-		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -2118,9 +2129,9 @@ static int ene_ms_init(struct us_data *us)
 static int ene_sd_init(struct us_data *us)
 {
 	int result;
-	u8  buf[0x200];
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	usb_stor_dbg(us, "transport --- ENE_SDInit\n");
 	/* SD Init Part-1 */
@@ -2154,17 +2165,17 @@ static int ene_sd_init(struct us_data *us)
 	bcb->Flags              = US_BULK_FLAG_IN;
 	bcb->CDB[0]             = 0xF1;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	info->SD_Status =  *(struct SD_STATUS *)&buf[0];
+	info->SD_Status =  *(struct SD_STATUS *) bbuf;
 	if (info->SD_Status.Insert && info->SD_Status.Ready) {
 		struct SD_STATUS *s = &info->SD_Status;
 
-		ene_get_card_status(us, (unsigned char *)&buf);
+		ene_get_card_status(us, bbuf);
 		usb_stor_dbg(us, "Insert     = %x\n", s->Insert);
 		usb_stor_dbg(us, "Ready      = %x\n", s->Ready);
 		usb_stor_dbg(us, "IsMMC      = %x\n", s->IsMMC);
@@ -2172,7 +2183,7 @@ static int ene_sd_init(struct us_data *us)
 		usb_stor_dbg(us, "HiSpeed    = %x\n", s->HiSpeed);
 		usb_stor_dbg(us, "WtP        = %x\n", s->WtP);
 	} else {
-		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	return USB_STOR_TRANSPORT_GOOD;
@@ -2182,13 +2193,15 @@ static int ene_sd_init(struct us_data *us)
 static int ene_init(struct us_data *us)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+	u8 *bbuf = info->bbuf;
 
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
+	misc_reg03 = bbuf[0];
 	if (misc_reg03 & 0x01) {
 		if (!info->SD_Status.Ready) {
 			result = ene_sd_init(us);
@@ -2305,8 +2318,9 @@ static int ene_ub6250_probe(struct usb_interface *intf,
 			 const struct usb_device_id *id)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct us_data *us;
+	struct ene_ub6250_info *info;
 
 	result = usb_stor_probe1(&us, intf, id,
 		   (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2315,11 +2329,16 @@ static int ene_ub6250_probe(struct usb_interface *intf,
 		return result;
 
 	/* FIXME: where should the code alloc extra buf ? */
-	if (!us->extra) {
-		us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
-		if (!us->extra)
-			return -ENOMEM;
-		us->extra_destructor = ene_ub6250_info_destructor;
+	us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
+	if (!us->extra)
+		return -ENOMEM;
+	us->extra_destructor = ene_ub6250_info_destructor;
+
+	info = (struct ene_ub6250_info *)(us->extra);
+	info->bbuf = kmalloc(512, GFP_KERNEL);
+	if (!info->bbuf) {
+		kfree(us->extra);
+		return -ENOMEM;
 	}
 
 	us->transport_name = "ene_ub6250";
@@ -2331,12 +2350,13 @@ static int ene_ub6250_probe(struct usb_interface *intf,
 		return result;
 
 	/* probe card type */
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_disconnect(intf);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
+	misc_reg03 = info->bbuf[0];
 	if (!(misc_reg03 & 0x01)) {
 		pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
 			"It does not support SM cards.\n");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 6345e85..a50cf45 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
 static
 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
 {
+	struct usb_device *udev = interface_to_usbdev(iface);
 	struct i1480_usb *i1480_usb;
 	struct i1480 *i1480;
 	struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
 			iface->cur_altsetting->desc.bInterfaceNumber);
 		goto error;
 	}
-	if (iface->num_altsetting > 1
-	    && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+	if (iface->num_altsetting > 1 &&
+			le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
 		/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
 		result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
 		if (result < 0)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba1942..1d48e62 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -130,57 +130,36 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
 	rb_erase(&old->node, &iommu->dma_list);
 }
 
-struct vwork {
-	struct mm_struct	*mm;
-	long			npage;
-	struct work_struct	work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(long npage, bool *lock_cap)
 {
-	struct vwork *vwork = container_of(work, struct vwork, work);
-	struct mm_struct *mm;
+	int ret;
 
-	mm = vwork->mm;
-	down_write(&mm->mmap_sem);
-	mm->locked_vm += vwork->npage;
-	up_write(&mm->mmap_sem);
-	mmput(mm);
-	kfree(vwork);
-}
+	if (!npage)
+		return 0;
 
-static void vfio_lock_acct(long npage)
-{
-	struct vwork *vwork;
-	struct mm_struct *mm;
+	if (!current->mm)
+		return -ESRCH; /* process exited */
 
-	if (!current->mm || !npage)
-		return; /* process exited or nothing to do */
+	ret = down_write_killable(&current->mm->mmap_sem);
+	if (!ret) {
+		if (npage > 0) {
+			if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
+				unsigned long limit;
 
-	if (down_write_trylock(&current->mm->mmap_sem)) {
-		current->mm->locked_vm += npage;
+				limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+				if (current->mm->locked_vm + npage > limit)
+					ret = -ENOMEM;
+			}
+		}
+
+		if (!ret)
+			current->mm->locked_vm += npage;
+
 		up_write(&current->mm->mmap_sem);
-		return;
 	}
 
-	/*
-	 * Couldn't get mmap_sem lock, so must setup to update
-	 * mm->locked_vm later. If locked_vm were atomic, we
-	 * wouldn't need this silliness
-	 */
-	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (!vwork)
-		return;
-	mm = get_task_mm(current);
-	if (!mm) {
-		kfree(vwork);
-		return;
-	}
-	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-	vwork->mm = mm;
-	vwork->npage = npage;
-	schedule_work(&vwork->work);
+	return ret;
 }
 
 /*
@@ -262,9 +241,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
 static long vfio_pin_pages(unsigned long vaddr, long npage,
 			   int prot, unsigned long *pfn_base)
 {
-	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
-	long ret, i;
+	long ret, i = 1;
 	bool rsvd;
 
 	if (!current->mm)
@@ -283,16 +262,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		return -ENOMEM;
 	}
 
-	if (unlikely(disable_hugepages)) {
-		if (!rsvd)
-			vfio_lock_acct(1);
-		return 1;
-	}
+	if (unlikely(disable_hugepages))
+		goto out;
 
 	/* Lock all the consecutive pages from pfn_base */
-	for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
-		unsigned long pfn = 0;
-
+	for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
 		ret = vaddr_get_pfn(vaddr, prot, &pfn);
 		if (ret)
 			break;
@@ -308,12 +282,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 			put_pfn(pfn, prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 				__func__, limit << PAGE_SHIFT);
-			break;
+			ret = -ENOMEM;
+			goto unpin_out;
 		}
 	}
 
+out:
 	if (!rsvd)
-		vfio_lock_acct(i);
+		ret = vfio_lock_acct(i, &lock_cap);
+
+unpin_out:
+	if (ret) {
+		if (!rsvd) {
+			for (pfn = *pfn_base ; i ; pfn++, i--)
+				put_pfn(pfn, prot);
+		}
+
+		return ret;
+	}
 
 	return i;
 }
@@ -328,7 +314,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
 		unlocked += put_pfn(pfn++, prot);
 
 	if (do_accounting)
-		vfio_lock_acct(-unlocked);
+		vfio_lock_acct(-unlocked, NULL);
 
 	return unlocked;
 }
@@ -390,7 +376,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 		cond_resched();
 	}
 
-	vfio_lock_acct(-unlocked);
+	vfio_lock_acct(-unlocked, NULL);
 }
 
 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
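
The vfio_lock_acct() rewrite above drops the deferred-work fallback in favour of a synchronous, fallible update. Stripped of the vfio-specific lock_cap plumbing, the rule it implements looks roughly like the sketch below (the function name is illustrative): a task without CAP_IPC_LOCK may not push mm->locked_vm past its RLIMIT_MEMLOCK budget in pages, and both the check and the update must happen with mmap_sem held for write.

	#include <linux/capability.h>
	#include <linux/mm.h>
	#include <linux/sched.h>

	static int example_account_locked_pages(long npage)
	{
		struct mm_struct *mm = current->mm;
		unsigned long limit;
		int ret;

		if (!mm)
			return -ESRCH;		/* process is exiting */

		ret = down_write_killable(&mm->mmap_sem);
		if (ret)
			return ret;

		if (npage > 0 && !capable(CAP_IPC_LOCK)) {
			limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (mm->locked_vm + npage > limit)
				ret = -ENOMEM;
		}

		if (!ret)
			mm->locked_vm += npage;

		up_write(&mm->mmap_sem);
		return ret;
	}
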
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 1626892..1cf907e 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
 		return "4:3";
 	case HDMI_PICTURE_ASPECT_16_9:
 		return "16:9";
+	case HDMI_PICTURE_ASPECT_64_27:
+		return "64:27";
+	case HDMI_PICTURE_ASPECT_256_135:
+		return "256:135";
 	case HDMI_PICTURE_ASPECT_RESERVED:
 		return "Reserved";
 	}
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 99ebf6e..5615f40 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface,
 		return -ENODEV;
 	}
 
+	if (iface_desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	/* check out the endpoint: it has to be Interrupt & IN */
 	endpoint = &iface_desc->endpoint[0].desc;
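
The pcwd_usb hunk above is an instance of a hardening pattern that recurs in this series: validate bNumEndpoints before dereferencing endpoint[0], since a crafted device can advertise fewer endpoints than the driver assumes. A generic sketch (names are illustrative):

	#include <linux/usb.h>

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		struct usb_host_interface *alt = intf->cur_altsetting;
		struct usb_endpoint_descriptor *ep;

		/* A malicious device may report zero endpoints; never index blindly. */
		if (alt->desc.bNumEndpoints < 1)
			return -ENODEV;

		ep = &alt->endpoint[0].desc;
		if (!usb_endpoint_is_int_in(ep))
			return -ENODEV;

		return 0;
	}
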
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 9ad527f..2924bddb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -102,12 +102,11 @@ void invalidate_bdev(struct block_device *bdev)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	if (mapping->nrpages == 0)
-		return;
-
-	invalidate_bh_lrus();
-	lru_add_drain_all();	/* make sure all lru add caches are flushed */
-	invalidate_mapping_pages(mapping, 0, -1);
+	if (mapping->nrpages) {
+		invalidate_bh_lrus();
+		lru_add_drain_all();	/* make sure all lru add caches are flushed */
+		invalidate_mapping_pages(mapping, 0, -1);
+	}
 	/* 99% of the time, we don't need to flush the cleancache on the bdev.
 	 * But, for the strange corners, lets be cautious
 	 */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index febc28f..75267cd 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
 
 	if (update_xattr) {
 		int err = 0;
+
 		if (xattr && (flags & XATTR_CREATE))
 			err = -EEXIST;
 		else if (!xattr && (flags & XATTR_REPLACE))
@@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
 		if (err) {
 			kfree(name);
 			kfree(val);
+			kfree(*newxattr);
 			return err;
 		}
 		if (update_xattr < 0) {
 			if (xattr)
 				__remove_xattr(ci, xattr);
 			kfree(name);
+			kfree(*newxattr);
 			return 0;
 		}
 	}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 02b071bf..a0b3e7d 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target)
 	case SFM_COLON:
 		*target = ':';
 		break;
+	case SFM_DOUBLEQUOTE:
+		*target = '"';
+		break;
 	case SFM_ASTERISK:
 		*target = '*';
 		break;
@@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
 	case ':':
 		dest_char = cpu_to_le16(SFM_COLON);
 		break;
+	case '"':
+		dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
+		break;
 	case '*':
 		dest_char = cpu_to_le16(SFM_ASTERISK);
 		break;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 479bc0a..07ade70 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -57,6 +57,7 @@
  * not conflict (although almost does) with the mapping above.
  */
 
+#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
 #define SFM_ASTERISK    ((__u16) 0xF021)
 #define SFM_QUESTION    ((__u16) 0xF025)
 #define SFM_COLON       ((__u16) 0xF022)
@@ -64,8 +65,8 @@
 #define SFM_LESSTHAN    ((__u16) 0xF023)
 #define SFM_PIPE        ((__u16) 0xF027)
 #define SFM_SLASH       ((__u16) 0xF026)
-#define SFM_PERIOD	((__u16) 0xF028)
-#define SFM_SPACE	((__u16) 0xF029)
+#define SFM_SPACE	((__u16) 0xF028)
+#define SFM_PERIOD	((__u16) 0xF029)
 
 /*
  * Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 15261ba..c0c2530 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
 struct workqueue_struct	*cifsiod_wq;
+struct workqueue_struct	*cifsoplockd_wq;
 __u32 cifs_lock_secret;
 
 /*
@@ -1283,9 +1284,16 @@ init_cifs(void)
 		goto out_clean_proc;
 	}
 
+	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
+					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+	if (!cifsoplockd_wq) {
+		rc = -ENOMEM;
+		goto out_destroy_cifsiod_wq;
+	}
+
 	rc = cifs_fscache_register();
 	if (rc)
-		goto out_destroy_wq;
+		goto out_destroy_cifsoplockd_wq;
 
 	rc = cifs_init_inodecache();
 	if (rc)
@@ -1333,7 +1341,9 @@ init_cifs(void)
 	cifs_destroy_inodecache();
 out_unreg_fscache:
 	cifs_fscache_unregister();
-out_destroy_wq:
+out_destroy_cifsoplockd_wq:
+	destroy_workqueue(cifsoplockd_wq);
+out_destroy_cifsiod_wq:
 	destroy_workqueue(cifsiod_wq);
 out_clean_proc:
 	cifs_proc_clean();
@@ -1356,6 +1366,7 @@ exit_cifs(void)
 	cifs_destroy_mids();
 	cifs_destroy_inodecache();
 	cifs_fscache_unregister();
+	destroy_workqueue(cifsoplockd_wq);
 	destroy_workqueue(cifsiod_wq);
 	cifs_proc_clean();
 }
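
The init_cifs()/exit_cifs() hunks above add a second, dedicated workqueue so oplock-break work no longer shares cifsiod_wq, and they unwind it in reverse order on every error path. The same create/teardown shape in isolation (a generic module sketch, not cifs code):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		example_wq = alloc_workqueue("example_wq",
					     WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* Tear down in reverse order of creation. */
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
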
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b3830f7..48ef401 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1651,6 +1651,7 @@ void cifs_oplock_break(struct work_struct *work);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
+extern struct workqueue_struct *cifsoplockd_wq;
 extern __u32 cifs_lock_secret;
 
 extern mempool_t *cifs_mid_poolp;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 586fdac..1f91c9d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -717,6 +717,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
 	if (rc)
 		return rc;
 
+	if (server->capabilities & CAP_UNICODE)
+		smb->hdr.Flags2 |= SMBFLG2_UNICODE;
+
 	/* set up echo request */
 	smb->hdr.Tid = 0xffff;
 	smb->hdr.WordCount = 1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index b8015de..1a54569 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2839,16 +2839,14 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 {
 	struct cifs_sb_info *old = CIFS_SB(sb);
 	struct cifs_sb_info *new = mnt_data->cifs_sb;
+	bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
+	bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
 
-	if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
-		if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
-			return 0;
-		/* The prepath should be null terminated strings */
-		if (strcmp(new->prepath, old->prepath))
-			return 0;
-
+	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
 		return 1;
-	}
+	else if (!old_set && !new_set)
+		return 1;
+
 	return 0;
 }
 
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0015287..bdba9e7 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -264,10 +264,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 				rc = -EOPNOTSUPP;
 			break;
 		case CIFS_IOC_GET_MNT_INFO:
+			if (pSMBFile == NULL)
+				break;
 			tcon = tlink_tcon(pSMBFile->tlink);
 			rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
 			break;
 		case CIFS_ENUMERATE_SNAPSHOTS:
+			if (pSMBFile == NULL)
+				break;
 			if (arg == 0) {
 				rc = -EINVAL;
 				goto cifs_ioc_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c672915..5419afe 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -492,7 +492,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &pCifsInode->flags);
 
-				queue_work(cifsiod_wq,
+				queue_work(cifsoplockd_wq,
 					   &netfile->oplock_break);
 				netfile->oplock_break_cancelled = false;
 
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 9730780..967dfe6 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -494,7 +494,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 		else
 			cfile->oplock_break_cancelled = true;
 
-		queue_work(cifsiod_wq, &cfile->oplock_break);
+		queue_work(cifsoplockd_wq, &cfile->oplock_break);
 		kfree(lw);
 		return true;
 	}
@@ -638,7 +638,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
 					   &cinode->flags);
 				spin_unlock(&cfile->file_info_lock);
-				queue_work(cifsiod_wq, &cfile->oplock_break);
+				queue_work(cifsoplockd_wq,
+					   &cfile->oplock_break);
 
 				spin_unlock(&tcon->open_file_lock);
 				spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 007abf7..36334fe 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -924,6 +924,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
 		}
 		if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
 			rc = -ERANGE;
+			kfree(retbuf);
 			return rc;
 		}
 
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 8021853..7c1c6c3 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -569,8 +569,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 	}
 
 	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
-		cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
-		return -EIO;
+		cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+			 rsplen);
+
+		/* relax check since Mac returns max bufsize allowed on ioctl */
+		if (rsplen > CIFSMaxBufSize)
+			return -EIO;
 	}
 
 	/* check validate negotiate info response matches what we got earlier */
@@ -1670,8 +1674,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 	 * than one credit. Windows typically sets this smaller, but for some
 	 * ioctls it may be useful to allow server to send more. No point
 	 * limiting what the server can send as long as fits in one credit
+	 * Unfortunately, we cannot handle more than CIFS_MAX_MSG_SIZE
+	 * (by default; note that it can be overridden to make the max larger)
+	 * in responses, except for read responses, which can be bigger.
+	 * We may want to bump this limit up.
 	 */
-	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
+	req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
 
 	if (is_fsctl)
 		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 80bb956..d1bbdc9 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -300,7 +300,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
 	} else {
 		memset(buf, 0, 8);
 	}
-	memcpy(buf + 8, iname->name + iname->len - 16, 16);
+	memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
 	oname->name[0] = '_';
 	oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
 	return 0;
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index abc1884..bb4e209 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -161,27 +161,61 @@ int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
 }
 EXPORT_SYMBOL(fscrypt_get_policy);
 
+/**
+ * fscrypt_has_permitted_context() - is a file's encryption policy permitted
+ *				     within its directory?
+ *
+ * @parent: inode for parent directory
+ * @child: inode for file being looked up, opened, or linked into @parent
+ *
+ * Filesystems must call this before permitting access to an inode in a
+ * situation where the parent directory is encrypted (either before allowing
+ * ->lookup() to succeed, or for a regular file before allowing it to be opened)
+ * and before any operation that involves linking an inode into an encrypted
+ * directory, including link, rename, and cross rename.  It enforces the
+ * constraint that within a given encrypted directory tree, all files use the
+ * same encryption policy.  The pre-access check is needed to detect potentially
+ * malicious offline violations of this constraint, while the link and rename
+ * checks are needed to prevent online violations of this constraint.
+ *
+ * Return: 1 if permitted, 0 if forbidden.  If forbidden, the caller must fail
+ * the filesystem operation with EPERM.
+ */
 int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
 {
-	struct fscrypt_info *parent_ci, *child_ci;
+	const struct fscrypt_operations *cops = parent->i_sb->s_cop;
+	const struct fscrypt_info *parent_ci, *child_ci;
+	struct fscrypt_context parent_ctx, child_ctx;
 	int res;
 
-	if ((parent == NULL) || (child == NULL)) {
-		printk(KERN_ERR	"parent %p child %p\n", parent, child);
-		BUG_ON(1);
-	}
-
 	/* No restrictions on file types which are never encrypted */
 	if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
 	    !S_ISLNK(child->i_mode))
 		return 1;
 
-	/* no restrictions if the parent directory is not encrypted */
-	if (!parent->i_sb->s_cop->is_encrypted(parent))
+	/* No restrictions if the parent directory is unencrypted */
+	if (!cops->is_encrypted(parent))
 		return 1;
-	/* if the child directory is not encrypted, this is always a problem */
-	if (!parent->i_sb->s_cop->is_encrypted(child))
+
+	/* Encrypted directories must not contain unencrypted files */
+	if (!cops->is_encrypted(child))
 		return 0;
+
+	/*
+	 * Both parent and child are encrypted, so verify they use the same
+	 * encryption policy.  Compare the fscrypt_info structs if the keys are
+	 * available, otherwise retrieve and compare the fscrypt_contexts.
+	 *
+	 * Note that the fscrypt_context retrieval will be required frequently
+	 * when accessing an encrypted directory tree without the key.
+	 * Performance-wise this is not a big deal because we already don't
+	 * really optimize for file access without the key (to the extent that
+	 * such access is even possible), given that any attempted access
+	 * already causes a fscrypt_context retrieval and keyring search.
+	 *
+	 * In any case, if an unexpected error occurs, fall back to "forbidden".
+	 */
+
 	res = fscrypt_get_encryption_info(parent);
 	if (res)
 		return 0;
@@ -190,17 +224,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
 		return 0;
 	parent_ci = parent->i_crypt_info;
 	child_ci = child->i_crypt_info;
-	if (!parent_ci && !child_ci)
-		return 1;
-	if (!parent_ci || !child_ci)
+
+	if (parent_ci && child_ci) {
+		return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
+			      FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+			(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+			(parent_ci->ci_filename_mode ==
+			 child_ci->ci_filename_mode) &&
+			(parent_ci->ci_flags == child_ci->ci_flags);
+	}
+
+	res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
+	if (res != sizeof(parent_ctx))
 		return 0;
 
-	return (memcmp(parent_ci->ci_master_key,
-			child_ci->ci_master_key,
-			FS_KEY_DESCRIPTOR_SIZE) == 0 &&
-		(parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
-		(parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
-		(parent_ci->ci_flags == child_ci->ci_flags));
+	res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
+	if (res != sizeof(child_ctx))
+		return 0;
+
+	return memcmp(parent_ctx.master_key_descriptor,
+		      child_ctx.master_key_descriptor,
+		      FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+		(parent_ctx.contents_encryption_mode ==
+		 child_ctx.contents_encryption_mode) &&
+		(parent_ctx.filenames_encryption_mode ==
+		 child_ctx.filenames_encryption_mode) &&
+		(parent_ctx.flags == child_ctx.flags);
 }
 EXPORT_SYMBOL(fscrypt_has_permitted_context);
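
The kerneldoc added above defines the calling convention; roughly, a filesystem is expected to apply it from ->lookup() (and from its link/rename paths) as in the sketch below. The helper names are placeholders, and in 4.9-era trees the declaration lives in linux/fscrypto.h:

	#include <linux/fs.h>
	#include <linux/fscrypto.h>	/* fscrypt_has_permitted_context() in 4.9-era trees */

	/* Placeholders standing in for the filesystem's own lookup/crypto helpers. */
	struct inode *example_find_inode(struct inode *dir, struct dentry *dentry);
	bool example_dir_is_encrypted(struct inode *dir);

	static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
					     unsigned int flags)
	{
		struct inode *inode = example_find_inode(dir, dentry);

		if (IS_ERR(inode))
			return ERR_CAST(inode);

		/*
		 * Children of an encrypted directory must use the same encryption
		 * policy; refuse the lookup with -EPERM when they do not.
		 */
		if (inode && example_dir_is_encrypted(dir) &&
		    !fscrypt_has_permitted_context(dir, inode)) {
			iput(inode);
			return ERR_PTR(-EPERM);
		}

		return d_splice_alias(inode, dentry);
	}
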
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3cb7fa2..42723b2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5741,6 +5741,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	file_update_time(vma->vm_file);
 
 	down_read(&EXT4_I(inode)->i_mmap_sem);
+
+	ret = ext4_convert_inline_data(inode);
+	if (ret)
+		goto out_ret;
+
 	/* Delalloc case is easy... */
 	if (test_opt(inode->i_sb, DELALLOC) &&
 	    !ext4_should_journal_data(inode) &&
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index c4a389a..423a21c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1255,9 +1255,9 @@ static inline int ext4_match(struct ext4_filename *fname,
 	if (unlikely(!name)) {
 		if (fname->usr_fname->name[0] == '_') {
 			int ret;
-			if (de->name_len < 16)
+			if (de->name_len <= 32)
 				return 0;
-			ret = memcmp(de->name + de->name_len - 16,
+			ret = memcmp(de->name + ((de->name_len - 17) & ~15),
 				     fname->crypto_buf.name + 8, 16);
 			return (ret == 0) ? 1 : 0;
 		}
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ebdc90f..11f3717 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -130,19 +130,29 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
 			continue;
 		}
 
-		/* encrypted case */
+		if (de->hash_code != namehash)
+			goto not_match;
+
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
 
-		/* show encrypted name */
-		if (fname->hash) {
-			if (de->hash_code == fname->hash)
-				goto found;
-		} else if (de_name.len == name->len &&
-			de->hash_code == namehash &&
-			!memcmp(de_name.name, name->name, name->len))
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+		if (unlikely(!name->name)) {
+			if (fname->usr_fname->name[0] == '_') {
+				if (de_name.len > 32 &&
+					!memcmp(de_name.name + ((de_name.len - 17) & ~15),
+						fname->crypto_buf.name + 8, 16))
+					goto found;
+				goto not_match;
+			}
+			name->name = fname->crypto_buf.name;
+			name->len = fname->crypto_buf.len;
+		}
+#endif
+		if (de_name.len == name->len &&
+				!memcmp(de_name.name, name->name, name->len))
 			goto found;
-
+not_match:
 		if (max_slots && max_len > *max_slots)
 			*max_slots = max_len;
 		max_len = 0;
@@ -170,12 +180,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
 	struct f2fs_dir_entry *de = NULL;
 	bool room = false;
 	int max_slots;
-	f2fs_hash_t namehash;
-
-	if(fname->hash)
-		namehash = cpu_to_le32(fname->hash);
-	else
-		namehash = f2fs_dentry_hash(&name);
+	f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
 
 	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
 	nblock = bucket_blocks(level);
@@ -539,7 +544,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
 
 	level = 0;
 	slots = GET_DENTRY_SLOTS(new_name->len);
-	dentry_hash = f2fs_dentry_hash(new_name);
+	dentry_hash = f2fs_dentry_hash(new_name, NULL);
 
 	current_depth = F2FS_I(dir)->i_current_depth;
 	if (F2FS_I(dir)->chash == dentry_hash) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3a1640b..c12f695 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2016,7 +2016,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi);
 /*
  * hash.c
  */
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname);
 
 /*
  * node.c
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 71b7206..eb2e031 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,7 +70,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
 		*buf++ = pad;
 }
 
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname)
 {
 	__u32 hash;
 	f2fs_hash_t f2fs_hash;
@@ -79,6 +80,10 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
 	const unsigned char *name = name_info->name;
 	size_t len = name_info->len;
 
+	/* encrypted bigname case */
+	if (fname && !fname->disk_name.name)
+		return cpu_to_le32(fname->hash);
+
 	if (is_dot_dotdot(name_info))
 		return 0;
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 1427db9..e14edc9 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -312,7 +312,7 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
 		return NULL;
 	}
 
-	namehash = f2fs_dentry_hash(&name);
+	namehash = f2fs_dentry_hash(&name, fname);
 
 	inline_dentry = inline_data_addr(ipage);
 
@@ -549,7 +549,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 
 	f2fs_wait_on_page_writeback(ipage, NODE, true);
 
-	name_hash = f2fs_dentry_hash(new_name);
+	name_hash = f2fs_dentry_hash(new_name, NULL);
 	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e894d3..fc9b049 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2385,8 +2385,10 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
 		if (status != 0)
 			return status;
 	}
-	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
+	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
+		nfs4_sequence_free_slot(&o_res->seq_res);
 		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
+	}
 	return 0;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 965db47..142a74f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,13 +29,14 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount,
+					gfp_t gfp_flags)
 {
 	p->npages = pagecount;
 	if (pagecount <= ARRAY_SIZE(p->page_array))
 		p->pagevec = p->page_array;
 	else {
-		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+		p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 		if (!p->pagevec)
 			p->npages = 0;
 	}
@@ -681,6 +682,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 {
 	struct nfs_pgio_mirror *new;
 	int i;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	desc->pg_moreio = 0;
 	desc->pg_inode = inode;
@@ -700,8 +702,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 	if (pg_ops->pg_get_mirror_count) {
 		/* until we have a request, we don't have an lseg and no
 		 * idea how many mirrors there will be */
+		if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+			gfp_flags = GFP_NOIO;
 		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
-			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
+			      sizeof(struct nfs_pgio_mirror), gfp_flags);
 		desc->pg_mirrors_dynamic = new;
 		desc->pg_mirrors = new;
 
@@ -755,9 +759,12 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
 	struct list_head *head = &mirror->pg_list;
 	struct nfs_commit_info cinfo;
 	unsigned int pagecount, pageused;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
-	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
+	if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+		gfp_flags = GFP_NOIO;
+	if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) {
 		nfs_pgio_error(hdr);
 		desc->pg_error = -ENOMEM;
 		return desc->pg_error;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5321183..e4772a8 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -548,9 +548,9 @@ static void nfs_write_error_remove_page(struct nfs_page *req)
 {
 	nfs_unlock_request(req);
 	nfs_end_page_writeback(req);
-	nfs_release_request(req);
 	generic_error_remove_page(page_file_mapping(req->wb_page),
 				  req->wb_page);
+	nfs_release_request(req);
 }
 
 /*
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index abb09b5..650226f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1273,7 +1273,8 @@ nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
 		return NULL;
 	}
 
-	if (!(exp->ex_layout_types & (1 << layout_type))) {
+	if (layout_type >= LAYOUT_TYPE_MAX ||
+	    !(exp->ex_layout_types & (1 << layout_type))) {
 		dprintk("%s: layout type %d not supported\n",
 			__func__, layout_type);
 		return NULL;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c2d2895..2ee80e1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -4081,8 +4081,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
 		struct nfsd4_getdeviceinfo *gdev)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[gdev->gd_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	u32 starting_len = xdr->buf->len, needed_len;
 	__be32 *p;
 
@@ -4099,6 +4098,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
 
 	/* If maxcount is 0 then just update notifications */
 	if (gdev->gd_maxcount != 0) {
+		ops = nfsd4_layout_ops[gdev->gd_layout_type];
 		nfserr = ops->encode_getdeviceinfo(xdr, gdev);
 		if (nfserr) {
 			/*
@@ -4151,8 +4151,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
 		struct nfsd4_layoutget *lgp)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[lgp->lg_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	__be32 *p;
 
 	dprintk("%s: err %d\n", __func__, nfserr);
@@ -4175,6 +4174,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
 	*p++ = cpu_to_be32(lgp->lg_seg.iomode);
 	*p++ = cpu_to_be32(lgp->lg_layout_type);
 
+	ops = nfsd4_layout_ops[lgp->lg_layout_type];
 	nfserr = ops->encode_layoutget(xdr, lgp);
 out:
 	kfree(lgp->lg_content);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 7f99c96..cef9885 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -294,27 +294,37 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 		}
 
 		ret = copy_event_to_user(group, kevent, buf);
+		if (unlikely(ret == -EOPENSTALE)) {
+			/*
+			 * We cannot report events with stale fd so drop it.
+			 * Setting ret to 0 will continue the event loop and
+			 * do the right thing if there are no more events to
+			 * read (i.e. return bytes read, -EAGAIN or wait).
+			 */
+			ret = 0;
+		}
+
 		/*
 		 * Permission events get queued to wait for response.  Other
 		 * events can be destroyed now.
 		 */
 		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
 			fsnotify_destroy_event(group, kevent);
-			if (ret < 0)
-				break;
 		} else {
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-			if (ret < 0) {
+			if (ret <= 0) {
 				FANOTIFY_PE(kevent)->response = FAN_DENY;
 				wake_up(&group->fanotify_data.access_waitq);
-				break;
+			} else {
+				spin_lock(&group->notification_lock);
+				list_add_tail(&kevent->list,
+					&group->fanotify_data.access_list);
+				spin_unlock(&group->notification_lock);
 			}
-			spin_lock(&group->notification_lock);
-			list_add_tail(&kevent->list,
-				      &group->fanotify_data.access_list);
-			spin_unlock(&group->notification_lock);
 #endif
 		}
+		if (ret < 0)
+			break;
 		buf += ret;
 		count -= ret;
 	}
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index ef3b4eb..08ecdee 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -223,8 +223,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
 	if (ret)
 		goto out;
 
-	if ((iattr->ia_valid & ATTR_SIZE) &&
-	    iattr->ia_size != i_size_read(inode)) {
+	if (iattr->ia_valid & ATTR_SIZE) {
 		ret = orangefs_setattr_size(inode, iattr);
 		if (ret)
 			goto out;
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index a290ff6..7c31593 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -193,8 +193,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
 		goto out;
 	}
 
-	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
-
 	gossip_debug(GOSSIP_NAME_DEBUG,
 		     "%s:%s:%d "
 		     "Found good inode [%lu] with count [%d]\n",
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 74a81b1..237c9c0 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
 	if (S_ISLNK(inode->i_mode))
 		return -EOPNOTSUPP;
 
-	if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
-		gossip_err("Invalid key length (%d)\n",
-			   (int)strlen(name));
+	if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
 		return -EINVAL;
-	}
 
 	fsuid = from_kuid(&init_user_ns, current_fsuid());
 	fsgid = from_kgid(&init_user_ns, current_fsgid());
@@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
 	struct orangefs_kernel_op_s *new_op = NULL;
 	int ret = -ENOMEM;
 
+	if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
+		return -EINVAL;
+
 	down_write(&orangefs_inode->xattr_sem);
 	new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
 	if (!new_op)
@@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
 		     "%s: name %s, buffer_size %zd\n",
 		     __func__, name, size);
 
-	if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
-	    flags < 0) {
-		gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
-			   (int)size,
-			   flags);
+	if (size > ORANGEFS_MAX_XATTR_VALUELEN)
 		return -EINVAL;
-	}
+	if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
+		return -EINVAL;
 
 	internal_flag = convert_to_internal_xattr_flags(flags);
 
-	if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
-		gossip_err
-		    ("orangefs_inode_setxattr: bogus key size (%d)\n",
-		     (int)(strlen(name)));
-		return -EINVAL;
-	}
-
 	/* This is equivalent to a removexattr */
 	if (size == 0 && value == NULL) {
 		gossip_debug(GOSSIP_XATTR_DEBUG,
@@ -358,7 +348,7 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
 	returned_count = new_op->downcall.resp.listxattr.returned_count;
 	if (returned_count < 0 ||
-	    returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
+	    returned_count > ORANGEFS_MAX_XATTR_LISTLEN) {
 		gossip_err("%s: impossible value for returned_count:%d:\n",
 		__func__,
 		returned_count);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc20..6047471 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -471,6 +471,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
 		ent->data = NULL;
 		ent->proc_fops = NULL;
 		ent->proc_iops = NULL;
+		parent->nlink++;
 		if (proc_register(parent, ent) < 0) {
 			kfree(ent);
 			parent->nlink--;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 14984d9..43033a3 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -704,6 +704,7 @@ int pstore_register(struct pstore_info *psi)
 	if (psi->flags & PSTORE_FLAGS_PMSG)
 		pstore_register_pmsg();
 
+	/* Start watching for new records, if desired. */
 	if (pstore_update_ms >= 0) {
 		pstore_timer.expires = jiffies +
 			msecs_to_jiffies(pstore_update_ms);
@@ -726,6 +727,11 @@ EXPORT_SYMBOL_GPL(pstore_register);
 
 void pstore_unregister(struct pstore_info *psi)
 {
+	/* Stop timer and make sure all work has finished. */
+	pstore_update_ms = -1;
+	del_timer_sync(&pstore_timer);
+	flush_work(&pstore_work);
+
 	if (psi->flags & PSTORE_FLAGS_PMSG)
 		pstore_unregister_pmsg();
 	if (psi->flags & PSTORE_FLAGS_FTRACE)
@@ -825,7 +831,9 @@ static void pstore_timefunc(unsigned long dummy)
 		schedule_work(&pstore_work);
 	}
 
-	mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
+	if (pstore_update_ms >= 0)
+		mod_timer(&pstore_timer,
+			  jiffies + msecs_to_jiffies(pstore_update_ms));
 }
 
 module_param(backend, charp, 0444);
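
The pstore hunks above close an unregister race: the periodic timer could re-arm itself and queue work after the backend had gone away. The shutdown ordering they establish -- disable re-arming, synchronously kill the timer, then flush any work it already queued -- in a standalone sketch with made-up names (4.9-era timer API):

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	static int example_update_ms = 60000;

	static void example_work_fn(struct work_struct *work)
	{
		/* e.g. look for new records and surface them */
	}
	static DECLARE_WORK(example_work, example_work_fn);

	static void example_timer_fn(unsigned long data);
	static DEFINE_TIMER(example_timer, example_timer_fn, 0, 0);

	static void example_timer_fn(unsigned long data)
	{
		schedule_work(&example_work);
		if (example_update_ms >= 0)	/* only re-arm while still wanted */
			mod_timer(&example_timer,
				  jiffies + msecs_to_jiffies(example_update_ms));
	}

	static void example_unregister(void)
	{
		example_update_ms = -1;		/* stop further re-arming */
		del_timer_sync(&example_timer);	/* wait out a running callback */
		flush_work(&example_work);	/* drain work it already queued */
	}
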
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index ae2b4ba..e9426a6 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -34,6 +34,8 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	struct dentry *parent_lower_dentry = NULL;
 	struct dentry *lower_cur_parent_dentry = NULL;
 	struct dentry *lower_dentry = NULL;
+	struct inode *inode;
+	struct sdcardfs_inode_data *data;
 
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;
@@ -103,6 +105,21 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&lower_dentry->d_lock);
 	}
+	if (!err)
+		goto out;
+
+	/* If our top's inode is gone, we may be out of date */
+	inode = igrab(d_inode(dentry));
+	if (inode) {
+		data = top_data_get(SDCARDFS_I(inode));
+		if (!data || data->abandoned) {
+			d_drop(dentry);
+			err = 0;
+		}
+		if (data)
+			data_put(data);
+		iput(inode);
+	}
 
 out:
 	dput(parent_dentry);
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 5a0ef38..1239d1c 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -26,28 +26,28 @@ static void inherit_derived_state(struct inode *parent, struct inode *child)
 	struct sdcardfs_inode_info *pi = SDCARDFS_I(parent);
 	struct sdcardfs_inode_info *ci = SDCARDFS_I(child);
 
-	ci->perm = PERM_INHERIT;
-	ci->userid = pi->userid;
-	ci->d_uid = pi->d_uid;
-	ci->under_android = pi->under_android;
-	ci->under_cache = pi->under_cache;
-	ci->under_obb = pi->under_obb;
-	set_top(ci, pi->top);
+	ci->data->perm = PERM_INHERIT;
+	ci->data->userid = pi->data->userid;
+	ci->data->d_uid = pi->data->d_uid;
+	ci->data->under_android = pi->data->under_android;
+	ci->data->under_cache = pi->data->under_cache;
+	ci->data->under_obb = pi->data->under_obb;
+	set_top(ci, pi->top_data);
 }
 
 /* helper function for derived state */
 void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
-						uid_t uid, bool under_android,
-						struct inode *top)
+					uid_t uid, bool under_android,
+					struct sdcardfs_inode_data *top)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
 
-	info->perm = perm;
-	info->userid = userid;
-	info->d_uid = uid;
-	info->under_android = under_android;
-	info->under_cache = false;
-	info->under_obb = false;
+	info->data->perm = perm;
+	info->data->userid = userid;
+	info->data->d_uid = uid;
+	info->data->under_android = under_android;
+	info->data->under_cache = false;
+	info->data->under_obb = false;
 	set_top(info, top);
 }
 
@@ -58,7 +58,8 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
 				const struct qstr *name)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
-	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_data *parent_data =
+			SDCARDFS_I(d_inode(parent))->data;
 	appid_t appid;
 	unsigned long user_num;
 	int err;
@@ -82,60 +83,61 @@ void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
 	if (!S_ISDIR(d_inode(dentry)->i_mode))
 		return;
 	/* Derive custom permissions based on parent and current node */
-	switch (parent_info->perm) {
+	switch (parent_data->perm) {
 	case PERM_INHERIT:
 	case PERM_ANDROID_PACKAGE_CACHE:
 		/* Already inherited above */
 		break;
 	case PERM_PRE_ROOT:
 		/* Legacy internal layout places users at top level */
-		info->perm = PERM_ROOT;
+		info->data->perm = PERM_ROOT;
 		err = kstrtoul(name->name, 10, &user_num);
 		if (err)
-			info->userid = 0;
+			info->data->userid = 0;
 		else
-			info->userid = user_num;
-		set_top(info, &info->vfs_inode);
+			info->data->userid = user_num;
+		set_top(info, info->data);
 		break;
 	case PERM_ROOT:
 		/* Assume masked off by default. */
 		if (qstr_case_eq(name, &q_Android)) {
 			/* App-specific directories inside; let anyone traverse */
-			info->perm = PERM_ANDROID;
-			info->under_android = true;
-			set_top(info, &info->vfs_inode);
+			info->data->perm = PERM_ANDROID;
+			info->data->under_android = true;
+			set_top(info, info->data);
 		}
 		break;
 	case PERM_ANDROID:
 		if (qstr_case_eq(name, &q_data)) {
 			/* App-specific directories inside; let anyone traverse */
-			info->perm = PERM_ANDROID_DATA;
-			set_top(info, &info->vfs_inode);
+			info->data->perm = PERM_ANDROID_DATA;
+			set_top(info, info->data);
 		} else if (qstr_case_eq(name, &q_obb)) {
 			/* App-specific directories inside; let anyone traverse */
-			info->perm = PERM_ANDROID_OBB;
-			info->under_obb = true;
-			set_top(info, &info->vfs_inode);
+			info->data->perm = PERM_ANDROID_OBB;
+			info->data->under_obb = true;
+			set_top(info, info->data);
 			/* Single OBB directory is always shared */
 		} else if (qstr_case_eq(name, &q_media)) {
 			/* App-specific directories inside; let anyone traverse */
-			info->perm = PERM_ANDROID_MEDIA;
-			set_top(info, &info->vfs_inode);
+			info->data->perm = PERM_ANDROID_MEDIA;
+			set_top(info, info->data);
 		}
 		break;
 	case PERM_ANDROID_OBB:
 	case PERM_ANDROID_DATA:
 	case PERM_ANDROID_MEDIA:
-		info->perm = PERM_ANDROID_PACKAGE;
+		info->data->perm = PERM_ANDROID_PACKAGE;
 		appid = get_appid(name->name);
-		if (appid != 0 && !is_excluded(name->name, parent_info->userid))
-			info->d_uid = multiuser_get_uid(parent_info->userid, appid);
-		set_top(info, &info->vfs_inode);
+		if (appid != 0 && !is_excluded(name->name, parent_data->userid))
+			info->data->d_uid =
+				multiuser_get_uid(parent_data->userid, appid);
+		set_top(info, info->data);
 		break;
 	case PERM_ANDROID_PACKAGE:
 		if (qstr_case_eq(name, &q_cache)) {
-			info->perm = PERM_ANDROID_PACKAGE_CACHE;
-			info->under_cache = true;
+			info->data->perm = PERM_ANDROID_PACKAGE_CACHE;
+			info->data->under_cache = true;
 		}
 		break;
 	}
@@ -166,7 +168,8 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
 	struct inode *delegated_inode = NULL;
 	int error;
 	struct sdcardfs_inode_info *info;
-	struct sdcardfs_inode_info *info_top;
+	struct sdcardfs_inode_data *info_d;
+	struct sdcardfs_inode_data *info_top;
 	perm_t perm;
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 	uid_t uid = sbi->options.fs_low_uid;
@@ -174,15 +177,16 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
 	struct iattr newattrs;
 
 	info = SDCARDFS_I(d_inode(dentry));
-	perm = info->perm;
-	if (info->under_obb) {
+	info_d = info->data;
+	perm = info_d->perm;
+	if (info_d->under_obb) {
 		perm = PERM_ANDROID_OBB;
-	} else if (info->under_cache) {
+	} else if (info_d->under_cache) {
 		perm = PERM_ANDROID_PACKAGE_CACHE;
 	} else if (perm == PERM_INHERIT) {
-		info_top = SDCARDFS_I(grab_top(info));
+		info_top = top_data_get(info);
 		perm = info_top->perm;
-		release_top(info);
+		data_put(info_top);
 	}
 
 	switch (perm) {
@@ -192,7 +196,7 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
 	case PERM_ANDROID_MEDIA:
 	case PERM_ANDROID_PACKAGE:
 	case PERM_ANDROID_PACKAGE_CACHE:
-		uid = multiuser_get_uid(info->userid, uid);
+		uid = multiuser_get_uid(info_d->userid, uid);
 		break;
 	case PERM_ANDROID_OBB:
 		uid = AID_MEDIA_OBB;
@@ -207,24 +211,24 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
 	case PERM_ANDROID_DATA:
 	case PERM_ANDROID_MEDIA:
 		if (S_ISDIR(d_inode(dentry)->i_mode))
-			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
+			gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
 		else
-			gid = multiuser_get_uid(info->userid, get_type(name));
+			gid = multiuser_get_uid(info_d->userid, get_type(name));
 		break;
 	case PERM_ANDROID_OBB:
 		gid = AID_MEDIA_OBB;
 		break;
 	case PERM_ANDROID_PACKAGE:
-		if (uid_is_app(info->d_uid))
-			gid = multiuser_get_ext_gid(info->d_uid);
+		if (uid_is_app(info_d->d_uid))
+			gid = multiuser_get_ext_gid(info_d->d_uid);
 		else
-			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
+			gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
 		break;
 	case PERM_ANDROID_PACKAGE_CACHE:
-		if (uid_is_app(info->d_uid))
-			gid = multiuser_get_ext_cache_gid(info->d_uid);
+		if (uid_is_app(info_d->d_uid))
+			gid = multiuser_get_ext_cache_gid(info_d->d_uid);
 		else
-			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
+			gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
 		break;
 	case PERM_PRE_ROOT:
 	default:
@@ -257,11 +261,13 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
 	sdcardfs_put_lower_path(dentry, &path);
 }
 
-static int descendant_may_need_fixup(struct sdcardfs_inode_info *info, struct limit_search *limit)
+static int descendant_may_need_fixup(struct sdcardfs_inode_data *data,
+		struct limit_search *limit)
 {
-	if (info->perm == PERM_ROOT)
-		return (limit->flags & BY_USERID)?info->userid == limit->userid:1;
-	if (info->perm == PERM_PRE_ROOT || info->perm == PERM_ANDROID)
+	if (data->perm == PERM_ROOT)
+		return (limit->flags & BY_USERID) ?
+				data->userid == limit->userid : 1;
+	if (data->perm == PERM_PRE_ROOT || data->perm == PERM_ANDROID)
 		return 1;
 	return 0;
 }
@@ -292,7 +298,7 @@ static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *
 	}
 	info = SDCARDFS_I(d_inode(dentry));
 
-	if (needs_fixup(info->perm)) {
+	if (needs_fixup(info->data->perm)) {
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
 			spin_lock_nested(&child->d_lock, depth + 1);
 			if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) {
@@ -305,7 +311,7 @@ static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *
 			}
 			spin_unlock(&child->d_lock);
 		}
-	} else if (descendant_may_need_fixup(info, limit)) {
+	} else if (descendant_may_need_fixup(info->data, limit)) {
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
 			__fixup_perms_recursive(child, limit, depth + 1);
 		}
@@ -349,12 +355,12 @@ int need_graft_path(struct dentry *dentry)
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 	struct qstr obb = QSTR_LITERAL("obb");
 
-	if (parent_info->perm == PERM_ANDROID &&
+	if (parent_info->data->perm == PERM_ANDROID &&
 			qstr_case_eq(&dentry->d_name, &obb)) {
 
 		/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
 		if (!(sbi->options.multiuser == false
-				&& parent_info->userid == 0)) {
+				&& parent_info->data->userid == 0)) {
 			ret = 1;
 		}
 	}
@@ -415,11 +421,11 @@ int is_base_obbpath(struct dentry *dentry)
 
 	spin_lock(&SDCARDFS_D(dentry)->lock);
 	if (sbi->options.multiuser) {
-		if (parent_info->perm == PERM_PRE_ROOT &&
+		if (parent_info->data->perm == PERM_PRE_ROOT &&
 				qstr_case_eq(&dentry->d_name, &q_obb)) {
 			ret = 1;
 		}
-	} else  if (parent_info->perm == PERM_ANDROID &&
+	} else  if (parent_info->data->perm == PERM_ANDROID &&
 			qstr_case_eq(&dentry->d_name, &q_obb)) {
 		ret = 1;
 	}
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 4d558b8..d48da41 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -23,7 +23,8 @@
 #include <linux/ratelimit.h>
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info)
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
+		struct sdcardfs_inode_data *data)
 {
 	struct cred *cred;
 	const struct cred *old_cred;
@@ -33,10 +34,10 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_
 	if (!cred)
 		return NULL;
 
-	if (info->under_obb)
+	if (data->under_obb)
 		uid = AID_MEDIA_OBB;
 	else
-		uid = multiuser_get_uid(info->userid, sbi->options.fs_low_uid);
+		uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
 	cred->fsuid = make_kuid(&init_user_ns, uid);
 	cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
 
@@ -96,7 +97,8 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
 	if (err)
 		goto out;
 
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, SDCARDFS_I(dir)->userid);
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path,
+			SDCARDFS_I(dir)->data->userid);
 	if (err)
 		goto out;
 	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
@@ -267,7 +269,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 	struct path lower_path;
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 	const struct cred *saved_cred = NULL;
-	struct sdcardfs_inode_info *pi = SDCARDFS_I(dir);
+	struct sdcardfs_inode_data *pd = SDCARDFS_I(dir)->data;
 	int touch_err = 0;
 	struct fs_struct *saved_fs;
 	struct fs_struct *copied_fs;
@@ -336,7 +338,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 			make_nomedia_in_obb = 1;
 	}
 
-	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pi->userid);
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pd->userid);
 	if (err) {
 		unlock_dir(lower_parent_dentry);
 		goto out;
@@ -349,12 +351,13 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 	fixup_lower_ownership(dentry, dentry->d_name.name);
 	unlock_dir(lower_parent_dentry);
 	if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb))
-		&& (pi->perm == PERM_ANDROID) && (pi->userid == 0))
+		&& (pd->perm == PERM_ANDROID) && (pd->userid == 0))
 		make_nomedia_in_obb = 1;
 
 	/* When creating /Android/data and /Android/obb, mark them as .nomedia */
 	if (make_nomedia_in_obb ||
-		((pi->perm == PERM_ANDROID) && (qstr_case_eq(&dentry->d_name, &q_data)))) {
+		((pd->perm == PERM_ANDROID)
+				&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
 		REVERT_CRED(saved_cred);
 		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
 		set_fs_pwd(current->fs, &lower_path);
@@ -620,7 +623,7 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
 {
 	int err;
 	struct inode tmp;
-	struct inode *top = grab_top(SDCARDFS_I(inode));
+	struct sdcardfs_inode_data *top = top_data_get(SDCARDFS_I(inode));
 
 	if (!top)
 		return -EINVAL;
@@ -637,10 +640,11 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
 	 * locks must be dealt with to avoid undefined behavior.
 	 */
 	copy_attrs(&tmp, inode);
-	tmp.i_uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid);
-	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top)));
-	tmp.i_mode = (inode->i_mode & S_IFMT) | get_mode(mnt, SDCARDFS_I(top));
-	release_top(SDCARDFS_I(inode));
+	tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
+	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+	tmp.i_mode = (inode->i_mode & S_IFMT)
+			| get_mode(mnt, SDCARDFS_I(inode), top);
+	data_put(top);
 	tmp.i_sb = inode->i_sb;
 	if (IS_POSIXACL(inode))
 		pr_warn("%s: This may be undefined behavior...\n", __func__);
@@ -692,11 +696,12 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
 	struct dentry *parent;
 	struct inode tmp;
 	struct dentry tmp_d;
-	struct inode *top;
+	struct sdcardfs_inode_data *top;
+
 	const struct cred *saved_cred = NULL;
 
 	inode = d_inode(dentry);
-	top = grab_top(SDCARDFS_I(inode));
+	top = top_data_get(SDCARDFS_I(inode));
 
 	if (!top)
 		return -EINVAL;
@@ -714,11 +719,12 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
 	 *
 	 */
 	copy_attrs(&tmp, inode);
-	tmp.i_uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid);
-	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top)));
-	tmp.i_mode = (inode->i_mode & S_IFMT) | get_mode(mnt, SDCARDFS_I(top));
+	tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
+	tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+	tmp.i_mode = (inode->i_mode & S_IFMT)
+			| get_mode(mnt, SDCARDFS_I(inode), top);
 	tmp.i_size = i_size_read(inode);
-	release_top(SDCARDFS_I(inode));
+	data_put(top);
 	tmp.i_sb = inode->i_sb;
 	tmp_d.d_inode = &tmp;
 
@@ -821,17 +827,17 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
 				struct inode *inode, struct kstat *stat)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
-	struct inode *top = grab_top(info);
+	struct sdcardfs_inode_data *top = top_data_get(info);
 
 	if (!top)
 		return -EINVAL;
 
 	stat->dev = inode->i_sb->s_dev;
 	stat->ino = inode->i_ino;
-	stat->mode = (inode->i_mode  & S_IFMT) | get_mode(mnt, SDCARDFS_I(top));
+	stat->mode = (inode->i_mode  & S_IFMT) | get_mode(mnt, info, top);
 	stat->nlink = inode->i_nlink;
-	stat->uid = make_kuid(&init_user_ns, SDCARDFS_I(top)->d_uid);
-	stat->gid = make_kgid(&init_user_ns, get_gid(mnt, SDCARDFS_I(top)));
+	stat->uid = make_kuid(&init_user_ns, top->d_uid);
+	stat->gid = make_kgid(&init_user_ns, get_gid(mnt, top));
 	stat->rdev = inode->i_rdev;
 	stat->size = i_size_read(inode);
 	stat->atime = inode->i_atime;
@@ -839,7 +845,7 @@ static int sdcardfs_fillattr(struct vfsmount *mnt,
 	stat->ctime = inode->i_ctime;
 	stat->blksize = (1 << inode->i_blkbits);
 	stat->blocks = inode->i_blocks;
-	release_top(info);
+	data_put(top);
 	return 0;
 }
 
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 706329d..17761c5 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -71,7 +71,7 @@ struct inode_data {
 static int sdcardfs_inode_test(struct inode *inode, void *candidate_data/*void *candidate_lower_inode*/)
 {
 	struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
-	userid_t current_userid = SDCARDFS_I(inode)->userid;
+	userid_t current_userid = SDCARDFS_I(inode)->data->userid;
 
 	if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
 			current_userid == ((struct inode_data *)candidate_data)->id)
@@ -438,7 +438,8 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
 		goto out;
 	}
 
-	ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
+	ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path,
+				SDCARDFS_I(dir)->data->userid);
 	if (IS_ERR(ret))
 		goto out;
 	if (ret)
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 953d215..3c5b51d 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -327,13 +327,13 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
 	mutex_lock(&sdcardfs_super_list_lock);
 	if (sb_info->options.multiuser) {
 		setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT,
-					sb_info->options.fs_user_id, AID_ROOT,
-					false, d_inode(sb->s_root));
+				sb_info->options.fs_user_id, AID_ROOT,
+				false, SDCARDFS_I(d_inode(sb->s_root))->data);
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
 	} else {
 		setup_derived_state(d_inode(sb->s_root), PERM_ROOT,
-					sb_info->options.fs_user_id, AID_ROOT,
-					false, d_inode(sb->s_root));
+				sb_info->options.fs_user_id, AID_ROOT,
+				false, SDCARDFS_I(d_inode(sb->s_root))->data);
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
 	}
 	fixup_tmp_permissions(d_inode(sb->s_root));
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 5ea6469..00a0f65 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -156,7 +156,7 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na
 	struct qstr q_android_secure = QSTR_LITERAL("android_secure");
 
 	/* Always block security-sensitive files at root */
-	if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
+	if (parent_node && SDCARDFS_I(parent_node)->data->perm == PERM_ROOT) {
 		if (qstr_case_eq(name, &q_autorun)
 			|| qstr_case_eq(name, &q__android_secure)
 			|| qstr_case_eq(name, &q_android_secure)) {
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 380982b..3687b22 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -30,6 +30,7 @@
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/aio.h>
+#include <linux/kref.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -81,7 +82,8 @@
  */
 #define fixup_tmp_permissions(x)	\
 	do {						\
-		(x)->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(x)->d_uid);	\
+		(x)->i_uid = make_kuid(&init_user_ns,	\
+				SDCARDFS_I(x)->data->d_uid);	\
 		(x)->i_gid = make_kgid(&init_user_ns, AID_SDCARD_RW);	\
 		(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
 	} while (0)
@@ -97,14 +99,14 @@
  */
 #define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)		\
 	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
 		if (!saved_cred)	\
 			return -ENOMEM;	\
 	} while (0)
 
 #define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)	\
 	do {	\
-		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		saved_cred = override_fsids(sdcardfs_sbi, info->data);	\
 		if (!saved_cred)	\
 			return ERR_PTR(-ENOMEM);	\
 	} while (0)
@@ -142,9 +144,11 @@ typedef enum {
 struct sdcardfs_sb_info;
 struct sdcardfs_mount_options;
 struct sdcardfs_inode_info;
+struct sdcardfs_inode_data;
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info);
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
+			struct sdcardfs_inode_data *data);
 /* Do not directly use this function, use REVERT_CRED() instead. */
 void revert_fsids(const struct cred *old_cred);
 
@@ -178,18 +182,26 @@ struct sdcardfs_file_info {
 	const struct vm_operations_struct *lower_vm_ops;
 };
 
-/* sdcardfs inode data in memory */
-struct sdcardfs_inode_info {
-	struct inode *lower_inode;
-	/* state derived based on current position in hierachy */
+struct sdcardfs_inode_data {
+	struct kref refcount;
+	bool abandoned;
+
 	perm_t perm;
 	userid_t userid;
 	uid_t d_uid;
 	bool under_android;
 	bool under_cache;
 	bool under_obb;
+};
+
+/* sdcardfs inode data in memory */
+struct sdcardfs_inode_info {
+	struct inode *lower_inode;
+	/* state derived based on current position in hierarchy */
+	struct sdcardfs_inode_data *data;
+
 	/* top folder for ownership */
-	struct inode *top;
+	struct sdcardfs_inode_data *top_data;
 
 	struct inode vfs_inode;
 };
@@ -351,39 +363,56 @@ SDCARDFS_DENT_FUNC(orig_path)
 
 static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
 {
-	return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
+	return sbinfo && sbinfo->sb
+			&& sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
 }
 
-/* grab a refererence if we aren't linking to ourself */
-static inline void set_top(struct sdcardfs_inode_info *info, struct inode *top)
+static inline struct sdcardfs_inode_data *data_get(
+		struct sdcardfs_inode_data *data)
 {
-	struct inode *old_top = NULL;
-
-	BUG_ON(IS_ERR_OR_NULL(top));
-	if (info->top && info->top != &info->vfs_inode)
-		old_top = info->top;
-	if (top != &info->vfs_inode)
-		igrab(top);
-	info->top = top;
-	iput(old_top);
+	if (data)
+		kref_get(&data->refcount);
+	return data;
 }
 
-static inline struct inode *grab_top(struct sdcardfs_inode_info *info)
+static inline struct sdcardfs_inode_data *top_data_get(
+		struct sdcardfs_inode_info *info)
 {
-	struct inode *top = info->top;
+	return data_get(info->top_data);
+}
+
+extern void data_release(struct kref *ref);
+
+static inline void data_put(struct sdcardfs_inode_data *data)
+{
+	kref_put(&data->refcount, data_release);
+}
+
+static inline void release_own_data(struct sdcardfs_inode_info *info)
+{
+	/*
+	 * This happens exactly once per inode. At this point, the inode that
+	 * originally held this data is about to be freed, and any remaining
+	 * references to it are held as top references that will likely be
+	 * released soon.
+	 */
+	info->data->abandoned = true;
+	data_put(info->data);
+}
+
+static inline void set_top(struct sdcardfs_inode_info *info,
+			struct sdcardfs_inode_data *top)
+{
+	struct sdcardfs_inode_data *old_top = info->top_data;
 
 	if (top)
-		return igrab(top);
-	else
-		return NULL;
+		data_get(top);
+	info->top_data = top;
+	if (old_top)
+		data_put(old_top);
 }
 
-static inline void release_top(struct sdcardfs_inode_info *info)
-{
-	iput(info->top);
-}
-
-static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+static inline int get_gid(struct vfsmount *mnt,
+		struct sdcardfs_inode_data *data)
 {
 	struct sdcardfs_vfsmount_options *opts = mnt->data;
 
@@ -396,10 +425,12 @@ static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info
 		 */
 		return AID_SDCARD_RW;
 	else
-		return multiuser_get_uid(info->userid, opts->gid);
+		return multiuser_get_uid(data->userid, opts->gid);
 }
 
-static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+static inline int get_mode(struct vfsmount *mnt,
+		struct sdcardfs_inode_info *info,
+		struct sdcardfs_inode_data *data)
 {
 	int owner_mode;
 	int filtered_mode;
@@ -407,12 +438,12 @@ static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *inf
 	int visible_mode = 0775 & ~opts->mask;
 
 
-	if (info->perm == PERM_PRE_ROOT) {
+	if (data->perm == PERM_PRE_ROOT) {
 		/* Top of multi-user view should always be visible to ensure
 		* secondary users can traverse inside.
 		*/
 		visible_mode = 0711;
-	} else if (info->under_android) {
+	} else if (data->under_android) {
 		/* Block "other" access to Android directories, since only apps
 		* belonging to a specific user should be in there; we still
 		* leave +x open for the default view.
@@ -481,8 +512,9 @@ struct limit_search {
 	userid_t userid;
 };
 
-extern void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
-			uid_t uid, bool under_android, struct inode *top);
+extern void setup_derived_state(struct inode *inode, perm_t perm,
+		userid_t userid, uid_t uid, bool under_android,
+		struct sdcardfs_inode_data *top);
 extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
 extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
 extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);
@@ -601,7 +633,7 @@ static inline void sdcardfs_copy_and_fix_attrs(struct inode *dest, const struct
 {
 	dest->i_mode = (src->i_mode  & S_IFMT) | S_IRWXU | S_IRWXG |
 			S_IROTH | S_IXOTH; /* 0775 */
-	dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->d_uid);
+	dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->data->d_uid);
 	dest->i_gid = make_kgid(&init_user_ns, AID_SDCARD_RW);
 	dest->i_rdev = src->i_rdev;
 	dest->i_atime = src->i_atime;
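For reference, the caller pattern these helpers are designed around (and which inode.c adopts later in this patch) is a get/put pair on the reference-counted top data instead of an igrab()/iput() on a top inode. A minimal, purely illustrative sketch:

/* Illustrative only: every top_data_get() must be paired with a data_put(). */
static int example_owner_uid(struct inode *inode, uid_t *uid)
{
	struct sdcardfs_inode_data *top = top_data_get(SDCARDFS_I(inode));

	if (!top)
		return -EINVAL;
	*uid = top->d_uid;	/* valid even if the inode owning the data is evicted */
	data_put(top);
	return 0;
}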
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 8a9c9c7..7f4539b 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -26,6 +26,23 @@
  */
 static struct kmem_cache *sdcardfs_inode_cachep;
 
+/*
+ * To support the top references, we must track some data separately.
+ * An sdcardfs_inode_info always has a reference to its data, and once set up,
+ * also has a reference to its top. The top may be itself, in which case it
+ * holds two references to its data. When top is changed, it takes a ref to the
+ * new data and then drops the ref to the old data.
+ */
+static struct kmem_cache *sdcardfs_inode_data_cachep;
+
+void data_release(struct kref *ref)
+{
+	struct sdcardfs_inode_data *data =
+		container_of(ref, struct sdcardfs_inode_data, refcount);
+
+	kmem_cache_free(sdcardfs_inode_data_cachep, data);
+}
+
 /* final actions when unmounting a file system */
 static void sdcardfs_put_super(struct super_block *sb)
 {
@@ -166,6 +183,7 @@ static void sdcardfs_evict_inode(struct inode *inode)
 	struct inode *lower_inode;
 
 	truncate_inode_pages(&inode->i_data, 0);
+	set_top(SDCARDFS_I(inode), NULL);
 	clear_inode(inode);
 	/*
 	 * Decrement a reference to a lower_inode, which was incremented
@@ -173,13 +191,13 @@ static void sdcardfs_evict_inode(struct inode *inode)
 	 */
 	lower_inode = sdcardfs_lower_inode(inode);
 	sdcardfs_set_lower_inode(inode, NULL);
-	set_top(SDCARDFS_I(inode), inode);
 	iput(lower_inode);
 }
 
 static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
 {
 	struct sdcardfs_inode_info *i;
+	struct sdcardfs_inode_data *d;
 
 	i = kmem_cache_alloc(sdcardfs_inode_cachep, GFP_KERNEL);
 	if (!i)
@@ -188,6 +206,16 @@ static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
 	/* memset everything up to the inode to 0 */
 	memset(i, 0, offsetof(struct sdcardfs_inode_info, vfs_inode));
 
+	d = kmem_cache_alloc(sdcardfs_inode_data_cachep,
+					GFP_KERNEL | __GFP_ZERO);
+	if (!d) {
+		kmem_cache_free(sdcardfs_inode_cachep, i);
+		return NULL;
+	}
+
+	i->data = d;
+	kref_init(&d->refcount);
+
 	i->vfs_inode.i_version = 1;
 	return &i->vfs_inode;
 }
@@ -196,6 +224,7 @@ static void i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
 
+	release_own_data(SDCARDFS_I(inode));
 	kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
 }
 
@@ -214,20 +243,30 @@ static void init_once(void *obj)
 
 int sdcardfs_init_inode_cache(void)
 {
-	int err = 0;
-
 	sdcardfs_inode_cachep =
 		kmem_cache_create("sdcardfs_inode_cache",
 				  sizeof(struct sdcardfs_inode_info), 0,
 				  SLAB_RECLAIM_ACCOUNT, init_once);
+
 	if (!sdcardfs_inode_cachep)
-		err = -ENOMEM;
-	return err;
+		return -ENOMEM;
+
+	sdcardfs_inode_data_cachep =
+		kmem_cache_create("sdcardfs_inode_data_cache",
+				  sizeof(struct sdcardfs_inode_data), 0,
+				  SLAB_RECLAIM_ACCOUNT, NULL);
+	if (!sdcardfs_inode_data_cachep) {
+		kmem_cache_destroy(sdcardfs_inode_cachep);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /* sdcardfs inode cache destructor */
 void sdcardfs_destroy_inode_cache(void)
 {
+	kmem_cache_destroy(sdcardfs_inode_data_cachep);
 	kmem_cache_destroy(sdcardfs_inode_cachep);
 }
 
diff --git a/fs/xattr.c b/fs/xattr.c
index 2d13b4e..ed8c374 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
 			size = XATTR_SIZE_MAX;
 		kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 		if (!kvalue) {
-			kvalue = vmalloc(size);
+			kvalue = vzalloc(size);
 			if (!kvalue)
 				return -ENOMEM;
 		}
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2d..a5696c1 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@ struct __drm_crtcs_state {
 	struct drm_crtc *ptr;
 	struct drm_crtc_state *state;
 	struct drm_crtc_commit *commit;
+	s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
@@ -316,6 +317,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
 			      struct drm_crtc *crtc);
 void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
 				 struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+				    struct fence *fence);
 int __must_check
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 				  struct drm_crtc *crtc);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8..1c12875 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -531,6 +531,20 @@ struct drm_cmdline_mode {
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @max_tmds_char: indicates the maximum TMDS Character Rate supported
+ * @scdc_present: when set the sink supports SCDC functionality
+ * @rr_capable: when set the sink is capable of initiating an SCDC read request
+ * @supports_scramble: when set the sink supports less than 340Mcsc scrambling
+ * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h (DRM_EDID_3D_*)
+ * @pt_scan_info: PT scan info obtained from the VCDB of EDID
+ * @it_scan_info: IT scan info obtained from the VCDB of EDID
+ * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+ * @hdr_eotf: Electro optical transfer function obtained from HDR block
+ * @hdr_metadata_type_one: Metadata type one obtained from HDR block
+ * @hdr_max_luminance: desired max luminance obtained from HDR block
+ * @hdr_avg_luminance: desired avg luminance obtained from HDR block
+ * @hdr_min_luminance: desired min luminance obtained from HDR block
+ * @hdr_supported: does the sink support HDR content
  * @edid_corrupt: indicates whether the last read EDID was corrupt
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
@@ -665,6 +679,22 @@ struct drm_connector {
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
 
+	/* EDID bits HDMI 2.0 */
+	int max_tmds_char;	/* in Mcsc */
+	bool scdc_present;
+	bool rr_capable;
+	bool supports_scramble;
+	int flags_3d;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u32 hdr_eotf;
+	bool hdr_metadata_type_one;
+	u32 hdr_max_luminance;
+	u32 hdr_avg_luminance;
+	u32 hdr_min_luminance;
+	bool hdr_supported;
+
 	/* Flag for raw EDID header corruption - used in Displayport
 	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
 	 */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa2925..f3d58c7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -680,6 +680,35 @@ struct drm_crtc {
 	 * context.
 	 */
 	struct drm_modeset_acquire_ctx *acquire_ctx;
+
+	/**
+	 * @fence_context:
+	 *
+	 * timeline context used for fence operations.
+	 */
+	unsigned int fence_context;
+
+	/**
+	 * @fence_lock:
+	 *
+	 * spinlock to protect the fences in the fence_context.
+	 */
+	spinlock_t fence_lock;
+
+	/**
+	 * @fence_seqno:
+	 *
+	 * Seqno variable used as monotonic counter for the fences
+	 * created on the CRTC's timeline.
+	 */
+	unsigned long fence_seqno;
+
+	/**
+	 * @timeline_name:
+	 *
+	 * The name of the CRTC's fence timeline.
+	 */
+	char timeline_name[32];
 };
 
 /**
@@ -1160,6 +1189,17 @@ struct drm_mode_config {
 	 */
 	struct drm_property *prop_fb_id;
 	/**
+	 * @prop_in_fence_fd: Sync File fd representing the incoming fences
+	 * for a Plane.
+	 */
+	struct drm_property *prop_in_fence_fd;
+	/**
+	 * @prop_out_fence_ptr: Sync File fd pointer representing the
+	 * outgoing fences for a CRTC. Userspace should provide a pointer to a
+	 * value of type s32, and then cast that pointer to u64.
+	 */
+	struct drm_property *prop_out_fence_ptr;
+	/**
 	 * @prop_crtc_id: Default atomic plane property to specify the
 	 * &drm_crtc.
 	 */
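A hypothetical userspace sketch (libdrm) of how OUT_FENCE_PTR is meant to be consumed, per the @prop_out_fence_ptr note above: the property value is the address of an s32 that the kernel fills with a sync_file fd once the commit is queued. The request, CRTC ID and property ID are assumed to have been looked up already.

#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drmMode.h>

static void commit_with_out_fence(int fd, drmModeAtomicReq *req,
				  uint32_t crtc_id, uint32_t out_fence_prop_id)
{
	int32_t out_fence = -1;
	struct pollfd pfd;

	drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
				 (uint64_t)(uintptr_t)&out_fence);
	if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL))
		return;

	/* out_fence is now a sync_file fd; it becomes readable when the commit lands */
	pfd.fd = out_fence;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);
	close(out_fence);
}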
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d44..32bd104 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -269,6 +269,11 @@ struct detailed_timing {
 
 #define DRM_ELD_CEA_SAD(mnl, sad)	(20 + (mnl) + 3 * (sad))
 
+/* HDMI 2.0 */
+#define DRM_EDID_3D_INDEPENDENT_VIEW	(1 << 2)
+#define DRM_EDID_3D_DUAL_VIEW		(1 << 1)
+#define DRM_EDID_3D_OSD_DISPARITY	(1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211..3b00f64 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@ struct drm_fb_helper;
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
 
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 	unsigned int plane);
 
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state);
+
 #ifdef CONFIG_DEBUG_FS
 struct seq_file;
 
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8b4dc62..952ef84 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -65,7 +65,7 @@ struct drm_plane_state {
 
 	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
 	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-	struct fence *fence;
+	struct fence *fence; /* do not write directly, use drm_atomic_set_fence_for_plane() */
 
 	/* Signed dest location allows it to be partially off screen */
 	int32_t crtc_x, crtc_y;
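Tying the plane-state comment above to the new helper: a driver's ->prepare_fb() hook is expected to hand the framebuffer's exclusive fence to the atomic core rather than writing plane_state->fence directly. A hypothetical sketch, with the GEM-object lookup left abstract:

/* Illustrative only; example_fb_get_gem_obj() is an assumed per-driver helper. */
static int example_prepare_fb(struct drm_plane *plane,
			      struct drm_plane_state *state)
{
	struct drm_gem_object *obj;
	struct fence *fence;

	if (!state->fb)
		return 0;

	obj = example_fb_get_gem_obj(state->fb);
	if (obj && obj->dma_buf) {
		fence = reservation_object_get_excl_rcu(obj->dma_buf->resv);
		/* the atomic core takes over the fence reference */
		drm_atomic_set_fence_for_plane(state, fence);
	}
	return 0;
}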
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index a95d494..115b62f 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -185,19 +185,17 @@
 #define GPLL0							167
 #define GPLL0_OUT_EVEN						168
 #define GPLL0_OUT_MAIN						169
-#define GPLL1							170
-#define GPLL1_OUT_MAIN						171
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK				172
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK				173
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			174
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			175
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			176
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				177
-#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			178
-#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			179
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				180
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				181
-#define GCC_GPU_IREF_CLK					182
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK				170
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK				171
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK			172
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK			173
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK			174
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK				175
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK			176
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK			177
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				178
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				179
+#define GCC_GPU_IREF_CLK					180
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 915ac08..e773848 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -83,19 +83,20 @@
 #define GCC_SPMI_FETCHER_CLK					65
 #define GCC_SPMI_FETCHER_CLK_SRC				66
 #define GCC_SYS_NOC_CPUSS_AHB_CLK				67
-#define GCC_USB30_MASTER_CLK					68
-#define GCC_USB30_MASTER_CLK_SRC				69
-#define GCC_USB30_MOCK_UTMI_CLK					70
-#define GCC_USB30_MOCK_UTMI_CLK_SRC				71
-#define GCC_USB30_SLEEP_CLK					72
-#define GCC_USB3_PRIM_CLKREF_CLK				73
-#define GCC_USB3_PHY_AUX_CLK					74
-#define GCC_USB3_PHY_AUX_CLK_SRC				75
-#define GCC_USB3_PHY_PIPE_CLK					76
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				77
-#define GCC_XO_DIV4_CLK						78
-#define GPLL0							79
-#define GPLL0_OUT_EVEN						80
+#define GCC_SYS_NOC_USB3_CLK					68
+#define GCC_USB30_MASTER_CLK					69
+#define GCC_USB30_MASTER_CLK_SRC				70
+#define GCC_USB30_MOCK_UTMI_CLK					71
+#define GCC_USB30_MOCK_UTMI_CLK_SRC				72
+#define GCC_USB30_SLEEP_CLK					73
+#define GCC_USB3_PRIM_CLKREF_CLK				74
+#define GCC_USB3_PHY_AUX_CLK					75
+#define GCC_USB3_PHY_AUX_CLK_SRC				76
+#define GCC_USB3_PHY_PIPE_CLK					77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				78
+#define GCC_XO_DIV4_CLK						79
+#define GPLL0							80
+#define GPLL0_OUT_EVEN						81
 
 /* GDSCs */
 #define PCIE_GDSC						0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index c43a9f8..323beaf 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -40,13 +40,10 @@
 #define GPU_CC_PLL1_OUT_ODD					22
 #define GPU_CC_PLL1_OUT_TEST					23
 #define GPU_CC_PLL_TEST_CLK					24
-#define GPU_CC_RBCPR_AHB_CLK					25
-#define GPU_CC_RBCPR_CLK					26
-#define GPU_CC_RBCPR_CLK_SRC					27
-#define GPU_CC_SLEEP_CLK					28
-#define GPU_CC_GMU_CLK_SRC					29
-#define GPU_CC_CX_GFX3D_CLK					30
-#define GPU_CC_CX_GFX3D_SLV_CLK					31
+#define GPU_CC_SLEEP_CLK					25
+#define GPU_CC_GMU_CLK_SRC					26
+#define GPU_CC_CX_GFX3D_CLK					27
+#define GPU_CC_CX_GFX3D_SLV_CLK					28
 
 /* GPUCC reset clock registers */
 #define GPUCC_GPU_CC_ACD_BCR					0
@@ -54,9 +51,8 @@
 #define GPUCC_GPU_CC_GFX3D_AON_BCR				2
 #define GPUCC_GPU_CC_GMU_BCR					3
 #define GPUCC_GPU_CC_GX_BCR					4
-#define GPUCC_GPU_CC_RBCPR_BCR					5
-#define GPUCC_GPU_CC_SPDM_BCR					6
-#define GPUCC_GPU_CC_XO_BCR					7
+#define GPUCC_GPU_CC_SPDM_BCR					5
+#define GPUCC_GPU_CC_XO_BCR					6
 
 /* GFX3D clock registers */
 #define GPU_CC_PLL0						0
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 0000000..ec80d0c
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,79 @@
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/bpf.h>
+#include <linux/jump_label.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+	/*
+	 * Store two sets of bpf_prog pointers, one for programs that are
+	 * pinned directly to this cgroup, and one for those that are effective
+	 * when this cgroup is accessed.
+	 */
+	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb)			\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled)						\
+		__ret = __cgroup_bpf_run_filter(sk, skb,		\
+						BPF_CGROUP_INET_INGRESS); \
+									\
+	__ret;								\
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb)				\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
+		typeof(sk) __sk = sk_to_full_sk(sk);			\
+		if (sk_fullsock(__sk))					\
+			__ret = __cgroup_bpf_run_filter(__sk, skb,	\
+						BPF_CGROUP_INET_EGRESS); \
+	}								\
+	__ret;								\
+})
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+				      struct cgroup *parent) {}
+
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
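The kernel side above is driven from userspace through the new BPF_PROG_ATTACH command (added to include/uapi/linux/bpf.h further down in this patch). A hypothetical sketch of attaching an already-loaded BPF_PROG_TYPE_CGROUP_SKB program to a cgroup's ingress hook; prog_fd and the cgroup path are assumed, and the snippet relies on the updated uapi header:

#include <fcntl.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/bpf.h>

static int attach_to_cgroup_ingress(int prog_fd, const char *cgroup_path)
{
	union bpf_attr attr;
	int cg_fd, ret;

	cg_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
	if (cg_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cg_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;

	ret = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	close(cg_fd);
	return ret;
}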
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 0f4548c..b008a33 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -300,6 +301,9 @@ struct cgroup {
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
 	/* ids of the ancestors at each level including self */
 	int ancestor_ids[];
 };
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8d7265f..fd9b89f 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -185,6 +185,16 @@ void fence_release(struct kref *kref);
 void fence_free(struct fence *fence);
 
 /**
+ * fence_put - decreases refcount of the fence
+ * @fence:	[in]	fence to reduce refcount of
+ */
+static inline void fence_put(struct fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, fence_release);
+}
+
+/**
  * fence_get - increases refcount of the fence
  * @fence:	[in]	fence to increase refcount of
  *
@@ -212,13 +222,49 @@ static inline struct fence *fence_get_rcu(struct fence *fence)
 }
 
 /**
- * fence_put - decreases refcount of the fence
- * @fence:	[in]	fence to reduce refcount of
+ * fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
+ * @fence:	[in]	pointer to fence to increase refcount of
+ *
+ * Returns the fence, or NULL if no refcount could be obtained.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
  */
-static inline void fence_put(struct fence *fence)
+static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep)
 {
-	if (fence)
-		kref_put(&fence->refcount, fence_release);
+	do {
+		struct fence *fence;
+
+		fence = rcu_dereference(*fencep);
+		if (!fence || !fence_get_rcu(fence))
+			return NULL;
+
+		/* The atomic_inc_not_zero() inside fence_get_rcu()
+		 * provides a full memory barrier upon success (such as now).
+		 * This is paired with the write barrier from assigning
+		 * to the __rcu protected fence pointer so that if that
+		 * pointer still matches the current fence, we know we
+		 * have successfully acquired a reference to it. If it no
+		 * longer matches, we are holding a reference to some other
+		 * reallocated pointer. This is possible if the allocator
+		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
+		 * fence remains valid for the RCU grace period, but it
+		 * may be reallocated. When using such allocators, we are
+		 * responsible for ensuring the reference we get is to
+		 * the right fence, as below.
+		 */
+		if (fence == rcu_access_pointer(*fencep))
+			return rcu_pointer_handoff(fence);
+
+		fence_put(fence);
+	} while (1);
 }
 
 int fence_signal(struct fence *fence);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e974420..bc38b99a 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -35,6 +35,7 @@ enum hdmi_infoframe_type {
 };
 
 #define HDMI_IEEE_OUI 0x000c03
+#define HDMI_IEEE_OUI_HF	0xc45dd8
 #define HDMI_INFOFRAME_HEADER_SIZE  4
 #define HDMI_AVI_INFOFRAME_SIZE    13
 #define HDMI_SPD_INFOFRAME_SIZE    25
@@ -78,6 +79,8 @@ enum hdmi_picture_aspect {
 	HDMI_PICTURE_ASPECT_NONE,
 	HDMI_PICTURE_ASPECT_4_3,
 	HDMI_PICTURE_ASPECT_16_9,
+	HDMI_PICTURE_ASPECT_64_27,
+	HDMI_PICTURE_ASPECT_256_135,
 	HDMI_PICTURE_ASPECT_RESERVED,
 };
 
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index dd85f35..039e6ab 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@ struct hid_sensor_common {
 	unsigned usage_id;
 	atomic_t data_ready;
 	atomic_t user_requested_state;
+	int poll_interval;
+	int raw_hystersis;
 	struct iio_trigger *trigger;
 	struct hid_sensor_hub_attribute_info poll;
 	struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f68490..e233925 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -330,7 +330,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 					     int write, void __user *buffer,
 					     size_t *length, loff_t *ppos);
 #endif
-
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 4b5a339..1fe6e17 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,6 @@
 #define ENABLE_REGULATOR	BIT(0)
 #define DISABLE_REGULATOR	BIT(1)
 #define QUERY_MAX_CURRENT	BIT(2)
-#define PRE_FLASH		BIT(3)
 
 #define FLASH_LED_PREPARE_OPTIONS_MASK	GENMASK(3, 0)
 
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/include/linux/nfcinfo.h
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
rename to include/linux/nfcinfo.h
index b2d607d..b67a65f 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/include/linux/nfcinfo.h
@@ -10,5 +10,9 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
+#ifndef _NFCINFO_H
+#define _NFCINFO_H
+
+#include <uapi/linux/nfc/nfcinfo.h>
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 77912a1..72f9211 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -247,6 +247,7 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+	POWER_SUPPLY_PROP_REAL_TYPE,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 657ac07..5947107 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -308,6 +308,22 @@ struct se_geni_rsc {
 #define SE_DMA_RX_MAX_BURST	(0xD5C)
 #define SE_DMA_RX_FLUSH		(0xD60)
 
+/* SE_DMA_TX_IRQ_STAT Register fields */
+#define TX_DMA_DONE		(BIT(0))
+#define TX_EOT			(BIT(1))
+#define TX_SBE			(BIT(2))
+#define TX_RESET_DONE		(BIT(3))
+
+/* SE_DMA_RX_IRQ_STAT Register fields */
+#define RX_DMA_DONE		(BIT(0))
+#define RX_EOT			(BIT(1))
+#define RX_SBE			(BIT(2))
+#define RX_RESET_DONE		(BIT(3))
+#define RX_FLUSH_DONE		(BIT(4))
+#define RX_GENI_GP_IRQ		(GENMASK(10, 5))
+#define RX_GENI_CANCEL_IRQ	(BIT(11))
+#define RX_GENI_GP_IRQ_EXT	(GENMASK(13, 12))
+
 #define DEFAULT_BUS_WIDTH	(4)
 #define DEFAULT_SE_CLK		(19200000)
 
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e..bad7710 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -177,17 +177,14 @@ static inline struct fence *
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
 	struct fence *fence;
-	unsigned seq;
-retry:
-	seq = read_seqcount_begin(&obj->seq);
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
 	rcu_read_lock();
-	fence = rcu_dereference(obj->fence_excl);
-	if (read_seqcount_retry(&obj->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-	fence = fence_get(fence);
+	fence = fence_get_rcu_safe(&obj->fence_excl);
 	rcu_read_unlock();
+
 	return fence;
 }
 
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index f3fa9e6..f921909 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -206,6 +206,23 @@ struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
  */
 void sde_rsc_unregister_event(struct sde_rsc_event *event);
 
+/**
+ * is_sde_rsc_available - check if the display rsc is available.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is a valid rsc index.
+ * Returns: true if rsc is available; false in all other cases
+ */
+bool is_sde_rsc_available(int rsc_index);
+
+/**
+ * get_sde_rsc_current_state - gets the current state of the sde rsc.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is a valid rsc index.
+ * Returns: current state if rsc is available; SDE_RSC_IDLE_STATE for
+ *          all other cases
+ */
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index);
+
 #else
 
 static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
@@ -242,6 +259,15 @@ static inline void sde_rsc_unregister_event(struct sde_rsc_event *event)
 {
 }
 
+static inline bool is_sde_rsc_available(int rsc_index)
+{
+	return false;
+}
+
+static inline enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	return SDE_RSC_IDLE_STATE;
+}
 #endif /* CONFIG_DRM_SDE_RSC */
 
 #endif /* _SDE_RSC_H_ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index a0596ca0..a2f8109 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -24,6 +24,7 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
+u64 sock_gen_cookie(struct sock *sk);
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
 
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index b54eefc..dc404e4 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -100,8 +100,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxpoorwills")
 #define early_machine_is_sdm845()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
-#define early_machine_is_sdm830()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm830")
+#define early_machine_is_sdm670()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -141,7 +141,7 @@
 #define early_machine_is_msmfalcon()	0
 #define early_machine_is_sdxpoorwills()	0
 #define early_machine_is_sdm845()	0
-#define early_machine_is_sdm830()	0
+#define early_machine_is_sdm670()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -203,7 +203,7 @@ enum msm_cpu {
 	MSM_CPU_FALCON,
 	SDX_CPU_SDXPOORWILLS,
 	MSM_CPU_SDM845,
-	MSM_CPU_SDM830,
+	MSM_CPU_SDM670,
 };
 
 struct msm_soc_info {
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 5478417..9a0a53e 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -56,6 +56,8 @@ struct module;
  * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
  * @system_debug: If "set", triggers a device restart when the
  * subsystem's wdog bite handler is invoked.
+ * @ignore_ssr_failure: SSR failures are usually fatal and result in a panic.
+ * If set, such a failure is ignored instead.
  * @edge: GLINK logical name of the subsystem
  */
 struct subsys_desc {
@@ -91,6 +93,7 @@ struct subsys_desc {
 	u32 sysmon_pid;
 	int sysmon_shutdown_ret;
 	bool system_debug;
+	bool ignore_ssr_failure;
 	const char *edge;
 };
 
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
new file mode 100644
index 0000000..400e959
--- /dev/null
+++ b/include/trace/events/pdc.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pdc
+
+#if !defined(_TRACE_PDC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PDC_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(irq_pin_config,
+
+	TP_PROTO(char *func, u32 pin, u32 hwirq, u32 type, u32 enable),
+
+	TP_ARGS(func, pin, hwirq, type, enable),
+
+	TP_STRUCT__entry(
+		__field(char *, func)
+		__field(u32, pin)
+		__field(u32, hwirq)
+		__field(u32, type)
+		__field(u32, enable)
+	),
+
+	TP_fast_assign(
+		__entry->pin = pin;
+		__entry->func = func;
+		__entry->hwirq = hwirq;
+		__entry->type = type;
+		__entry->enable = enable;
+	),
+
+	TP_printk("%s pin:%u hwirq:%u type:%u enable:%u",
+		__entry->func, __entry->pin, __entry->hwirq, __entry->type,
+		__entry->enable)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE pdc
+#include <trace/define_trace.h>
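For context, TRACE_EVENT(irq_pin_config, ...) generates a trace_irq_pin_config() hook with the TP_PROTO signature above; exactly one compilation unit in the PDC driver must define CREATE_TRACE_POINTS before including the header. A hypothetical caller (the surrounding function is illustrative only):

#define CREATE_TRACE_POINTS
#include <trace/events/pdc.h>

/* Illustrative only: emit the tracepoint while (re)configuring a PDC pin. */
static void example_pdc_pin_config(u32 pin, u32 hwirq, u32 type, bool enable)
{
	trace_irq_pin_config((char *)__func__, pin, hwirq, type, enable ? 1 : 0);
	/* ... program the PDC enable/type registers here ... */
}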
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f66ba9c..9c927a5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -77,7 +77,8 @@ extern "C" {
 #define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
 #define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
 #define  DRM_MODE_FLAG_SEAMLESS			(1<<19)
-
+#define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<20)
+#define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<21)
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
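
The two new flags advertise per-mode colour-format support; a minimal sketch of
how a consumer might test them on a struct drm_mode_modeinfo (the helper is
illustrative only):

	static int mode_supports_yuv(const struct drm_mode_modeinfo *mode)
	{
		/* non-zero when the mode is flagged as supporting YUV output */
		return !!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV);
	}
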
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index eb18389..ab38f9e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -321,6 +321,8 @@ struct drm_msm_event_resp {
 /* sde custom events */
 #define DRM_EVENT_HISTOGRAM 0x80000000
 #define DRM_EVENT_AD_BACKLIGHT 0x80000001
+#define DRM_EVENT_CRTC_POWER 0x80000002
+#define DRM_EVENT_SYS_BACKLIGHT 0x80000003
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index e809c03..d9155a9 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -96,8 +96,8 @@ struct drm_msm_memcol {
  * @c2_c1: Holds c2/c1 values
  */
 struct drm_msm_3d_col {
-	__u32 c0;
 	__u32 c2_c1;
+	__u32 c0;
 };
 /**
  * struct drm_msm_3d_gamut - 3d gamut feature structure
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 7c1899e..cd758c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -382,6 +382,7 @@
 header-y += psci.h
 header-y += ptp_clock.h
 header-y += ptrace.h
+header-y += qbt1000.h
 header-y += qcedev.h
 header-y += qcota.h
 header-y += qnx4_fs.h
@@ -524,3 +525,4 @@
 header-y += msm_dsps.h
 header-y += msm-core-interface.h
 header-y += msm_rotator.h
+header-y += nfc/
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f09c70b..14eaf2d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@ enum bpf_cmd {
 	BPF_PROG_LOAD,
 	BPF_OBJ_PIN,
 	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -96,8 +98,17 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_TRACEPOINT,
 	BPF_PROG_TYPE_XDP,
 	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
 };
 
+enum bpf_attach_type {
+	BPF_CGROUP_INET_INGRESS,
+	BPF_CGROUP_INET_EGRESS,
+	__MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -141,6 +152,12 @@ union bpf_attr {
 		__aligned_u64	pathname;
 		__u32		bpf_fd;
 	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		__u32		target_fd;	/* container object to attach to */
+		__u32		attach_bpf_fd;	/* eBPF program to attach */
+		__u32		attach_type;
+	};
 } __attribute__((aligned(8)));
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -426,6 +443,67 @@ enum bpf_func_id {
 	 */
 	BPF_FUNC_set_hash_invalid,
 
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head()
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive/negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just count-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success, or 0 if the socket
+	 *     field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or 0 if the socket pointer
+	 *     inside sk_buff is NULL
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };
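
The new BPF_PROG_ATTACH/BPF_PROG_DETACH commands are driven through the
anonymous struct added to union bpf_attr above; a minimal userspace sketch (it
mirrors the bpf_prog_attach() helper this series adds to samples/bpf/libbpf.c):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	static int attach_to_cgroup(int prog_fd, int cgroup_fd)
	{
		union bpf_attr attr = {
			.target_fd     = cgroup_fd,		/* fd of the cgroup directory */
			.attach_bpf_fd = prog_fd,		/* fd returned by BPF_PROG_LOAD */
			.attach_type   = BPF_CGROUP_INET_INGRESS,
		};

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}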
 
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index ea68202..57c2ca4 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -130,89 +130,123 @@
  * enum ipa_client_type - names for the various IPA "clients"
  * these are from the perspective of the clients, for e.g.
  * HSIC1_PROD means HSIC client is the producer and IPA is the
- * consumer
+ * consumer.
+ * PROD clients are always even, and CONS clients are always odd.
+ * Add new clients at the end of the list and update IPA_CLIENT_MAX
  */
 enum ipa_client_type {
-	IPA_CLIENT_PROD,
-	IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
-	IPA_CLIENT_WLAN1_PROD,
-	IPA_CLIENT_HSIC2_PROD,
-	IPA_CLIENT_USB2_PROD,
-	IPA_CLIENT_HSIC3_PROD,
-	IPA_CLIENT_USB3_PROD,
-	IPA_CLIENT_HSIC4_PROD,
-	IPA_CLIENT_USB4_PROD,
-	IPA_CLIENT_HSIC5_PROD,
-	IPA_CLIENT_USB_PROD,
-	IPA_CLIENT_A5_WLAN_AMPDU_PROD,
-	IPA_CLIENT_A2_EMBEDDED_PROD,
-	IPA_CLIENT_A2_TETHERED_PROD,
-	IPA_CLIENT_APPS_LAN_PROD,
-	IPA_CLIENT_APPS_WAN_PROD,
+	IPA_CLIENT_HSIC1_PROD			= 0,
+	IPA_CLIENT_HSIC1_CONS			= 1,
+
+	IPA_CLIENT_HSIC2_PROD			= 2,
+	IPA_CLIENT_HSIC2_CONS			= 3,
+
+	IPA_CLIENT_HSIC3_PROD			= 4,
+	IPA_CLIENT_HSIC3_CONS			= 5,
+
+	IPA_CLIENT_HSIC4_PROD			= 6,
+	IPA_CLIENT_HSIC4_CONS			= 7,
+
+	IPA_CLIENT_HSIC5_PROD			= 8,
+	IPA_CLIENT_HSIC5_CONS			= 9,
+
+	IPA_CLIENT_WLAN1_PROD			= 10,
+	IPA_CLIENT_WLAN1_CONS			= 11,
+
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD		= 12,
+	IPA_CLIENT_WLAN2_CONS			= 13,
+
+	/* RESERVED PROD			= 14, */
+	IPA_CLIENT_WLAN3_CONS			= 15,
+
+	/* RESERVED PROD			= 16, */
+	IPA_CLIENT_WLAN4_CONS			= 17,
+
+	IPA_CLIENT_USB_PROD			= 18,
+	IPA_CLIENT_USB_CONS			= 19,
+
+	IPA_CLIENT_USB2_PROD			= 20,
+	IPA_CLIENT_USB2_CONS			= 21,
+
+	IPA_CLIENT_USB3_PROD			= 22,
+	IPA_CLIENT_USB3_CONS			= 23,
+
+	IPA_CLIENT_USB4_PROD			= 24,
+	IPA_CLIENT_USB4_CONS			= 25,
+
+	IPA_CLIENT_UC_USB_PROD			= 26,
+	IPA_CLIENT_USB_DPL_CONS			= 27,
+
+	IPA_CLIENT_A2_EMBEDDED_PROD		= 28,
+	IPA_CLIENT_A2_EMBEDDED_CONS		= 29,
+
+	IPA_CLIENT_A2_TETHERED_PROD		= 30,
+	IPA_CLIENT_A2_TETHERED_CONS		= 31,
+
+	IPA_CLIENT_APPS_LAN_PROD		= 32,
+	IPA_CLIENT_APPS_LAN_CONS		= 33,
+
+	IPA_CLIENT_APPS_WAN_PROD		= 34,
 	IPA_CLIENT_APPS_LAN_WAN_PROD = IPA_CLIENT_APPS_WAN_PROD,
-	IPA_CLIENT_APPS_CMD_PROD,
-	IPA_CLIENT_ODU_PROD,
-	IPA_CLIENT_MHI_PROD,
-	IPA_CLIENT_Q6_LAN_PROD,
-	IPA_CLIENT_Q6_WAN_PROD,
-	IPA_CLIENT_Q6_CMD_PROD,
-	IPA_CLIENT_MEMCPY_DMA_SYNC_PROD,
-	IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD,
-	IPA_CLIENT_Q6_DECOMP_PROD,
-	IPA_CLIENT_Q6_DECOMP2_PROD,
-	IPA_CLIENT_UC_USB_PROD,
-	IPA_CLIENT_ETHERNET_PROD,
+	IPA_CLIENT_APPS_WAN_CONS		= 35,
 
-	/* Below PROD client type is only for test purpose */
-	IPA_CLIENT_TEST_PROD,
-	IPA_CLIENT_TEST1_PROD,
-	IPA_CLIENT_TEST2_PROD,
-	IPA_CLIENT_TEST3_PROD,
-	IPA_CLIENT_TEST4_PROD,
+	IPA_CLIENT_APPS_CMD_PROD		= 36,
+	IPA_CLIENT_A5_LAN_WAN_CONS		= 37,
 
-	IPA_CLIENT_CONS,
-	IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
-	IPA_CLIENT_WLAN1_CONS,
-	IPA_CLIENT_HSIC2_CONS,
-	IPA_CLIENT_USB2_CONS,
-	IPA_CLIENT_WLAN2_CONS,
-	IPA_CLIENT_HSIC3_CONS,
-	IPA_CLIENT_USB3_CONS,
-	IPA_CLIENT_WLAN3_CONS,
-	IPA_CLIENT_HSIC4_CONS,
-	IPA_CLIENT_USB4_CONS,
-	IPA_CLIENT_WLAN4_CONS,
-	IPA_CLIENT_HSIC5_CONS,
-	IPA_CLIENT_USB_CONS,
-	IPA_CLIENT_USB_DPL_CONS,
-	IPA_CLIENT_A2_EMBEDDED_CONS,
-	IPA_CLIENT_A2_TETHERED_CONS,
-	IPA_CLIENT_A5_LAN_WAN_CONS,
-	IPA_CLIENT_APPS_LAN_CONS,
-	IPA_CLIENT_APPS_WAN_CONS,
-	IPA_CLIENT_ODU_EMB_CONS,
-	IPA_CLIENT_ODU_TETH_CONS,
-	IPA_CLIENT_MHI_CONS,
-	IPA_CLIENT_Q6_LAN_CONS,
-	IPA_CLIENT_Q6_WAN_CONS,
-	IPA_CLIENT_Q6_DUN_CONS,
-	IPA_CLIENT_MEMCPY_DMA_SYNC_CONS,
-	IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS,
-	IPA_CLIENT_Q6_DECOMP_CONS,
-	IPA_CLIENT_Q6_DECOMP2_CONS,
-	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
-	IPA_CLIENT_ETHERNET_CONS,
+	IPA_CLIENT_ODU_PROD			= 38,
+	IPA_CLIENT_ODU_EMB_CONS			= 39,
 
-	/* Below CONS client type is only for test purpose */
-	IPA_CLIENT_TEST_CONS,
-	IPA_CLIENT_TEST1_CONS,
-	IPA_CLIENT_TEST2_CONS,
-	IPA_CLIENT_TEST3_CONS,
-	IPA_CLIENT_TEST4_CONS,
+	/* RESERVED PROD			= 40, */
+	IPA_CLIENT_ODU_TETH_CONS		= 41,
 
-	IPA_CLIENT_MAX,
+	IPA_CLIENT_MHI_PROD			= 42,
+	IPA_CLIENT_MHI_CONS			= 43,
+
+	IPA_CLIENT_MEMCPY_DMA_SYNC_PROD		= 44,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_CONS		= 45,
+
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD	= 46,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS	= 47,
+
+	IPA_CLIENT_ETHERNET_PROD		= 48,
+	IPA_CLIENT_ETHERNET_CONS		= 49,
+
+	IPA_CLIENT_Q6_LAN_PROD			= 50,
+	IPA_CLIENT_Q6_LAN_CONS			= 51,
+
+	IPA_CLIENT_Q6_WAN_PROD			= 52,
+	IPA_CLIENT_Q6_WAN_CONS			= 53,
+
+	IPA_CLIENT_Q6_CMD_PROD			= 54,
+	IPA_CLIENT_Q6_DUN_CONS			= 55,
+
+	IPA_CLIENT_Q6_DECOMP_PROD		= 56,
+	IPA_CLIENT_Q6_DECOMP_CONS		= 57,
+
+	IPA_CLIENT_Q6_DECOMP2_PROD		= 58,
+	IPA_CLIENT_Q6_DECOMP2_CONS		= 59,
+
+	/* RESERVED PROD			= 60, */
+	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS	= 61,
+
+	IPA_CLIENT_TEST_PROD			= 62,
+	IPA_CLIENT_TEST_CONS			= 63,
+
+	IPA_CLIENT_TEST1_PROD			= 64,
+	IPA_CLIENT_TEST1_CONS			= 65,
+
+	IPA_CLIENT_TEST2_PROD			= 66,
+	IPA_CLIENT_TEST2_CONS			= 67,
+
+	IPA_CLIENT_TEST3_PROD			= 68,
+	IPA_CLIENT_TEST3_CONS			= 69,
+
+	IPA_CLIENT_TEST4_PROD			= 70,
+	IPA_CLIENT_TEST4_CONS			= 71,
 };
 
+#define IPA_CLIENT_MAX (IPA_CLIENT_TEST4_CONS + 1)
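
With the renumbering, every producer sits on an even value and its paired
consumer on the following odd value; a purely illustrative check of that
convention (not a helper from this header, and reserved slots still have no
client defined):

	/* Illustrative only: even IDs are producers, odd IDs are consumers. */
	static inline int ipa_client_id_is_prod(enum ipa_client_type client)
	{
		return client < IPA_CLIENT_MAX && (client & 1) == 0;
	}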
+
 #define IPA_CLIENT_IS_APPS_CONS(client) \
 	((client) == IPA_CLIENT_APPS_LAN_CONS || \
 	(client) == IPA_CLIENT_APPS_WAN_CONS)
@@ -317,8 +351,8 @@ enum ipa_ip_type {
 enum ipa_rule_type {
 	IPA_RULE_HASHABLE,
 	IPA_RULE_NON_HASHABLE,
-	IPA_RULE_TYPE_MAX
 };
+#define IPA_RULE_TYPE_MAX (IPA_RULE_NON_HASHABLE + 1)
 
 /**
  * enum ipa_flt_action - action field of filtering rule
@@ -405,35 +439,44 @@ enum ipa_tethering_stats_event {
 /**
  * enum ipa_rm_resource_name - IPA RM clients identification names
  *
- * Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
- * when adding new entry to this enum.
+ * PROD resources are always even, and CONS resources are always odd.
+ * Add new resources at the end of the list and update IPA_RM_RESOURCE_MAX
  */
 enum ipa_rm_resource_name {
-	IPA_RM_RESOURCE_PROD = 0,
-	IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
-	IPA_RM_RESOURCE_USB_PROD,
-	IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
-	IPA_RM_RESOURCE_HSIC_PROD,
-	IPA_RM_RESOURCE_STD_ECM_PROD,
-	IPA_RM_RESOURCE_RNDIS_PROD,
-	IPA_RM_RESOURCE_WWAN_0_PROD,
-	IPA_RM_RESOURCE_WLAN_PROD,
-	IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-	IPA_RM_RESOURCE_MHI_PROD,
-	IPA_RM_RESOURCE_ETHERNET_PROD,
-	IPA_RM_RESOURCE_PROD_MAX,
+	IPA_RM_RESOURCE_Q6_PROD				= 0,
+	IPA_RM_RESOURCE_Q6_CONS				= 1,
 
-	IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
-	IPA_RM_RESOURCE_USB_CONS,
-	IPA_RM_RESOURCE_USB_DPL_CONS,
-	IPA_RM_RESOURCE_HSIC_CONS,
-	IPA_RM_RESOURCE_WLAN_CONS,
-	IPA_RM_RESOURCE_APPS_CONS,
-	IPA_RM_RESOURCE_ODU_ADAPT_CONS,
-	IPA_RM_RESOURCE_MHI_CONS,
-	IPA_RM_RESOURCE_ETHERNET_CONS,
-	IPA_RM_RESOURCE_MAX
+	IPA_RM_RESOURCE_USB_PROD			= 2,
+	IPA_RM_RESOURCE_USB_CONS			= 3,
+
+	IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD		= 4,
+	IPA_RM_RESOURCE_USB_DPL_CONS			= 5,
+
+	IPA_RM_RESOURCE_HSIC_PROD			= 6,
+	IPA_RM_RESOURCE_HSIC_CONS			= 7,
+
+	IPA_RM_RESOURCE_STD_ECM_PROD			= 8,
+	IPA_RM_RESOURCE_APPS_CONS			= 9,
+
+	IPA_RM_RESOURCE_RNDIS_PROD			= 10,
+	/* RESERVED CONS				= 11, */
+
+	IPA_RM_RESOURCE_WWAN_0_PROD			= 12,
+	/* RESERVED CONS				= 13, */
+
+	IPA_RM_RESOURCE_WLAN_PROD			= 14,
+	IPA_RM_RESOURCE_WLAN_CONS			= 15,
+
+	IPA_RM_RESOURCE_ODU_ADAPT_PROD			= 16,
+	IPA_RM_RESOURCE_ODU_ADAPT_CONS			= 17,
+
+	IPA_RM_RESOURCE_MHI_PROD			= 18,
+	IPA_RM_RESOURCE_MHI_CONS			= 19,
+
+	IPA_RM_RESOURCE_ETHERNET_PROD			= 20,
+	IPA_RM_RESOURCE_ETHERNET_CONS			= 21,
 };
+#define IPA_RM_RESOURCE_MAX (IPA_RM_RESOURCE_ETHERNET_CONS + 1)
 
 /**
  * enum ipa_hw_type - IPA hardware version type
@@ -465,8 +508,8 @@ enum ipa_hw_type {
 	IPA_HW_v3_5 = 12,
 	IPA_HW_v3_5_1 = 13,
 	IPA_HW_v4_0 = 14,
-	IPA_HW_MAX
 };
+#define IPA_HW_MAX (IPA_HW_v4_0 + 1)
 
 #define IPA_HW_v4_0 IPA_HW_v4_0
 
@@ -700,8 +743,8 @@ enum ipa_hdr_l2_type {
 	IPA_HDR_L2_NONE,
 	IPA_HDR_L2_ETHERNET_II,
 	IPA_HDR_L2_802_3,
-	IPA_HDR_L2_MAX,
 };
+#define IPA_HDR_L2_MAX (IPA_HDR_L2_802_3 + 1)
 
 /**
  * enum ipa_hdr_l2_type - Processing context type
@@ -717,8 +760,8 @@ enum ipa_hdr_proc_type {
 	IPA_HDR_PROC_ETHII_TO_802_3,
 	IPA_HDR_PROC_802_3_TO_ETHII,
 	IPA_HDR_PROC_802_3_TO_802_3,
-	IPA_HDR_PROC_MAX,
 };
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_802_3_TO_802_3 + 1)
 
 /**
  * struct ipa_rt_rule - attributes of a routing rule
diff --git a/include/uapi/linux/nfc/Kbuild b/include/uapi/linux/nfc/Kbuild
new file mode 100644
index 0000000..9071015
--- /dev/null
+++ b/include/uapi/linux/nfc/Kbuild
@@ -0,0 +1,2 @@
+#UAPI export list
+header-y += nfcinfo.h
diff --git a/include/uapi/linux/nfc/nfcinfo.h b/include/uapi/linux/nfc/nfcinfo.h
new file mode 100644
index 0000000..df178e2
--- /dev/null
+++ b/include/uapi/linux/nfc/nfcinfo.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_NFCINFO_H_
+#define _UAPI_NFCINFO_H_
+
+#include <linux/ioctl.h>
+
+#define NFCC_MAGIC 0xE9
+#define NFCC_GET_INFO _IOW(NFCC_MAGIC, 0x09, unsigned int)
+
+struct nqx_devinfo {
+	unsigned char chip_type;
+	unsigned char rom_version;
+	unsigned char fw_major;
+	unsigned char fw_minor;
+};
+
+union nqx_uinfo {
+	unsigned int i;
+	struct nqx_devinfo info;
+};
+
+#endif
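
A hedged userspace sketch of reading the packed chip information; it assumes
the NFC driver returns the packed value from the NFCC_GET_INFO ioctl and that
the device node is /dev/nq-nci — both assumptions, neither is stated by this
header:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nfc/nfcinfo.h>

	static int print_nfcc_version(void)
	{
		union nqx_uinfo u;
		int fd = open("/dev/nq-nci", O_RDONLY);	/* assumed device node */

		if (fd < 0)
			return -1;
		u.i = ioctl(fd, NFCC_GET_INFO, 0);	/* assumed: info packed in the return value */
		printf("chip %d rom %d fw %d.%d\n",
		       u.info.chip_type, u.info.rom_version,
		       u.info.fw_major, u.info.fw_minor);
		close(fd);
		return 0;
	}
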
diff --git a/include/uapi/linux/qbt1000.h b/include/uapi/linux/qbt1000.h
new file mode 100644
index 0000000..a4f0dca
--- /dev/null
+++ b/include/uapi/linux/qbt1000.h
@@ -0,0 +1,99 @@
+#ifndef _UAPI_QBT1000_H_
+#define _UAPI_QBT1000_H_
+
+#define MAX_NAME_SIZE					 32
+
+/*
+ * enum qbt1000_commands -
+ *      enumeration of command options
+ * @QBT1000_LOAD_APP - cmd loads TZ app
+ * @QBT1000_UNLOAD_APP - cmd unloads TZ app
+ * @QBT1000_SEND_TZCMD - sends cmd to TZ app
+ * @QBT1000_SET_FINGER_DETECT_KEY - sets the input key to send on finger detect
+ * @QBT1000_CONFIGURE_POWER_KEY - enables/disables sending the power key on
+ *	finger down events
+ */
+enum qbt1000_commands {
+	QBT1000_LOAD_APP = 100,
+	QBT1000_UNLOAD_APP = 101,
+	QBT1000_SEND_TZCMD = 102,
+	QBT1000_SET_FINGER_DETECT_KEY = 103,
+	QBT1000_CONFIGURE_POWER_KEY = 104
+};
+
+/*
+ * enum qbt1000_fw_event -
+ *      enumeration of firmware events
+ * @FW_EVENT_FINGER_DOWN - finger down detected
+ * @FW_EVENT_FINGER_UP - finger up detected
+ * @FW_EVENT_CBGE_REQUIRED - an indication IPC from the firmware is pending
+ */
+enum qbt1000_fw_event {
+	FW_EVENT_FINGER_DOWN = 1,
+	FW_EVENT_FINGER_UP = 2,
+	FW_EVENT_CBGE_REQUIRED = 3,
+};
+
+/*
+ * struct qbt1000_app -
+ *      used to load and unload apps in TZ
+ * @app_handle - qseecom handle for clients
+ * @name - Name of secure app to load
+ * @size - Size of requested buffer of secure app
+ * @high_band_width - 1 - for high bandwidth usage
+ *                    0 - for normal bandwidth usage
+ */
+struct qbt1000_app {
+	struct qseecom_handle **app_handle;
+	char name[MAX_NAME_SIZE];
+	uint32_t size;
+	uint8_t high_band_width;
+};
+
+/*
+ * struct qbt1000_send_tz_cmd -
+ *      used to send commands to the TZ app
+ * @app_handle - qseecom handle for clients
+ * @req_buf - Buffer containing request for secure app
+ * @req_buf_len - Length of request buffer
+ * @rsp_buf - Buffer containing response from secure app
+ * @rsp_buf_len - Length of response buffer
+ */
+struct qbt1000_send_tz_cmd {
+	struct qseecom_handle *app_handle;
+	uint8_t *req_buf;
+	uint32_t req_buf_len;
+	uint8_t *rsp_buf;
+	uint32_t rsp_buf_len;
+};
+
+/*
+ * struct qbt1000_erie_event -
+ *      used to receive events from Erie
+ * @buf - Buffer containing event from Erie
+ * @buf_len - Length of buffer
+ */
+struct qbt1000_erie_event {
+	uint8_t *buf;
+	uint32_t buf_len;
+};
+
+/*
+ * struct qbt1000_set_finger_detect_key -
+ *      used to configure the input key which is sent on finger down/up event
+ * @key_code - Key code to send on finger down/up. 0 disables sending key events
+ */
+struct qbt1000_set_finger_detect_key {
+	unsigned int key_code;
+};
+
+/*
+ * struct qbt1000_configure_power_key -
+ *      used to configure whether the power key is sent on finger down
+ * @enable - if non-zero, power key is sent on finger down
+ */
+struct qbt1000_configure_power_key {
+	unsigned int enable;
+};
+
+#endif /* _UAPI_QBT1000_H_ */
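
A hedged sketch of configuring the finger-detect key from userspace; it assumes
the qbt1000 driver takes the enum qbt1000_commands values directly as ioctl
command numbers and exposes a /dev/qbt1000 node — both are assumptions, neither
is defined by this header:

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>
	#include <linux/qbt1000.h>

	static int set_finger_detect_key(void)
	{
		struct qbt1000_set_finger_detect_key arg = {
			.key_code = KEY_POWER,		/* 0 would disable key events */
		};
		int fd = open("/dev/qbt1000", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return -1;
		if (ioctl(fd, QBT1000_SET_FINGER_DETECT_KEY, &arg) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}
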
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index b736755..e6c1a45 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -325,4 +325,55 @@ struct cam_mem_cache_ops_cmd {
 	uint32_t mem_cache_ops;
 };
 
+/**
+ * Request Manager : error message type
+ * @CAM_REQ_MGR_ERROR_TYPE_DEVICE: Device error message, fatal to session
+ * @CAM_REQ_MGR_ERROR_TYPE_REQUEST: Error on a single request, not fatal
+ * @CAM_REQ_MGR_ERROR_TYPE_BUFFER: Buffer was not filled, not fatal
+ */
+#define CAM_REQ_MGR_ERROR_TYPE_DEVICE           0
+#define CAM_REQ_MGR_ERROR_TYPE_REQUEST          1
+#define CAM_REQ_MGR_ERROR_TYPE_BUFFER           2
+
+/**
+ * struct cam_req_mgr_error_msg
+ * @error_type: type of error
+ * @request_id: request id of frame
+ * @device_hdl: device handle
+ * @reserved: reserved field
+ * @resource_size: size of the resource
+ */
+struct cam_req_mgr_error_msg {
+	uint32_t error_type;
+	uint32_t request_id;
+	int32_t device_hdl;
+	int32_t reserved;
+	uint64_t resource_size;
+};
+
+/**
+ * struct cam_req_mgr_frame_msg
+ * @request_id: request id of frame
+ * @frame_count: running count of frames
+ * @timestamp: timestamp of frame
+ */
+struct cam_req_mgr_frame_msg {
+	uint64_t request_id;
+	uint64_t frame_count;
+	uint64_t timestamp;
+};
+
+/**
+ * struct cam_req_mgr_message
+ * @session_hdl: session to which the frame belongs
+ * @reserved: reserved field
+ * @u: union which can either be error or frame message
+ */
+struct cam_req_mgr_message {
+	int32_t session_hdl;
+	union {
+		struct cam_req_mgr_error_msg err_msg;
+		struct cam_req_mgr_frame_msg frame_msg;
+	} u;
+};
 #endif /* __UAPI_LINUX_CAM_REQ_MGR_H */
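
A minimal sketch of classifying one of these messages once it has been
received; how the message is delivered (for example as a v4l2 event payload) is
outside this header, so only the decode step is shown:

	static int cam_error_is_fatal(const struct cam_req_mgr_message *msg)
	{
		/* DEVICE errors take down the whole session (msg->session_hdl);
		 * REQUEST and BUFFER errors affect only msg->u.err_msg.request_id.
		 */
		return msg->u.err_msg.error_type == CAM_REQ_MGR_ERROR_TYPE_DEVICE;
	}
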
diff --git a/init/Kconfig b/init/Kconfig
index bdfcc0f..d8a5868 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1248,6 +1248,19 @@
 
 	  Say N if unsure.
 
+config CGROUP_BPF
+	bool "Support for eBPF programs attached to cgroups"
+	depends on BPF_SYSCALL
+	select SOCK_CGROUP_DATA
+	help
+	  Allow attaching eBPF programs to a cgroup using the bpf(2)
+	  syscall command BPF_PROG_ATTACH.
+
+	  The context in which these programs run depends on the type
+	  of attachment. For instance, programs that are attached using
+	  BPF_CGROUP_INET_INGRESS will be executed on the ingress path of
+	  inet sockets.
+
 config CGROUP_DEBUG
 	bool "Example controller"
 	default n
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d..b22256b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,3 +5,4 @@
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
+obj-$(CONFIG_CGROUP_BPF) += cgroup.o
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
new file mode 100644
index 0000000..d05c292
--- /dev/null
+++ b/kernel/bpf/cgroup.c
@@ -0,0 +1,170 @@
+/*
+ * Functions to manage eBPF programs attached to cgroups
+ *
+ * Copyright (c) 2016 Daniel Mack
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup.h>
+#include <net/sock.h>
+
+DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+
+/**
+ * cgroup_bpf_put() - put references of all bpf programs
+ * @cgrp: the cgroup to modify
+ */
+void cgroup_bpf_put(struct cgroup *cgrp)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
+		struct bpf_prog *prog = cgrp->bpf.prog[type];
+
+		if (prog) {
+			bpf_prog_put(prog);
+			static_branch_dec(&cgroup_bpf_enabled_key);
+		}
+	}
+}
+
+/**
+ * cgroup_bpf_inherit() - inherit effective programs from parent
+ * @cgrp: the cgroup to modify
+ * @parent: the parent to inherit from
+ */
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
+		struct bpf_prog *e;
+
+		e = rcu_dereference_protected(parent->bpf.effective[type],
+					      lockdep_is_held(&cgroup_mutex));
+		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+	}
+}
+
+/**
+ * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup whose descendants to traverse
+ * @parent: The parent of @cgrp, or %NULL if @cgrp is the root
+ * @prog: A new program to pin
+ * @type: Type of pinning operation (ingress/egress)
+ *
+ * Each cgroup holds two sets of bpf program pointers: one for the eBPF
+ * programs it owns, and one for the programs that are effective for execution.
+ *
+ * If @prog is not %NULL, this function attaches a new program to the cgroup and
+ * releases the one that is currently attached, if any. @prog is then made
+ * the effective program of type @type in that cgroup.
+ *
+ * If @prog is %NULL, the currently attached program of type @type is released,
+ * and the effective program of the parent cgroup (if any) is inherited to
+ * @cgrp.
+ *
+ * Then, the descendants of @cgrp are walked and the effective program for
+ * each of them is set to the effective program of @cgrp unless the
+ * descendant has its own program attached, in which case the subbranch is
+ * skipped. This ensures that delegated subcgroups with their own programs are left
+ * untouched.
+ *
+ * Must be called with cgroup_mutex held.
+ */
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type)
+{
+	struct bpf_prog *old_prog, *effective;
+	struct cgroup_subsys_state *pos;
+
+	old_prog = xchg(cgrp->bpf.prog + type, prog);
+
+	effective = (!prog && parent) ?
+		rcu_dereference_protected(parent->bpf.effective[type],
+					  lockdep_is_held(&cgroup_mutex)) :
+		prog;
+
+	css_for_each_descendant_pre(pos, &cgrp->self) {
+		struct cgroup *desc = container_of(pos, struct cgroup, self);
+
+		/* skip the subtree if the descendant has its own program */
+		if (desc->bpf.prog[type] && desc != cgrp)
+			pos = css_rightmost_descendant(pos);
+		else
+			rcu_assign_pointer(desc->bpf.effective[type],
+					   effective);
+	}
+
+	if (prog)
+		static_branch_inc(&cgroup_bpf_enabled_key);
+
+	if (old_prog) {
+		bpf_prog_put(old_prog);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+}
+
+/**
+ * __cgroup_bpf_run_filter() - Run a program for packet filtering
+ * @sk: The socket sending or receiving traffic
+ * @skb: The skb that is being sent or received
+ * @type: The type of program to be executed
+ *
+ * If no socket is passed, or the socket is not of type INET or INET6,
+ * this function does nothing and returns 0.
+ *
+ * The program type passed in via @type must be suitable for network
+ * filtering. No further check is performed to assert that.
+ *
+ * This function will return %-EPERM if an attached program was found and it
+ * returned != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret = 0;
+
+	if (!sk || !sk_fullsock(sk))
+		return 0;
+
+	if (sk->sk_family != AF_INET &&
+	    sk->sk_family != AF_INET6)
+		return 0;
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(cgrp->bpf.effective[type]);
+	if (prog) {
+		unsigned int offset = skb->data - skb_network_header(skb);
+		struct sock *save_sk = skb->sk;
+
+		skb->sk = sk;
+		__skb_push(skb, offset);
+		ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
+		__skb_pull(skb, offset);
+		skb->sk = save_sk;
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(__cgroup_bpf_run_filter);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 237f3d6..e13157f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -824,6 +824,77 @@ static int bpf_obj_get(const union bpf_attr *attr)
 	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
 }
 
+#ifdef CONFIG_CGROUP_BPF
+
+#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+
+static int bpf_prog_attach(const union bpf_attr *attr)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_ATTACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		prog = bpf_prog_get_type(attr->attach_bpf_fd,
+					 BPF_PROG_TYPE_CGROUP_SKB);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp)) {
+			bpf_prog_put(prog);
+			return PTR_ERR(cgrp);
+		}
+
+		cgroup_bpf_update(cgrp, prog, attr->attach_type);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define BPF_PROG_DETACH_LAST_FIELD attach_type
+
+static int bpf_prog_detach(const union bpf_attr *attr)
+{
+	struct cgroup *cgrp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_DETACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp))
+			return PTR_ERR(cgrp);
+
+		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
@@ -890,6 +961,16 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	case BPF_OBJ_GET:
 		err = bpf_obj_get(&attr);
 		break;
+
+#ifdef CONFIG_CGROUP_BPF
+	case BPF_PROG_ATTACH:
+		err = bpf_prog_attach(&attr);
+		break;
+	case BPF_PROG_DETACH:
+		err = bpf_prog_detach(&attr);
+		break;
+#endif
+
 	default:
 		err = -EINVAL;
 		break;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6670008..0fab276 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5096,6 +5096,8 @@ static void css_release_work_fn(struct work_struct *work)
 		if (cgrp->kn)
 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
 					 NULL);
+
+		cgroup_bpf_put(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -5308,6 +5310,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (parent)
+		cgroup_bpf_inherit(cgrp, parent);
+
 	cgroup_propagate_control(cgrp);
 
 	return cgrp;
@@ -6514,6 +6519,19 @@ static __init int cgroup_namespaces_init(void)
 }
 subsys_initcall(cgroup_namespaces_init);
 
+#ifdef CONFIG_CGROUP_BPF
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+
+	mutex_lock(&cgroup_mutex);
+	__cgroup_bpf_update(cgrp, parent, prog, type);
+	mutex_unlock(&cgroup_mutex);
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *
 debug_css_alloc(struct cgroup_subsys_state *parent_css)
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index b7b997f..bc615c6 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -19,6 +19,7 @@
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
diff --git a/kernel/fork.c b/kernel/fork.c
index cb4faae..33663b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-	tsk->stack_canary = get_random_int();
+	tsk->stack_canary = get_random_long();
 #endif
 
 	/*
@@ -1774,11 +1774,13 @@ static __latent_entropy struct task_struct *copy_process(
 	*/
 	recalc_sigpending();
 	if (signal_pending(current)) {
-		spin_unlock(&current->sighand->siglock);
-		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+		retval = -ENOMEM;
+		goto bad_fork_cancel_cgroup;
+	}
 
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1829,6 +1831,8 @@ static __latent_entropy struct task_struct *copy_process(
 	return p;
 
 bad_fork_cancel_cgroup:
+	spin_unlock(&current->sighand->siglock);
+	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
 	threadgroup_change_end(current);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index be3c34e..077c87f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -877,8 +877,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
 	if (!desc)
 		return;
 
-	__irq_do_set_handler(desc, handle, 1, NULL);
 	desc->irq_common_data.handler_data = data;
+	__irq_do_set_handler(desc, handle, 1, NULL);
 
 	irq_put_desc_busunlock(desc, flags);
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d630954..a1a07cf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -563,7 +563,7 @@ static void kprobe_optimizer(struct work_struct *work)
 }
 
 /* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..c61c56f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
+#include <linux/delay.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * values at the cost of a few extra spins.
 		 */
 		cpu_relax_lowlatency();
+
+		/*
+		 * On ARM systems, we must slow down the waiter's repeated
+		 * acquisition of spin_mlock and atomics on the lock count, or
+		 * we risk starving out a thread attempting to release the
+		 * mutex. The mutex slowpath release must take the wait_lock
+		 * spinlock. This spinlock can share a monitor with the other
+		 * waiter atomics in the mutex data structure, so we must take
+		 * care to rate limit the waiters.
+		 */
+		udelay(1);
 	}
 
 	osq_unlock(&lock->osq);
diff --git a/kernel/padata.c b/kernel/padata.c
index b4a3c0a..e4a8f8d 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -358,7 +358,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
 
 	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
-		free_cpumask_var(pd->cpumask.cbcpu);
+		free_cpumask_var(pd->cpumask.pcpu);
 		return -ENOMEM;
 	}
 
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index eef2ce9..3976dd5 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -274,7 +274,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	 * if reparented.
 	 */
 	for (;;) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (pid_ns->nr_hashed == init_pids)
 			break;
 		schedule();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ccb2321..b6fb796 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3327,33 +3327,6 @@ static void sched_freq_tick(int cpu)
 static inline void sched_freq_tick(int cpu) { }
 #endif /* CONFIG_CPU_FREQ_GOV_SCHED */
 
-#ifdef CONFIG_SCHED_WALT
-static atomic64_t walt_irq_work_lastq_ws;
-
-static inline u64 walt_window_start_of(struct rq *rq)
-{
-	return rq->window_start;
-}
-
-static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
-{
-	/* No HMP since that uses sched_get_cpus_busy */
-	if (rq->window_start != window_start &&
-		atomic_cmpxchg(&walt_irq_work_lastq_ws, window_start,
-			   rq->window_start) == window_start)
-		irq_work_queue(&rq->irq_work);
-}
-#else
-static inline u64 walt_window_start_of(struct rq *rq)
-{
-	return 0;
-}
-
-static inline void run_walt_irq_work(u64 window_start, struct rq *rq)
-{
-}
-#endif
-
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -3367,22 +3340,14 @@ void scheduler_tick(void)
 	bool early_notif;
 	u32 old_load;
 	struct related_thread_group *grp;
-	u64 window_start;
 
 	sched_clock_tick();
 
 	raw_spin_lock(&rq->lock);
 
-	/*
-	 * Record current window_start. If after utra() below the window
-	 * has rolled over, schedule a load-reporting irq-work
-	 */
-	window_start = walt_window_start_of(rq);
-
 	old_load = task_load(curr);
 	set_window_start(rq);
 
-
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 
@@ -3392,8 +3357,6 @@ void scheduler_tick(void)
 	calc_global_load_tick(rq);
 	cpufreq_update_util(rq, 0);
 
-	run_walt_irq_work(window_start, rq);
-
 	early_notif = early_detection_notify(rq, wallclock);
 
 	raw_spin_unlock(&rq->lock);
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 005d15e..e594804 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -44,6 +44,7 @@ struct cluster_data {
 	bool pending;
 	spinlock_t pending_lock;
 	bool is_big_cluster;
+	bool enable;
 	int nrrun;
 	struct task_struct *core_ctl_thread;
 	unsigned int first_cpu;
@@ -247,6 +248,29 @@ static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
 }
 
+static ssize_t store_enable(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+	bool bval;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	bval = !!val;
+	if (bval != state->enable) {
+		state->enable = bval;
+		apply_need(state);
+	}
+
+	return count;
+}
+
+static ssize_t show_enable(const struct cluster_data *state, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
+}
+
 static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -377,6 +401,7 @@ core_ctl_attr_ro(need_cpus);
 core_ctl_attr_ro(active_cpus);
 core_ctl_attr_ro(global_state);
 core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(enable);
 
 static struct attribute *default_attrs[] = {
 	&min_cpus.attr,
@@ -386,6 +411,7 @@ static struct attribute *default_attrs[] = {
 	&busy_down_thres.attr,
 	&task_thres.attr,
 	&is_big_cluster.attr,
+	&enable.attr,
 	&need_cpus.attr,
 	&active_cpus.attr,
 	&global_state.attr,
@@ -529,7 +555,7 @@ static bool eval_need(struct cluster_data *cluster)
 
 	spin_lock_irqsave(&state_lock, flags);
 
-	if (cluster->boost) {
+	if (cluster->boost || !cluster->enable) {
 		need_cpus = cluster->max_cpus;
 	} else {
 		cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1020,6 +1046,7 @@ static int cluster_init(const struct cpumask *mask)
 	cluster->offline_delay_ms = 100;
 	cluster->task_thres = UINT_MAX;
 	cluster->nrrun = cluster->num_cpus;
+	cluster->enable = true;
 	INIT_LIST_HEAD(&cluster->lru);
 	spin_lock_init(&cluster->pending_lock);
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 42630ec..c42380a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -24,6 +24,7 @@ struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
 	unsigned int hispeed_freq;
+	bool pl;
 };
 
 struct sugov_policy {
@@ -224,7 +225,8 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
 		*util = *max;
 
-	*util = max(*util, sg_cpu->walt_load.pl);
+	if (sg_policy->tunables->pl)
+		*util = max(*util, sg_cpu->walt_load.pl);
 }
 
 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -450,12 +452,32 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
 	return count;
 }
 
+static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->pl);
+}
+
+static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
+				   size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtobool(buf, &tunables->pl))
+		return -EINVAL;
+
+	return count;
+}
+
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
+static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
 	&hispeed_freq.attr,
+	&pl.attr,
 	NULL
 };
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 82e6490..6ccd3a7 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12319,7 +12319,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 			return;
 
 		new_cpu = energy_aware_wake_cpu(p, cpu, 0);
-		if (new_cpu != cpu) {
+		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
 			active_balance = kick_active_balance(rq, p, new_cpu);
 			if (active_balance) {
 				mark_reserved(new_cpu);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 29b6e3d..5220511 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1816,6 +1816,7 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
 
 			walt_load->prev_window_util = util;
 			walt_load->nl = nl;
+			walt_load->pl = 0;
 		}
 	}
 #endif
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 58854b0..b89abbd 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -47,6 +47,7 @@ static bool sched_ktime_suspended;
 static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
 static bool use_cycle_counter;
 DEFINE_MUTEX(cluster_lock);
+static atomic64_t walt_irq_work_lastq_ws;
 
 u64 sched_ktime_clock(void)
 {
@@ -298,11 +299,12 @@ void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
  */
 __read_mostly int sched_freq_aggregate_threshold;
 
-static void
+static u64
 update_window_start(struct rq *rq, u64 wallclock, int event)
 {
 	s64 delta;
 	int nr_windows;
+	u64 old_window_start = rq->window_start;
 
 	delta = wallclock - rq->window_start;
 	/* If the MPM global timer is cleared, set delta as 0 to avoid kernel BUG happening */
@@ -312,7 +314,7 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
 	}
 
 	if (delta < sched_ravg_window)
-		return;
+		return old_window_start;
 
 	nr_windows = div64_u64(delta, sched_ravg_window);
 	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
@@ -320,6 +322,8 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
 	rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
 	if (event == PUT_PREV_TASK)
 		rq->cum_window_demand += rq->curr->ravg.demand;
+
+	return old_window_start;
 }
 
 int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -913,12 +917,13 @@ void set_window_start(struct rq *rq)
 {
 	static int sync_cpu_available;
 
-	if (rq->window_start)
+	if (likely(rq->window_start))
 		return;
 
 	if (!sync_cpu_available) {
-		rq->window_start = sched_ktime_clock();
+		rq->window_start = 1;
 		sync_cpu_available = 1;
+		atomic_set(&walt_irq_work_lastq_ws, rq->window_start);
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
 
@@ -1904,11 +1909,24 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
 }
 
+static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
+{
+	u64 result;
+
+	if (old_window_start == rq->window_start)
+		return;
+
+	result = atomic_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
+				   rq->window_start);
+	if (result == old_window_start)
+		irq_work_queue(&rq->irq_work);
+}
+
 /* Reflect task activity on its demand and cpu's busy time statistics */
 void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 						u64 wallclock, u64 irqtime)
 {
-	u64 runtime;
+	u64 runtime, old_window_start;
 
 	if (!rq->window_start || sched_disable_window_stats ||
 	    p->ravg.mark_start == wallclock)
@@ -1916,7 +1934,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 
 	lockdep_assert_held(&rq->lock);
 
-	update_window_start(rq, wallclock, event);
+	old_window_start = update_window_start(rq, wallclock, event);
 
 	if (!p->ravg.mark_start) {
 		update_task_cpu_cycles(p, cpu_of(rq));
@@ -1936,6 +1954,8 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 				rq->cc.cycles, rq->cc.time, &rq->grp_time);
 
 	p->ravg.mark_start = wallclock;
+
+	run_walt_irq_work(old_window_start, rq);
 }
 
 u32 sched_get_init_task_load(struct task_struct *p)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index eb6c9f1..8d2b4d8 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1484,6 +1484,11 @@ static __init int kprobe_trace_self_tests_init(void)
 
 end:
 	release_all_trace_kprobes();
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e57301..0ca9565 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3152,6 +3152,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		enum compact_priority prio, enum compact_result *compact_result)
 {
 	struct page *page;
+	unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
 
 	if (!order)
 		return NULL;
@@ -3159,7 +3160,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	current->flags |= PF_MEMALLOC;
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 									prio);
-	current->flags &= ~PF_MEMALLOC;
+	current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
 
 	if (*compact_result <= COMPACT_INACTIVE)
 		return NULL;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 48f9471..c88a600 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
+			       MSG_CMSG_COMPAT))
 		return -EINVAL;
 
 	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 963732e..58dfa23 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -740,7 +740,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 
 	if (msg_data_left(msg) < chunk) {
 		if (__skb_checksum_complete(skb))
-			goto csum_error;
+			return -EINVAL;
 		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
 			goto fault;
 	} else {
@@ -748,15 +748,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
 					       chunk, &csum))
 			goto fault;
-		if (csum_fold(csum))
-			goto csum_error;
+
+		if (csum_fold(csum)) {
+			iov_iter_revert(&msg->msg_iter, chunk);
+			return -EINVAL;
+		}
+
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
 	}
 	return 0;
-csum_error:
-	iov_iter_revert(&msg->msg_iter, chunk);
-	return -EINVAL;
 fault:
 	return -EFAULT;
 }
diff --git a/net/core/filter.c b/net/core/filter.c
index b391209..2cb4f0f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/fcntl.h>
 #include <linux/socket.h>
+#include <linux/sock_diag.h>
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -78,6 +79,10 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
 		return -ENOMEM;
 
+	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
+	if (err)
+		return err;
+
 	err = security_sock_rcv_skb(sk, skb);
 	if (err)
 		return err;
@@ -85,8 +90,13 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
+		struct sock *save_sk = skb->sk;
+		unsigned int pkt_len;
+
+		skb->sk = sk;
+		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+		skb->sk = save_sk;
 	}
 	rcu_read_unlock();
 
@@ -2530,6 +2540,36 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
 	.arg5_type	= ARG_CONST_STACK_SIZE,
 };
 
+BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
+{
+	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
+	.func           = bpf_get_socket_cookie,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
+{
+	struct sock *sk = sk_to_full_sk(skb->sk);
+	kuid_t kuid;
+
+	if (!sk || !sk_fullsock(sk))
+		return overflowuid;
+	kuid = sock_net_uid(sock_net(sk), sk);
+	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
+}
+
+static const struct bpf_func_proto bpf_get_socket_uid_proto = {
+	.func           = bpf_get_socket_uid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -2551,6 +2591,10 @@ sk_filter_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
+	case BPF_FUNC_get_socket_cookie:
+		return &bpf_get_socket_cookie_proto;
+	case BPF_FUNC_get_socket_uid:
+		return &bpf_get_socket_uid_proto;
 	default:
 		return NULL;
 	}
@@ -2628,6 +2672,17 @@ xdp_func_proto(enum bpf_func_id func_id)
 	}
 }
 
+static const struct bpf_func_proto *
+cg_skb_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_skb_load_bytes:
+		return &bpf_skb_load_bytes_proto;
+	default:
+		return sk_filter_func_proto(func_id);
+	}
+}
+
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
 	if (off < 0 || off >= sizeof(struct __sk_buff))
@@ -2990,6 +3045,12 @@ static const struct bpf_verifier_ops xdp_ops = {
 	.convert_ctx_access	= xdp_convert_ctx_access,
 };
 
+static const struct bpf_verifier_ops cg_skb_ops = {
+	.get_func_proto		= cg_skb_func_proto,
+	.is_valid_access	= sk_filter_is_valid_access,
+	.convert_ctx_access	= sk_filter_convert_ctx_access,
+};
+
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
 	.ops	= &sk_filter_ops,
 	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
@@ -3010,12 +3071,18 @@ static struct bpf_prog_type_list xdp_type __read_mostly = {
 	.type	= BPF_PROG_TYPE_XDP,
 };
 
+static struct bpf_prog_type_list cg_skb_type __read_mostly = {
+	.ops	= &cg_skb_ops,
+	.type	= BPF_PROG_TYPE_CGROUP_SKB,
+};
+
 static int __init register_sk_filter_ops(void)
 {
 	bpf_register_prog_type(&sk_filter_type);
 	bpf_register_prog_type(&sched_cls_type);
 	bpf_register_prog_type(&sched_act_type);
 	bpf_register_prog_type(&xdp_type);
+	bpf_register_prog_type(&cg_skb_type);
 
 	return 0;
 }
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 6b10573..acd2a6c 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,7 +19,7 @@ static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 static struct workqueue_struct *broadcast_wq;
 
-static u64 sock_gen_cookie(struct sock *sk)
+u64 sock_gen_cookie(struct sock *sk)
 {
 	while (1) {
 		u64 res = atomic64_read(&sk->sk_cookie);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0bd3efe..2c18bcf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -74,6 +74,7 @@
 #include <net/checksum.h>
 #include <net/inetpeer.h>
 #include <net/lwtunnel.h>
+#include <linux/bpf-cgroup.h>
 #include <linux/igmp.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_bridge.h>
@@ -287,6 +288,13 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int mtu;
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
 
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
@@ -305,6 +313,20 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
 	return ip_finish_output2(net, sk, skb);
 }
 
+static int ip_mc_finish_output(struct net *net, struct sock *sk,
+			       struct sk_buff *skb)
+{
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return dev_loopback_xmit(net, sk, skb);
+}
+
 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
@@ -342,7 +364,7 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			if (newskb)
 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 					net, sk, newskb, NULL, newskb->dev,
-					dev_loopback_xmit);
+					ip_mc_finish_output);
 		}
 
 		/* Multicasts with ttl 0 must not go beyond the host */
@@ -358,7 +380,7 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 		if (newskb)
 			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 				net, sk, newskb, NULL, newskb->dev,
-				dev_loopback_xmit);
+				ip_mc_finish_output);
 	}
 
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e27b8fd..a215802 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include <linux/bpf-cgroup.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 
@@ -131,6 +132,14 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 
 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 48d0dc89b..e735f78 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
 		sipx->sipx_network	= ipxif->if_netnum;
 		memcpy(sipx->sipx_node, ipxif->if_node,
 			sizeof(sipx->sipx_node));
-		rc = -EFAULT;
-		if (copy_to_user(arg, &ifr, sizeof(ifr)))
-			break;
-		ipxitf_put(ipxif);
 		rc = 0;
+		if (copy_to_user(arg, &ifr, sizeof(ifr)))
+			rc = -EFAULT;
+		ipxitf_put(ipxif);
 		break;
 	}
 	case SIOCAIPXITFCRT:
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 72c5867..b2cdced 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -22,6 +22,7 @@
 hostprogs-y += map_perf_test
 hostprogs-y += test_overhead
 hostprogs-y += test_cgrp2_array_pin
+hostprogs-y += test_cgrp2_attach
 hostprogs-y += xdp1
 hostprogs-y += xdp2
 hostprogs-y += test_current_task_under_cgroup
@@ -50,6 +51,7 @@
 map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
 test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
 test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
+test_cgrp2_attach-objs := libbpf.o test_cgrp2_attach.o
 xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9969e35..9ce707b 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -104,6 +104,27 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_bpf_fd = prog_fd,
+		.attach_type = type,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
+}
+
+int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_type = type,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
+}
+
 int bpf_obj_pin(int fd, const char *pathname)
 {
 	union bpf_attr attr = {
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index ac6edb6..d0a799a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -15,6 +15,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
 		  const struct bpf_insn *insns, int insn_len,
 		  const char *license, int kern_version);
 
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
 
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
new file mode 100644
index 0000000..63ef208
--- /dev/null
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -0,0 +1,147 @@
+/* eBPF example program:
+ *
+ * - Creates arraymap in kernel with 4 bytes keys and 8 byte values
+ *
+ * - Loads eBPF program
+ *
+ *   The eBPF program accesses the map passed in to store two pieces of
+ *   information. The number of invocations of the program, which maps
+ *   to the number of packets received, is stored to key 0. Key 1 is
+ *   incremented on each iteration by the number of bytes stored in
+ *   the skb.
+ *
+ * - Detaches any eBPF program previously attached to the cgroup
+ *
+ * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
+ *
+ * - Every second, reads map[0] and map[1] to see how many bytes and
+ *   packets were seen on any socket of tasks in the given cgroup.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <linux/bpf.h>
+
+#include "libbpf.h"
+
+enum {
+	MAP_KEY_PACKETS,
+	MAP_KEY_BYTES,
+};
+
+static int prog_load(int map_fd, int verdict)
+{
+	struct bpf_insn prog[] = {
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */
+
+		/* Count packets */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		/* Count bytes */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd *(u64 *)(r0 + 0) += r1 */
+
+		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+		BPF_EXIT_INSN(),
+	};
+
+	return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
+			     prog, sizeof(prog), "GPL", 0);
+}
+
+static int usage(const char *argv0)
+{
+	printf("Usage: %s <cg-path> <egress|ingress> [drop]\n", argv0);
+	return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+	int cg_fd, map_fd, prog_fd, key, ret;
+	long long pkt_cnt, byte_cnt;
+	enum bpf_attach_type type;
+	int verdict = 1;
+
+	if (argc < 3)
+		return usage(argv[0]);
+
+	if (strcmp(argv[2], "ingress") == 0)
+		type = BPF_CGROUP_INET_INGRESS;
+	else if (strcmp(argv[2], "egress") == 0)
+		type = BPF_CGROUP_INET_EGRESS;
+	else
+		return usage(argv[0]);
+
+	if (argc > 3 && strcmp(argv[3], "drop") == 0)
+		verdict = 0;
+
+	cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
+	if (cg_fd < 0) {
+		printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
+				sizeof(key), sizeof(byte_cnt),
+				256, 0);
+	if (map_fd < 0) {
+		printf("Failed to create map: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	prog_fd = prog_load(map_fd, verdict);
+	printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf);
+
+	if (prog_fd < 0) {
+		printf("Failed to load prog: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	ret = bpf_prog_detach(cg_fd, type);
+	printf("bpf_prog_detach() returned '%s' (%d)\n", strerror(errno), errno);
+
+	ret = bpf_prog_attach(prog_fd, cg_fd, type);
+	if (ret < 0) {
+		printf("Failed to attach prog to cgroup: '%s'\n",
+		       strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	while (1) {
+		key = MAP_KEY_PACKETS;
+		assert(bpf_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
+
+		key = MAP_KEY_BYTES;
+		assert(bpf_lookup_elem(map_fd, &key, &byte_cnt) == 0);
+
+		printf("cgroup received %lld packets, %lld bytes\n",
+		       pkt_cnt, byte_cnt);
+		sleep(1);
+	}
+
+	return EXIT_SUCCESS;
+}
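The instruction array hand-assembled in prog_load() above corresponds,
roughly, to the restricted-C sketch below, written in the style of the
other *_kern.c samples. This is an illustration only, under the
assumption of an ELF loader that maps the section to
BPF_PROG_TYPE_CGROUP_SKB; the sample itself deliberately builds the
program with the BPF_*() macros so it needs no separately compiled
kernel object.

	/* Sketch (not part of the patch): per-cgroup packet/byte counting
	 * in restricted C. Map and function names are illustrative.
	 */
	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") count_map = {
		.type = BPF_MAP_TYPE_ARRAY,
		.key_size = sizeof(__u32),
		.value_size = sizeof(__u64),
		.max_entries = 256,
	};

	SEC("cgroup_skb")	/* section name is an assumption, see above */
	int count_cgroup_skb(struct __sk_buff *skb)
	{
		__u32 key = 0;				/* MAP_KEY_PACKETS */
		__u64 *val = bpf_map_lookup_elem(&count_map, &key);

		if (val)
			__sync_fetch_and_add(val, 1);	/* first BPF_XADD */

		key = 1;				/* MAP_KEY_BYTES */
		val = bpf_map_lookup_elem(&count_map, &key);
		if (val)
			__sync_fetch_and_add(val, skb->len); /* second BPF_XADD */

		return 1;	/* the verdict: 1 = allow, 0 = drop */
	}

	char _license[] SEC("license") = "GPL";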
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 389325a..0974598 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -204,10 +204,11 @@ int ima_appraise_measurement(enum ima_hooks func,
 
 		cause = "missing-hash";
 		status = INTEGRITY_NOLABEL;
-		if (opened & FILE_CREATED) {
+		if (opened & FILE_CREATED)
 			iint->flags |= IMA_NEW_FILE;
+		if ((iint->flags & IMA_NEW_FILE) &&
+		    !(iint->flags & IMA_DIGSIG_REQUIRED))
 			status = INTEGRITY_PASS;
-		}
 		goto out;
 	}
 
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 0430658..0f41257 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -106,7 +106,11 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus)
 	/* disable ringbuffer DMAs */
 	snd_hdac_chip_writeb(bus, RIRBCTL, 0);
 	snd_hdac_chip_writeb(bus, CORBCTL, 0);
+	spin_unlock_irq(&bus->reg_lock);
+
 	hdac_wait_for_cmd_dmas(bus);
+
+	spin_lock_irq(&bus->reg_lock);
 	/* disable unsolicited responses */
 	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
 	spin_unlock_irq(&bus->reg_lock);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 8c0f3b8..e78b5f0 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -498,7 +498,7 @@ static int cs4271_reset(struct snd_soc_codec *codec)
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
 	if (gpio_is_valid(cs4271->gpio_nreset)) {
-		gpio_set_value(cs4271->gpio_nreset, 0);
+		gpio_direction_output(cs4271->gpio_nreset, 0);
 		mdelay(1);
 		gpio_set_value(cs4271->gpio_nreset, 1);
 		mdelay(1);
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
index 0b07393..4278e36 100644
--- a/sound/soc/codecs/wcd9330.c
+++ b/sound/soc/codecs/wcd9330.c
@@ -1536,6 +1536,13 @@ static int tomtom_mad_input_put(struct snd_kcontrol *kcontrol,
 	tomtom_mad_input = ucontrol->value.integer.value[0];
 	micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
 
+	if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tomtom_mad_input = %d out of bounds\n",
+			__func__, tomtom_mad_input);
+		return -EINVAL;
+	}
+
 	pr_debug("%s: tomtom_mad_input = %s\n", __func__,
 			tomtom_conn_mad_text[tomtom_mad_input]);
 
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index d4db55f..36382ba 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,5 +1,5 @@
-snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
-			msm-compress-q6-v2.o msm-compr-q6-v2.o \
+snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o \
+			msm-pcm-routing-v2.o msm-compress-q6-v2.o \
 			msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
 			msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
 			msm-lsm-client.o msm-pcm-host-voice-v2.o \
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index 5d4a0ba..820aa1b 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -646,7 +646,9 @@ static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
 	return cal_block;
 err:
 	kfree(cal_block->cal_info);
+	cal_block->cal_info = NULL;
 	kfree(cal_block->client_info);
+	cal_block->client_info = NULL;
 	kfree(cal_block);
 	cal_block = NULL;
 	return cal_block;
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
deleted file mode 100644
index 449325c..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ /dev/null
@@ -1,1714 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <sound/core.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/pcm.h>
-#include <sound/initval.h>
-#include <sound/control.h>
-#include <sound/q6asm-v2.h>
-#include <sound/pcm_params.h>
-#include <asm/dma.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_audio_ion.h>
-
-#include <sound/timer.h>
-
-#include "msm-compr-q6-v2.h"
-#include "msm-pcm-routing-v2.h"
-#include <sound/tlv.h>
-
-#define COMPRE_CAPTURE_NUM_PERIODS	16
-/* Allocate the worst case frame size for compressed audio */
-#define COMPRE_CAPTURE_HEADER_SIZE	(sizeof(struct snd_compr_audio_info))
-/* Changing period size to 4032. 4032 will make sure COMPRE_CAPTURE_PERIOD_SIZE
- * is 4096 with meta data size of 64 and MAX_NUM_FRAMES_PER_BUFFER 1
- */
-#define COMPRE_CAPTURE_MAX_FRAME_SIZE	(4032)
-#define COMPRE_CAPTURE_PERIOD_SIZE	((COMPRE_CAPTURE_MAX_FRAME_SIZE + \
-					  COMPRE_CAPTURE_HEADER_SIZE) * \
-					  MAX_NUM_FRAMES_PER_BUFFER)
-#define COMPRE_OUTPUT_METADATA_SIZE	(sizeof(struct output_meta_data_st))
-#define COMPRESSED_LR_VOL_MAX_STEPS	0x20002000
-
-#define MAX_AC3_PARAM_SIZE		(18*2*sizeof(int))
-#define AMR_WB_BAND_MODE 8
-#define AMR_WB_DTX_MODE 0
-
-
-const DECLARE_TLV_DB_LINEAR(compr_rx_vol_gain, 0,
-			    COMPRESSED_LR_VOL_MAX_STEPS);
-
-static struct audio_locks the_locks;
-
-static struct snd_pcm_hardware msm_compr_hardware_capture = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =
-		COMPRE_CAPTURE_PERIOD_SIZE * COMPRE_CAPTURE_NUM_PERIODS,
-	.period_bytes_min =	COMPRE_CAPTURE_PERIOD_SIZE,
-	.period_bytes_max = COMPRE_CAPTURE_PERIOD_SIZE,
-	.periods_min =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.periods_max =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.fifo_size =	    0,
-};
-
-static struct snd_pcm_hardware msm_compr_hardware_playback = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =     1024 * 1024,
-	.period_bytes_min =	128 * 1024,
-	.period_bytes_max =     256 * 1024,
-	.periods_min =	  4,
-	.periods_max =	  8,
-	.fifo_size =	    0,
-};
-
-/* Conventional and unconventional sample rate supported */
-static unsigned int supported_sample_rates[] = {
-	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
-};
-
-/* Add supported codecs for compress capture path */
-static uint32_t supported_compr_capture_codecs[] = {
-	SND_AUDIOCODEC_AMRWB
-};
-
-static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
-	.count = ARRAY_SIZE(supported_sample_rates),
-	.list = supported_sample_rates,
-	.mask = 0,
-};
-
-static bool msm_compr_capture_codecs(uint32_t req_codec)
-{
-	int i;
-
-	pr_debug("%s req_codec:%d\n", __func__, req_codec);
-	if (req_codec == 0)
-		return false;
-	for (i = 0; i < ARRAY_SIZE(supported_compr_capture_codecs); i++) {
-		if (req_codec == supported_compr_capture_codecs[i])
-			return true;
-	}
-	return false;
-}
-
-static void compr_event_handler(uint32_t opcode,
-		uint32_t token, uint32_t *payload, void *priv)
-{
-	struct compr_audio *compr = priv;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_substream *substream = prtd->substream;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct audio_aio_write_param param;
-	struct audio_aio_read_param read_param;
-	struct audio_buffer *buf = NULL;
-	phys_addr_t temp;
-	struct output_meta_data_st output_meta_data;
-	uint32_t *ptrmem = (uint32_t *)payload;
-	int i = 0;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-	int stop_playback = 0;
-
-	pr_debug("%s opcode =%08x\n", __func__, opcode);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE_V2: {
-		uint32_t *ptrmem = (uint32_t *)&param;
-
-		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
-		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
-		prtd->pcm_irq_pos += prtd->pcm_count;
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-		else
-			if (substream->timer_running)
-				snd_timer_interrupt(substream->timer, 1);
-		atomic_inc(&prtd->out_count);
-		wake_up(&the_locks.write_wait);
-		if (!atomic_read(&prtd->start)) {
-			atomic_set(&prtd->pending_buffer, 1);
-			break;
-		}
-		atomic_set(&prtd->pending_buffer, 0);
-
-		/*
-		 * check for underrun
-		 */
-		snd_pcm_stream_lock_irq(substream);
-		if (runtime->status->hw_ptr >= runtime->control->appl_ptr) {
-			runtime->render_flag |= SNDRV_RENDER_STOPPED;
-			stop_playback = 1;
-		}
-		snd_pcm_stream_unlock_irq(substream);
-
-		if (stop_playback) {
-			pr_err("underrun! render stopped\n");
-			break;
-		}
-
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		temp = buf[0].phys + (prtd->out_head * prtd->pcm_count);
-		pr_debug("%s:writing buffer[%d] from 0x%pK\n",
-			__func__, prtd->out_head, &temp);
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-		if (buffer_length == 0) {
-			pr_debug("Received a zero length buffer-break out");
-			break;
-		}
-		param.paddr = temp + output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
-					i++, ++ptrmem)
-			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-		break;
-	}
-	case ASM_DATA_EVENT_RENDERED_EOS:
-		pr_debug("ASM_DATA_CMDRSP_EOS\n");
-		if (atomic_read(&prtd->eos)) {
-			pr_debug("ASM_DATA_CMDRSP_EOS wake up\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.eos_wait);
-			atomic_set(&prtd->eos, 0);
-		}
-		break;
-	case ASM_DATA_EVENT_READ_DONE_V2: {
-		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
-		pr_debug("buf = %pK, data = 0x%X, *data = %pK,\n"
-			 "prtd->pcm_irq_pos = %d\n",
-				prtd->audio_client->port[OUT].buf,
-			 *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
-				prtd->audio_client->port[OUT].buf->data,
-				prtd->pcm_irq_pos);
-
-		memcpy(prtd->audio_client->port[OUT].buf->data +
-			   prtd->pcm_irq_pos, (ptrmem + READDONE_IDX_SIZE),
-			   COMPRE_CAPTURE_HEADER_SIZE);
-		pr_debug("buf = %pK, updated data = 0x%X, *data = %pK\n",
-				prtd->audio_client->port[OUT].buf,
-			*(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
-				prtd->pcm_irq_pos),
-				prtd->audio_client->port[OUT].buf->data);
-		if (!atomic_read(&prtd->start))
-			break;
-		pr_debug("frame size=%d, buffer = 0x%X\n",
-				ptrmem[READDONE_IDX_SIZE],
-				ptrmem[READDONE_IDX_BUFADD_LSW]);
-		if (ptrmem[READDONE_IDX_SIZE] > COMPRE_CAPTURE_MAX_FRAME_SIZE) {
-			pr_err("Frame length exceeded the max length");
-			break;
-		}
-		buf = prtd->audio_client->port[OUT].buf;
-
-		pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%pK\n",
-				prtd->pcm_irq_pos, &buf[0].phys);
-		read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
-		read_param.paddr = buf[0].phys +
-			prtd->pcm_irq_pos + COMPRE_CAPTURE_HEADER_SIZE;
-		prtd->pcm_irq_pos += prtd->pcm_count;
-
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-
-		q6asm_async_read(prtd->audio_client, &read_param);
-		break;
-	}
-	case APR_BASIC_RSP_RESULT: {
-		switch (payload[0]) {
-		case ASM_SESSION_CMD_RUN_V2: {
-			if (substream->stream
-				!= SNDRV_PCM_STREAM_PLAYBACK) {
-				atomic_set(&prtd->start, 1);
-				break;
-			}
-			if (!atomic_read(&prtd->pending_buffer))
-				break;
-			pr_debug("%s: writing %d bytes of buffer[%d] to dsp\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-			buf = prtd->audio_client->port[IN].buf;
-			pr_debug("%s: writing buffer[%d] from 0x%pK head %d count %d\n",
-				__func__, prtd->out_head, &buf[0].phys,
-				prtd->pcm_count, prtd->out_head);
-			if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-				time_stamp_flag = SET_TIMESTAMP;
-			else
-				time_stamp_flag = NO_TIMESTAMP;
-			memcpy(&output_meta_data, (char *)(buf->data +
-				prtd->out_head * prtd->pcm_count),
-				COMPRE_OUTPUT_METADATA_SIZE);
-			buffer_length = output_meta_data.frame_size;
-			pr_debug("meta_data_length: %d, frame_length: %d\n",
-				 output_meta_data.meta_data_length,
-				 output_meta_data.frame_size);
-			pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-				 output_meta_data.timestamp_msw,
-				 output_meta_data.timestamp_lsw);
-			param.paddr = buf[prtd->out_head].phys
-					+ output_meta_data.meta_data_length;
-			param.len = buffer_length;
-			param.msw_ts = output_meta_data.timestamp_msw;
-			param.lsw_ts = output_meta_data.timestamp_lsw;
-			param.flags = time_stamp_flag;
-			param.uid = prtd->session_id;
-			param.metadata_len = COMPRE_OUTPUT_METADATA_SIZE;
-			if (q6asm_async_write(prtd->audio_client,
-						&param) < 0)
-				pr_err("%s:q6asm_async_write failed\n",
-					__func__);
-			else
-				prtd->out_head =
-					(prtd->out_head + 1)
-					& (runtime->periods - 1);
-			atomic_set(&prtd->pending_buffer, 0);
-		}
-			break;
-		case ASM_STREAM_CMD_FLUSH:
-			pr_debug("ASM_STREAM_CMD_FLUSH\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.flush_wait);
-			break;
-		default:
-			break;
-		}
-		break;
-	}
-	default:
-		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
-		break;
-	}
-}
-
-static int msm_compr_send_ddp_cfg(struct audio_client *ac,
-					struct snd_dec_ddp *ddp)
-{
-	int i, rc;
-
-	pr_debug("%s\n", __func__);
-
-	if (ddp->params_length / 2 > SND_DEC_DDP_MAX_PARAMS) {
-		pr_err("%s: Invalid number of params %u, max allowed %u\n",
-			__func__, ddp->params_length / 2,
-			SND_DEC_DDP_MAX_PARAMS);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < ddp->params_length/2; i++) {
-		rc = q6asm_ds1_set_endp_params(ac, ddp->params_id[i],
-						ddp->params_value[i]);
-		if (rc) {
-			pr_err("sending params_id: %d failed\n",
-				ddp->params_id[i]);
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int msm_compr_playback_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_hw_params *params;
-	struct asm_aac_cfg aac_cfg;
-	uint16_t bits_per_sample = 16;
-	int ret;
-
-	struct asm_softpause_params softpause = {
-		.enable = SOFT_PAUSE_ENABLE,
-		.period = SOFT_PAUSE_PERIOD,
-		.step = SOFT_PAUSE_STEP,
-		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
-	};
-	struct asm_softvolume_params softvol = {
-		.period = SOFT_VOLUME_PERIOD,
-		.step = SOFT_VOLUME_STEP,
-		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
-	};
-
-	pr_debug("%s\n", __func__);
-
-	params = &soc_prtd->dpcm[substream->stream].hw_params;
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	ret = q6asm_open_write_v2(prtd->audio_client,
-			compr->codec, bits_per_sample);
-	if (ret < 0) {
-		pr_err("%s: Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-	msm_pcm_routing_reg_phy_stream(
-			soc_prtd->dai_link->id,
-			prtd->audio_client->perf_mode,
-			prtd->session_id,
-			substream->stream);
-	/*
-	 * the number of channels are required to call volume api
-	 * accoridngly. So, get channels from hw params
-	 */
-	if ((params_channels(params) > 0) &&
-			(params_periods(params) <= runtime->hw.channels_max))
-		prtd->channel_mode = params_channels(params);
-
-	ret = q6asm_set_softpause(prtd->audio_client, &softpause);
-	if (ret < 0)
-		pr_err("%s: Send SoftPause Param failed ret=%d\n",
-				__func__, ret);
-	ret = q6asm_set_softvolume(prtd->audio_client, &softvol);
-	if (ret < 0)
-		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-				__func__, ret);
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-			(COMPRESSED_IO | ASYNC_IO_MODE));
-	if (ret < 0) {
-		pr_err("%s: Set IO mode failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-	prtd->out_head = 0;
-	atomic_set(&prtd->out_count, runtime->periods);
-
-	if (prtd->enabled)
-		return 0;
-
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_MP3:
-		/* No media format block for mp3 */
-		break;
-	case SND_AUDIOCODEC_AAC:
-		pr_debug("%s: SND_AUDIOCODEC_AAC\n", __func__);
-		memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
-		aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
-		aac_cfg.format = 0x03;
-		aac_cfg.ch_cfg = runtime->channels;
-		aac_cfg.sample_rate =  runtime->rate;
-		ret = q6asm_media_format_block_aac(prtd->audio_client,
-					&aac_cfg);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed\n", __func__);
-		break;
-	case SND_AUDIOCODEC_AC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_AC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	case SND_AUDIOCODEC_EAC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_EAC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	default:
-		return -EINVAL;
-	}
-
-	prtd->enabled = 1;
-	prtd->cmd_ack = 0;
-	prtd->cmd_interrupt = 0;
-
-	return 0;
-}
-
-static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_buffer *buf = prtd->audio_client->port[OUT].buf;
-	struct snd_codec *codec = &compr->info.codec_param.codec;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct audio_aio_read_param read_param;
-	uint16_t bits_per_sample = 16;
-	int ret = 0;
-	int i;
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		compr->info.codec_param.codec.id =
-			SND_AUDIOCODEC_AMRWB;
-	}
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("q6asm_open_read(FORMAT_AMRWB)\n");
-		ret = q6asm_open_read(prtd->audio_client,
-				FORMAT_AMRWB);
-		if (ret < 0) {
-			pr_err("%s: compressed Session out open failed\n",
-					__func__);
-			return -ENOMEM;
-		}
-		pr_debug("msm_pcm_routing_reg_phy_stream\n");
-		msm_pcm_routing_reg_phy_stream(
-				soc_prtd->dai_link->id,
-				prtd->audio_client->perf_mode,
-				prtd->session_id, substream->stream);
-		break;
-	default:
-		pr_debug("q6asm_open_read_compressed(COMPRESSED_META_DATA_MODE)\n");
-		/*
-		 * ret = q6asm_open_read_compressed(prtd->audio_client,
-		 * MAX_NUM_FRAMES_PER_BUFFER,
-		 * COMPRESSED_META_DATA_MODE);
-		 */
-			ret = -EINVAL;
-			break;
-	}
-
-	if (ret < 0) {
-		pr_err("%s: compressed Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-		(COMPRESSED_IO | ASYNC_IO_MODE));
-		if (ret < 0) {
-			pr_err("%s: Set IO mode failed\n", __func__);
-				return -ENOMEM;
-		}
-
-	if (!msm_compr_capture_codecs(codec->id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		codec->id = SND_AUDIOCODEC_AMRWB;
-	}
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-
-	if (prtd->enabled)
-		return ret;
-	read_param.len = prtd->pcm_count;
-
-	switch (codec->id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("SND_AUDIOCODEC_AMRWB\n");
-		ret = q6asm_enc_cfg_blk_amrwb(prtd->audio_client,
-			MAX_NUM_FRAMES_PER_BUFFER,
-			/*
-			 * use fixed band mode and dtx mode
-			 * band mode - 23.85 kbps
-			 */
-			AMR_WB_BAND_MODE,
-			/* dtx mode - disable */
-			AMR_WB_DTX_MODE);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed: %d\n",
-				__func__, ret);
-		break;
-	default:
-		pr_debug("No config for codec %d\n", codec->id);
-	}
-	pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
-			 "pcm_count = %d, periods = %d\n",
-			 __func__, prtd->samp_rate, prtd->channel_mode,
-			 prtd->pcm_size, prtd->pcm_count, runtime->periods);
-
-	for (i = 0; i < runtime->periods; i++) {
-		read_param.uid = i;
-		switch (codec->id) {
-		case SND_AUDIOCODEC_AMRWB:
-			read_param.len = prtd->pcm_count
-					- COMPRE_CAPTURE_HEADER_SIZE;
-			read_param.paddr = buf[i].phys
-					+ COMPRE_CAPTURE_HEADER_SIZE;
-			pr_debug("Push buffer [%d] to DSP, paddr: %pK, vaddr: %pK\n",
-					i, &read_param.paddr,
-					buf[i].data);
-			q6asm_async_read(prtd->audio_client, &read_param);
-			break;
-		default:
-			read_param.paddr = buf[i].phys;
-			/* q6asm_async_read_compressed(prtd->audio_client,
-			 * &read_param);
-			 */
-			pr_debug("%s: To add support for read compressed\n",
-								__func__);
-			ret = -EINVAL;
-			break;
-		}
-	}
-	prtd->periods = runtime->periods;
-
-	prtd->enabled = 1;
-
-	return ret;
-}
-
-static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	int ret = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	pr_debug("%s\n", __func__);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		prtd->pcm_irq_pos = 0;
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-				/*
-				 * request codec invalid or not supported,
-				 * use default compress format
-				 */
-				compr->info.codec_param.codec.id =
-				SND_AUDIOCODEC_AMRWB;
-			}
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->pending_buffer, 1);
-		/* fallthrough */
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		pr_debug("%s: Trigger start\n", __func__);
-		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
-		atomic_set(&prtd->start, 1);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
-		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-static void populate_codec_list(struct compr_audio *compr,
-		struct snd_pcm_runtime *runtime)
-{
-	pr_debug("%s\n", __func__);
-	/* MP3 Block */
-	compr->info.compr_cap.num_codecs = 5;
-	compr->info.compr_cap.min_fragment_size = runtime->hw.period_bytes_min;
-	compr->info.compr_cap.max_fragment_size = runtime->hw.period_bytes_max;
-	compr->info.compr_cap.min_fragments = runtime->hw.periods_min;
-	compr->info.compr_cap.max_fragments = runtime->hw.periods_max;
-	compr->info.compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
-	compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
-	compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
-	compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
-	compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_AMRWB;
-	/* Add new codecs here */
-}
-
-static int msm_compr_open(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr;
-	struct msm_audio *prtd;
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-	compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
-	if (compr == NULL) {
-		pr_err("Failed to allocate memory for msm_audio\n");
-		return -ENOMEM;
-	}
-	prtd = &compr->prtd;
-	prtd->substream = substream;
-	runtime->render_flag = SNDRV_DMA_MODE;
-	prtd->audio_client = q6asm_audio_client_alloc(
-				(app_cb)compr_event_handler, compr);
-	if (!prtd->audio_client) {
-		pr_info("%s: Could not allocate memory\n", __func__);
-		kfree(prtd);
-		return -ENOMEM;
-	}
-
-	prtd->audio_client->perf_mode = false;
-	pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
-
-	prtd->session_id = prtd->audio_client->session;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		runtime->hw = msm_compr_hardware_playback;
-		prtd->cmd_ack = 1;
-	} else {
-		runtime->hw = msm_compr_hardware_capture;
-	}
-
-
-	ret = snd_pcm_hw_constraint_list(runtime, 0,
-			SNDRV_PCM_HW_PARAM_RATE,
-			&constraints_sample_rates);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_list failed\n");
-	/* Ensure that buffer size is a multiple of period size */
-	ret = snd_pcm_hw_constraint_integer(runtime,
-			    SNDRV_PCM_HW_PARAM_PERIODS);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_integer failed\n");
-
-	prtd->dsp_cnt = 0;
-	atomic_set(&prtd->pending_buffer, 1);
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		compr->codec = FORMAT_MP3;
-	populate_codec_list(compr, runtime);
-	runtime->private_data = compr;
-	atomic_set(&prtd->eos, 0);
-	return 0;
-}
-
-static int compressed_set_volume(struct msm_audio *prtd, uint32_t volume)
-{
-	int rc = 0;
-	int avg_vol = 0;
-	int lgain = (volume >> 16) & 0xFFFF;
-	int rgain = volume & 0xFFFF;
-
-	if (prtd && prtd->audio_client) {
-		pr_debug("%s: channels %d volume 0x%x\n", __func__,
-			prtd->channel_mode, volume);
-		if ((prtd->channel_mode == 2) &&
-			(lgain != rgain)) {
-			pr_debug("%s: call q6asm_set_lrgain\n", __func__);
-			rc = q6asm_set_lrgain(prtd->audio_client, lgain, rgain);
-		} else {
-			avg_vol = (lgain + rgain)/2;
-			pr_debug("%s: call q6asm_set_volume\n", __func__);
-			rc = q6asm_set_volume(prtd->audio_client, avg_vol);
-		}
-		if (rc < 0) {
-			pr_err("%s: Send Volume command failed rc=%d\n",
-				__func__, rc);
-		}
-	}
-	return rc;
-}
-
-static int msm_compr_playback_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = 0;
-
-	pr_debug("%s\n", __func__);
-
-	dir = IN;
-	atomic_set(&prtd->pending_buffer, 0);
-
-	prtd->pcm_irq_pos = 0;
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-		msm_pcm_routing_dereg_phy_stream(
-			soc_prtd->dai_link->id,
-			SNDRV_PCM_STREAM_PLAYBACK);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_capture_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = OUT;
-
-	pr_debug("%s\n", __func__);
-	atomic_set(&prtd->pending_buffer, 0);
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
-				SNDRV_PCM_STREAM_CAPTURE);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_close(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_close(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_close(substream);
-	return ret;
-}
-
-static int msm_compr_prepare(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_prepare(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_prepare(substream);
-	return ret;
-}
-
-static snd_pcm_uframes_t msm_compr_pointer(struct snd_pcm_substream *substream)
-{
-
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	if (prtd->pcm_irq_pos >= prtd->pcm_size)
-		prtd->pcm_irq_pos = 0;
-
-	pr_debug("%s: pcm_irq_pos = %d, pcm_size = %d, sample_bits = %d,\n"
-			 "frame_bits = %d\n", __func__, prtd->pcm_irq_pos,
-			 prtd->pcm_size, runtime->sample_bits,
-			 runtime->frame_bits);
-	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
-}
-
-static int msm_compr_mmap(struct snd_pcm_substream *substream,
-				struct vm_area_struct *vma)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct msm_audio *prtd = runtime->private_data;
-	struct audio_client *ac = prtd->audio_client;
-	struct audio_port_data *apd = ac->port;
-	struct audio_buffer *ab;
-	int dir = -1;
-
-	prtd->mmap_flag = 1;
-	runtime->render_flag = SNDRV_NON_DMA_MODE;
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	ab = &(apd[dir].buf[0]);
-
-	return msm_audio_ion_mmap(ab, vma);
-}
-
-static int msm_compr_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
-	struct audio_buffer *buf;
-	int dir, ret;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	/* Modifying kernel hardware params based on userspace config */
-	if (params_periods(params) > 0 &&
-		(params_periods(params) != runtime->hw.periods_max)) {
-		runtime->hw.periods_max = params_periods(params);
-	}
-	if (params_period_bytes(params) > 0 &&
-		(params_period_bytes(params) != runtime->hw.period_bytes_min)) {
-		runtime->hw.period_bytes_min = params_period_bytes(params);
-	}
-	runtime->hw.buffer_bytes_max =
-			runtime->hw.period_bytes_min * runtime->hw.periods_max;
-	pr_debug("allocate %zd buffers each of size %d\n",
-		runtime->hw.period_bytes_min,
-		runtime->hw.periods_max);
-	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			runtime->hw.period_bytes_min,
-			runtime->hw.periods_max);
-	if (ret < 0) {
-		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
-						ret);
-		return -ENOMEM;
-	}
-	buf = prtd->audio_client->port[dir].buf;
-
-	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	dma_buf->dev.dev = substream->pcm->card->dev;
-	dma_buf->private_data = NULL;
-	dma_buf->area = buf[0].data;
-	dma_buf->addr =  buf[0].phys;
-	dma_buf->bytes = runtime->hw.buffer_bytes_max;
-
-	pr_debug("%s: buf[%pK]dma_buf->area[%pK]dma_buf->addr[%pK]\n"
-		 "dma_buf->bytes[%zd]\n", __func__,
-		 (void *)buf, (void *)dma_buf->area,
-		 &dma_buf->addr, dma_buf->bytes);
-	if (!dma_buf->area)
-		return -ENOMEM;
-
-	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-	return 0;
-}
-
-static int msm_compr_ioctl_shared(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int rc = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	uint64_t timestamp;
-	uint64_t temp;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp *tstamp;
-
-		pr_debug("SNDRV_COMPRESS_TSTAMP\n");
-		tstamp = arg;
-		memset(tstamp, 0x0, sizeof(*tstamp));
-		rc = q6asm_get_session_time(prtd->audio_client, &timestamp);
-		if (rc < 0) {
-			pr_err("%s: Get Session Time return value =%lld\n",
-				__func__, timestamp);
-			return -EAGAIN;
-		}
-		temp = (timestamp * 2 * runtime->channels);
-		temp = temp * (runtime->rate/1000);
-		temp = div_u64(temp, 1000);
-		tstamp->sampling_rate = runtime->rate;
-		tstamp->timestamp = timestamp;
-		pr_debug("%s: bytes_consumed:,timestamp = %lld,\n",
-						__func__,
-			tstamp->timestamp);
-		return 0;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps *caps;
-
-		caps = arg;
-		memset(caps, 0, sizeof(*caps));
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		memcpy(caps, &compr->info.compr_cap, sizeof(*caps));
-		return 0;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS:
-		pr_debug("SNDRV_COMPRESS_SET_PARAMS:\n");
-		memcpy(&compr->info.codec_param, (void *) arg,
-			sizeof(struct snd_compr_params));
-		switch (compr->info.codec_param.codec.id) {
-		case SND_AUDIOCODEC_MP3:
-			/* For MP3 we dont need any other parameter */
-			pr_debug("SND_AUDIOCODEC_MP3\n");
-			compr->codec = FORMAT_MP3;
-			break;
-		case SND_AUDIOCODEC_AAC:
-			pr_debug("SND_AUDIOCODEC_AAC\n");
-			compr->codec = FORMAT_MPEG4_AAC;
-			break;
-		case SND_AUDIOCODEC_AC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/*MAX is 36*sizeof(int) this should not happen*/
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_AC3\n");
-			compr->codec = FORMAT_AC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < params_length/sizeof(int); i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		case SND_AUDIOCODEC_EAC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/*MAX is 36*sizeof(int) this should not happen*/
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_EAC3\n");
-			compr->codec = FORMAT_EAC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < ddp->params_length; i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		default:
-			pr_debug("FORMAT_LINEAR_PCM\n");
-			compr->codec = FORMAT_LINEAR_PCM;
-			break;
-		}
-		return 0;
-	case SNDRV_PCM_IOCTL1_RESET:
-		pr_debug("SNDRV_PCM_IOCTL1_RESET\n");
-		/* Flush only when session is started during CAPTURE,
-		 * while PLAYBACK has no such restriction.
-		 */
-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
-			  (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
-						atomic_read(&prtd->start))) {
-			if (atomic_read(&prtd->eos)) {
-				prtd->cmd_interrupt = 1;
-				wake_up(&the_locks.eos_wait);
-				atomic_set(&prtd->eos, 0);
-			}
-
-			/* A unlikely race condition possible with FLUSH
-			 * DRAIN if ack is set by flush and reset by drain
-			 */
-			prtd->cmd_ack = 0;
-			rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
-			if (rc < 0) {
-				pr_err("%s: flush cmd failed rc=%d\n",
-					__func__, rc);
-				return rc;
-			}
-			rc = wait_event_timeout(the_locks.flush_wait,
-				prtd->cmd_ack, 5 * HZ);
-			if (!rc)
-				pr_err("Flush cmd timeout\n");
-			prtd->pcm_irq_pos = 0;
-		}
-		break;
-	case SNDRV_COMPRESS_DRAIN:
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN\n", __func__);
-		if (atomic_read(&prtd->pending_buffer)) {
-			pr_debug("%s: no pending writes, drain would block\n",
-			 __func__);
-			return -EWOULDBLOCK;
-		}
-
-		atomic_set(&prtd->eos, 1);
-		atomic_set(&prtd->pending_buffer, 0);
-		prtd->cmd_ack = 0;
-		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
-		/* Wait indefinitely for  DRAIN. Flush can also signal this*/
-		rc = wait_event_interruptible(the_locks.eos_wait,
-			(prtd->cmd_ack || prtd->cmd_interrupt));
-
-		if (rc < 0)
-			pr_err("EOS cmd interrupted\n");
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN  out of wait\n", __func__);
-
-		if (prtd->cmd_interrupt)
-			rc = -EINTR;
-
-		prtd->cmd_interrupt = 0;
-		return rc;
-	default:
-		break;
-	}
-	return snd_pcm_lib_ioctl(substream, cmd, arg);
-}
-#ifdef CONFIG_COMPAT
-struct snd_enc_wma32 {
-	u32 super_block_align; /* WMA Type-specific data */
-	u32 encodeopt1;
-	u32 encodeopt2;
-};
-
-struct snd_enc_vorbis32 {
-	s32 quality;
-	u32 managed;
-	u32 max_bit_rate;
-	u32 min_bit_rate;
-	u32 downmix;
-};
-
-struct snd_enc_real32 {
-	u32 quant_bits;
-	u32 start_region;
-	u32 num_regions;
-};
-
-struct snd_enc_flac32 {
-	u32 num;
-	u32 gain;
-};
-
-struct snd_enc_generic32 {
-	u32 bw;	/* encoder bandwidth */
-	s32 reserved[15];
-};
-struct snd_dec_ddp32 {
-	u32 params_length;
-	u32 params_id[18];
-	u32 params_value[18];
-};
-
-union snd_codec_options32 {
-	struct snd_enc_wma32 wma;
-	struct snd_enc_vorbis32 vorbis;
-	struct snd_enc_real32 real;
-	struct snd_enc_flac32 flac;
-	struct snd_enc_generic32 generic;
-	struct snd_dec_ddp32 ddp;
-};
-
-struct snd_codec32 {
-	u32 id;
-	u32 ch_in;
-	u32 ch_out;
-	u32 sample_rate;
-	u32 bit_rate;
-	u32 rate_control;
-	u32 profile;
-	u32 level;
-	u32 ch_mode;
-	u32 format;
-	u32 align;
-	union snd_codec_options32 options;
-	u32 reserved[3];
-};
-
-struct snd_compressed_buffer32 {
-	u32 fragment_size;
-	u32 fragments;
-};
-
-struct snd_compr_params32 {
-	struct snd_compressed_buffer32 buffer;
-	struct snd_codec32 codec;
-	u8 no_wake_mode;
-};
-
-struct snd_compr_caps32 {
-	u32 num_codecs;
-	u32 direction;
-	u32 min_fragment_size;
-	u32 max_fragment_size;
-	u32 min_fragments;
-	u32 max_fragments;
-	u32 codecs[MAX_NUM_CODECS];
-	u32 reserved[11];
-};
-struct snd_compr_tstamp32 {
-	u32 byte_offset;
-	u32 copied_total;
-	compat_ulong_t pcm_frames;
-	compat_ulong_t pcm_io_frames;
-	u32 sampling_rate;
-	compat_u64 timestamp;
-};
-enum {
-	SNDRV_COMPRESS_TSTAMP32 = _IOR('C', 0x20, struct snd_compr_tstamp32),
-	SNDRV_COMPRESS_GET_CAPS32 = _IOWR('C', 0x10, struct snd_compr_caps32),
-	SNDRV_COMPRESS_SET_PARAMS32 =
-	_IOW('C', 0x12, struct snd_compr_params32),
-};
-static int msm_compr_compat_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP32: {
-		struct snd_compr_tstamp tstamp;
-		struct snd_compr_tstamp32 tstamp32;
-
-		memset(&tstamp, 0, sizeof(tstamp));
-		memset(&tstamp32, 0, sizeof(tstamp32));
-		cmd = SNDRV_COMPRESS_TSTAMP;
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err) {
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		tstamp32.byte_offset = tstamp.byte_offset;
-		tstamp32.copied_total = tstamp.copied_total;
-		tstamp32.pcm_frames = tstamp.pcm_frames;
-		tstamp32.pcm_io_frames = tstamp.pcm_io_frames;
-		tstamp32.sampling_rate = tstamp.sampling_rate;
-		tstamp32.timestamp = tstamp.timestamp;
-		if (copy_to_user(arg, &tstamp32, sizeof(tstamp32))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS32: {
-		struct snd_compr_caps caps;
-		struct snd_compr_caps32 caps32;
-		u32 i;
-
-		memset(&caps, 0, sizeof(caps));
-		memset(&caps32, 0, sizeof(caps32));
-		cmd = SNDRV_COMPRESS_GET_CAPS;
-		err = msm_compr_ioctl_shared(substream, cmd, &caps);
-		if (err) {
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS_32\n");
-		if (!err && caps.num_codecs >= MAX_NUM_CODECS) {
-			pr_err("%s: Invalid number of codecs\n", __func__);
-			err = -EINVAL;
-			goto bail_out;
-		}
-		caps32.direction = caps.direction;
-		caps32.max_fragment_size = caps.max_fragment_size;
-		caps32.max_fragments = caps.max_fragments;
-		caps32.min_fragment_size = caps.min_fragment_size;
-		caps32.num_codecs = caps.num_codecs;
-		for (i = 0; i < caps.num_codecs; i++)
-			caps32.codecs[i] = caps.codecs[i];
-		if (copy_to_user(arg, &caps32, sizeof(caps32))) {
-			pr_err("%s: copytouser failed COMPRESS_GETCAPS32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS32: {
-		struct snd_compr_params32 params32;
-		struct snd_compr_params params;
-
-		memset(&params32, 0, sizeof(params32));
-		memset(&params, 0, sizeof(params));
-		cmd = SNDRV_COMPRESS_SET_PARAMS;
-		if (copy_from_user(&params32, arg, sizeof(params32))) {
-			pr_err("%s: copyfromuser failed SET_PARAMS32\n",
-			__func__);
-			err = -EFAULT;
-			goto bail_out;
-		}
-		params.no_wake_mode = params32.no_wake_mode;
-		params.codec.id = params32.codec.id;
-		params.codec.ch_in = params32.codec.ch_in;
-		params.codec.ch_out = params32.codec.ch_out;
-		params.codec.sample_rate = params32.codec.sample_rate;
-		params.codec.bit_rate = params32.codec.bit_rate;
-		params.codec.rate_control = params32.codec.rate_control;
-		params.codec.profile = params32.codec.profile;
-		params.codec.level = params32.codec.level;
-		params.codec.ch_mode = params32.codec.ch_mode;
-		params.codec.format = params32.codec.format;
-		params.codec.align = params32.codec.align;
-
-		switch (params.codec.id) {
-		case SND_AUDIOCODEC_WMA:
-		case SND_AUDIOCODEC_WMA_PRO:
-			params.codec.options.wma.encodeopt1 =
-			params32.codec.options.wma.encodeopt1;
-			params.codec.options.wma.encodeopt2 =
-			params32.codec.options.wma.encodeopt2;
-			params.codec.options.wma.super_block_align =
-			params32.codec.options.wma.super_block_align;
-		break;
-		case SND_AUDIOCODEC_VORBIS:
-			params.codec.options.vorbis.downmix =
-			params32.codec.options.vorbis.downmix;
-			params.codec.options.vorbis.managed =
-			params32.codec.options.vorbis.managed;
-			params.codec.options.vorbis.max_bit_rate =
-			params32.codec.options.vorbis.max_bit_rate;
-			params.codec.options.vorbis.min_bit_rate =
-			params32.codec.options.vorbis.min_bit_rate;
-			params.codec.options.vorbis.quality =
-			params32.codec.options.vorbis.quality;
-		break;
-		case SND_AUDIOCODEC_REAL:
-			params.codec.options.real.num_regions =
-			params32.codec.options.real.num_regions;
-			params.codec.options.real.quant_bits =
-			params32.codec.options.real.quant_bits;
-			params.codec.options.real.start_region =
-			params32.codec.options.real.start_region;
-		break;
-		case SND_AUDIOCODEC_FLAC:
-			params.codec.options.flac.gain =
-			params32.codec.options.flac.gain;
-			params.codec.options.flac.num =
-			params32.codec.options.flac.num;
-		break;
-		case SND_AUDIOCODEC_DTS:
-		case SND_AUDIOCODEC_DTS_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_LBR:
-		case SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK:
-		break;
-		case SND_AUDIOCODEC_AC3:
-		case SND_AUDIOCODEC_EAC3:
-			params.codec.options.ddp.params_length =
-			params32.codec.options.ddp.params_length;
-			memcpy(params.codec.options.ddp.params_value,
-			params32.codec.options.ddp.params_value,
-			sizeof(params32.codec.options.ddp.params_value));
-			memcpy(params.codec.options.ddp.params_id,
-			params32.codec.options.ddp.params_id,
-			sizeof(params32.codec.options.ddp.params_id));
-		break;
-		default:
-			params.codec.options.generic.bw =
-			params32.codec.options.generic.bw;
-		break;
-		}
-		if (!err)
-			err = msm_compr_ioctl_shared(substream, cmd, &params);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-bail_out:
-	return err;
-
-}
-#endif
-static int msm_compr_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	if (!substream) {
-		pr_err("%s: Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	pr_debug("%s called with cmd = %d\n", __func__, cmd);
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp tstamp;
-
-		if (!arg) {
-			pr_err("%s: Invalid params Tstamp\n", __func__);
-			return -EINVAL;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err)
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &tstamp, sizeof(tstamp))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps cap;
-
-		if (!arg) {
-			pr_err("%s: Invalid params getcaps\n", __func__);
-			return -EINVAL;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		err = msm_compr_ioctl_shared(substream, cmd, &cap);
-		if (err)
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &cap, sizeof(cap))) {
-			pr_err("%s: copytouser failed GET_CAPS\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS: {
-		struct snd_compr_params params;
-
-		if (!arg) {
-			pr_err("%s: Invalid params setparam\n", __func__);
-			return -EINVAL;
-		}
-		if (copy_from_user(&params, arg,
-			sizeof(struct snd_compr_params))) {
-			pr_err("%s: SET_PARAMS\n", __func__);
-			return -EFAULT;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &params);
-		if (err)
-			pr_err("%s: SET_PARAMS failed rc %d\n",
-			__func__, err);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-	return err;
-}
-
-static int msm_compr_restart(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_aio_write_param param;
-	struct audio_buffer *buf = NULL;
-	struct output_meta_data_st output_meta_data;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-
-	pr_debug("%s, trigger restart\n", __func__);
-
-	if (runtime->render_flag & SNDRV_RENDER_STOPPED) {
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		pr_debug("%s:writing buffer[%d] from 0x%08x\n",
-				__func__, prtd->out_head,
-				((unsigned int)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)));
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-
-		param.paddr = (unsigned long)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)
-				+ output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		return 0;
-	}
-	return 0;
-}
-
-static int msm_compr_volume_ctl_put(struct snd_kcontrol *kcontrol,
-				    struct snd_ctl_elem_value *ucontrol)
-{
-	int rc = 0;
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-	int volume = ucontrol->value.integer.value[0];
-
-	pr_debug("%s: volume : %x\n", __func__, volume);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		rc = compressed_set_volume(prtd, volume);
-
-	return rc;
-}
-
-static int msm_compr_volume_ctl_get(struct snd_kcontrol *kcontrol,
-				  struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-
-	pr_debug("%s\n", __func__);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		ucontrol->value.integer.value[0] = prtd->volume;
-	return 0;
-}
-
-static int msm_compr_add_controls(struct snd_soc_pcm_runtime *rtd)
-{
-	int ret = 0;
-	struct snd_pcm *pcm = rtd->pcm;
-	struct snd_pcm_volume *volume_info;
-	struct snd_kcontrol *kctl;
-
-	dev_dbg(rtd->dev, "%s, Volume cntrl add\n", __func__);
-	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
-				      NULL, 1, rtd->dai_link->id,
-				      &volume_info);
-	if (ret < 0)
-		return ret;
-	kctl = volume_info->kctl;
-	kctl->put = msm_compr_volume_ctl_put;
-	kctl->get = msm_compr_volume_ctl_get;
-	kctl->tlv.p = compr_rx_vol_gain;
-	return 0;
-}
-
-static const struct snd_pcm_ops msm_compr_ops = {
-	.open	   = msm_compr_open,
-	.hw_params	= msm_compr_hw_params,
-	.close	  = msm_compr_close,
-	.ioctl	  = msm_compr_ioctl,
-	.prepare	= msm_compr_prepare,
-	.trigger	= msm_compr_trigger,
-	.pointer	= msm_compr_pointer,
-	.mmap		= msm_compr_mmap,
-	.restart	= msm_compr_restart,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl   = msm_compr_compat_ioctl,
-#endif
-};
-
-static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_card *card = rtd->card->snd_card;
-	int ret = 0;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
-	ret = msm_compr_add_controls(rtd);
-	if (ret)
-		pr_err("%s, kctl add failed\n", __func__);
-	return ret;
-}
-
-static struct snd_soc_platform_driver msm_soc_platform = {
-	.ops		= &msm_compr_ops,
-	.pcm_new	= msm_asoc_pcm_new,
-};
-
-static int msm_compr_probe(struct platform_device *pdev)
-{
-
-	dev_info(&pdev->dev, "%s: dev name %s\n",
-			 __func__, dev_name(&pdev->dev));
-
-	return snd_soc_register_platform(&pdev->dev,
-				   &msm_soc_platform);
-}
-
-static int msm_compr_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
-}
-
-static const struct of_device_id msm_compr_dt_match[] = {
-	{.compatible = "qcom,msm-compr-dsp"},
-	{}
-};
-MODULE_DEVICE_TABLE(of, msm_compr_dt_match);
-
-static struct platform_driver msm_compr_driver = {
-	.driver = {
-		.name = "msm-compr-dsp",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_compr_dt_match,
-	},
-	.probe = msm_compr_probe,
-	.remove = msm_compr_remove,
-};
-
-static int __init msm_soc_platform_init(void)
-{
-	init_waitqueue_head(&the_locks.enable_wait);
-	init_waitqueue_head(&the_locks.eos_wait);
-	init_waitqueue_head(&the_locks.write_wait);
-	init_waitqueue_head(&the_locks.read_wait);
-	init_waitqueue_head(&the_locks.flush_wait);
-
-	return platform_driver_register(&msm_compr_driver);
-}
-module_init(msm_soc_platform_init);
-
-static void __exit msm_soc_platform_exit(void)
-{
-	platform_driver_unregister(&msm_compr_driver);
-}
-module_exit(msm_soc_platform_exit);
-
-MODULE_DESCRIPTION("PCM module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
deleted file mode 100644
index d6e3ec6..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_COMPR_H
-#define _MSM_COMPR_H
-#include <sound/apr_audio-v2.h>
-#include <sound/q6asm-v2.h>
-#include <sound/compress_params.h>
-#include <sound/compress_offload.h>
-#include <sound/compress_driver.h>
-
-#include "msm-pcm-q6-v2.h"
-
-struct compr_info {
-	struct snd_compr_caps compr_cap;
-	struct snd_compr_codec_caps codec_caps;
-	struct snd_compr_params codec_param;
-};
-
-struct compr_audio {
-	struct msm_audio prtd;
-	struct compr_info info;
-	uint32_t codec;
-};
-
-#endif /*_MSM_COMPR_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 37dd31f..421769e 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1165,28 +1165,27 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
 		break;
 
 	case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
-		u32 *mode = NULL;
+		u32 mode;
 
-		if (!arg) {
-			dev_err(rtd->dev,
-				"%s: Invalid param arg for ioctl %s session %d\n",
-				__func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG",
-				prtd->lsm_client->session);
-			rc = -EINVAL;
-			break;
+		if (copy_from_user(&mode, arg, sizeof(mode))) {
+			dev_err(rtd->dev, "%s: %s: copy_from_user failed\n",
+				__func__, "LSM_SET_FWK_MODE_CONFIG");
+			return -EFAULT;
 		}
-		mode = (u32 *)arg;
-		if (prtd->lsm_client->event_mode == *mode) {
+
+		dev_dbg(rtd->dev, "%s: ioctl %s, enable = %d\n",
+			__func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG", mode);
+		if (prtd->lsm_client->event_mode == mode) {
 			dev_dbg(rtd->dev,
 				"%s: mode for %d already set to %d\n",
-				__func__, prtd->lsm_client->session, *mode);
+				__func__, prtd->lsm_client->session, mode);
 			rc = 0;
 		} else {
 			dev_dbg(rtd->dev, "%s: Event mode = %d\n",
-				 __func__, *mode);
-			rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, *mode);
+				 __func__, mode);
+			rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, mode);
 			if (!rc)
-				prtd->lsm_client->event_mode = *mode;
+				prtd->lsm_client->event_mode = mode;
 			else
 				dev_err(rtd->dev,
 					"%s: set event mode failed %d\n",
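
The hunk above replaces a direct dereference of the ioctl argument with a
copy_from_user() of the 32-bit mode value. As a minimal sketch of that
pattern, assuming kernel context (<linux/types.h>, <linux/uaccess.h>,
<linux/errno.h>) and a hypothetical handler name that is not part of this
patch:

	/* Copy a u32 ioctl argument from userspace before interpreting it;
	 * the argument is a user pointer and must not be dereferenced
	 * directly in kernel context.
	 */
	static int example_set_event_mode(void __user *arg, u32 *event_mode)
	{
		u32 mode;

		if (copy_from_user(&mode, arg, sizeof(mode)))
			return -EFAULT;		/* faulting user pointer */

		*event_mode = mode;		/* safe to use the copied value */
		return 0;
	}
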
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9e5fc16..42dfbeb 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -375,6 +375,105 @@ enum bpf_func_id {
 	 */
 	BPF_FUNC_probe_write_user,
 
+	/**
+	 * int bpf_skb_change_tail(skb, len, flags)
+	 *     The helper will resize the skb to the given new size, to be used e.g.
+	 *     with control messages.
+	 *     @skb: pointer to skb
+	 *     @len: new skb length
+	 *     @flags: reserved
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_tail,
+
+	/**
+	 * int bpf_skb_pull_data(skb, len)
+	 *     The helper will pull in non-linear data in case the skb is non-linear
+	 *     and not all of len is part of the linear section. Only needed for
+	 *     read/write with direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: len to make read/writeable
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_pull_data,
+
+	/**
+	 * s64 bpf_csum_update(skb, csum)
+	 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+	 *     @skb: pointer to skb
+	 *     @csum: csum to add
+	 *     Return: csum on success or negative error
+	 */
+	BPF_FUNC_csum_update,
+
+	/**
+	 * void bpf_set_hash_invalid(skb)
+	 *     Invalidate current skb->hash.
+	 *     @skb: pointer to skb
+	 */
+	BPF_FUNC_set_hash_invalid,
+
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head(skb, len, flags)
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive or negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just size-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success or 0 if the
+	 *     socket field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or overflowuid if failed.
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };
 
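
The helper descriptions added above are API documentation only. As a rough
sketch of how one of them is called from a program, here is a socket-filter
style BPF program using bpf_get_socket_uid(), written in the restricted C
used under samples/bpf; the helper binding mirrors the usual bpf_helpers.h
convention and the program name is made up:

	#include <linux/bpf.h>

	/* Bind the helper to its BPF_FUNC_* id, as samples/bpf/bpf_helpers.h does. */
	static unsigned int (*bpf_get_socket_uid)(void *ctx) =
		(void *) BPF_FUNC_get_socket_uid;

	/* Socket filter: keep packets whose owning socket belongs to uid 0 and
	 * drop everything else. A socket filter's return value is the number
	 * of bytes to accept; 0 means drop.
	 */
	__attribute__((section("socket"), used))
	int keep_root_only(struct __sk_buff *skb)
	{
		if (bpf_get_socket_uid(skb) != 0)
			return 0;
		return skb->len;
	}
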
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index c5a6e0b..78bd632 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1826,7 +1826,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
 		filt->addr = start;
 		if (filt->range && !filt->size && !filt->sym_to) {
 			filt->size = size;
-			no_size = !!size;
+			no_size = !size;
 		}
 	}
 
@@ -1840,7 +1840,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
 		if (err)
 			return err;
 		filt->size = start + size - filt->addr;
-		no_size = !!size;
+		no_size = !size;
 	}
 
 	/* The very last symbol in kallsyms does not imply a particular size */
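
The two one-character fixes above invert a predicate: no_size is meant to
record that kallsyms reported no size for the resolved symbol, so that the
filter size can be fixed up later, which is !size; the previous !!size set
the flag in exactly the opposite case. A trivial sketch of the intended
test, for illustration only:

	#include <stdbool.h>

	/* True exactly when the resolved symbol size is unknown (zero). */
	static bool symbol_size_unknown(unsigned long size)
	{
		return !size;	/* the old !!size was the negation of this */
	}
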
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 4af4707..e717fed 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -403,6 +403,51 @@ static void *threadproc(void *ctx)
 	}
 }
 
+#ifdef __i386__
+
+#ifndef SA_RESTORER
+#define SA_RESTORER 0x04000000
+#endif
+
+/*
+ * The UAPI header calls this 'struct sigaction', which conflicts with
+ * glibc.  Sigh.
+ */
+struct fake_ksigaction {
+	void *handler;  /* the real type is nasty */
+	unsigned long sa_flags;
+	void (*sa_restorer)(void);
+	unsigned char sigset[8];
+};
+
+static void fix_sa_restorer(int sig)
+{
+	struct fake_ksigaction ksa;
+
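+	/* Raw rt_sigaction: the last argument is sigsetsize, which must be 8
+	 * (the size of the kernel sigset_t) for the syscall to succeed.
+	 */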
+	if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
+		/*
+		 * glibc has a nasty bug: it sometimes writes garbage to
+		 * sa_restorer.  This interacts quite badly with anything
+		 * that fiddles with SS because it can trigger legacy
+		 * stack switching.  Patch it up.  See:
+		 *
+		 * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
+		 */
+		if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
+			ksa.sa_restorer = NULL;
+			if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
+				    sizeof(ksa.sigset)) != 0)
+				err(1, "rt_sigaction");
+		}
+	}
+}
+#else
+static void fix_sa_restorer(int sig)
+{
+	/* 64-bit glibc works fine. */
+}
+#endif
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
 		       int flags)
 {
@@ -414,6 +459,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
 	if (sigaction(sig, &sa, 0))
 		err(1, "sigaction");
 
+	fix_sa_restorer(sig);
 }
 
 static jmp_buf jmpbuf;